From 236d912eb47cbb6e56103a63ea67c6d454d8c36c Mon Sep 17 00:00:00 2001 From: shreelakshmijoshi Date: Thu, 12 Sep 2024 12:21:15 +0530 Subject: [PATCH 1/5] Refactor: merge Deployer & DeployerDev --- .../apd/acl/server/deploy/DeployerDev.java | 304 ++++++++++++++++-- 1 file changed, 274 insertions(+), 30 deletions(-) diff --git a/src/main/java/iudx/apd/acl/server/deploy/DeployerDev.java b/src/main/java/iudx/apd/acl/server/deploy/DeployerDev.java index 8a5e49f..0171b52 100644 --- a/src/main/java/iudx/apd/acl/server/deploy/DeployerDev.java +++ b/src/main/java/iudx/apd/acl/server/deploy/DeployerDev.java @@ -1,34 +1,74 @@ package iudx.apd.acl.server.deploy; +import com.hazelcast.config.Config; +import com.hazelcast.config.DiscoveryStrategyConfig; +import com.hazelcast.zookeeper.ZookeeperDiscoveryProperties; +import com.hazelcast.zookeeper.ZookeeperDiscoveryStrategyFactory; +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.binder.jvm.ClassLoaderMetrics; +import io.micrometer.core.instrument.binder.jvm.JvmGcMetrics; +import io.micrometer.core.instrument.binder.jvm.JvmMemoryMetrics; +import io.micrometer.core.instrument.binder.jvm.JvmThreadMetrics; +import io.micrometer.core.instrument.binder.system.ProcessorMetrics; import io.vertx.core.DeploymentOptions; import io.vertx.core.Vertx; import io.vertx.core.VertxOptions; import io.vertx.core.cli.CLI; import io.vertx.core.cli.CommandLine; import io.vertx.core.cli.Option; +import io.vertx.core.cli.TypedOption; import io.vertx.core.eventbus.EventBusOptions; +import io.vertx.core.http.HttpServerOptions; +import io.vertx.core.json.JsonArray; import io.vertx.core.json.JsonObject; +import io.vertx.core.metrics.MetricsOptions; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.micrometer.Label; +import io.vertx.micrometer.MicrometerMetricsOptions; +import io.vertx.micrometer.VertxPrometheusOptions; +import io.vertx.micrometer.backends.BackendRegistries; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.util.Arrays; +import java.util.EnumSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; /** - * Deploys non-clustered vert.x instance of the server. As a JAR, the application requires 1 runtime + * Deploys non-clustered vert.x instance of the server. As a JAR, the application requires 4 runtime * argument: * * * - *

e.g. java -jar ./fatjar.jar -c configs/config.json + *

e.g. java -jar ./fatjar.jar -c configs/config.json -C false -i localhost -m + * iudx.apd.acl.server.authenticator.JwtAuthenticationVerticle + * ,iudx.apd.acl.server.apiserver.ApiServerVerticle */ public class DeployerDev { private static final Logger LOGGER = LogManager.getLogger(DeployerDev.class); + private static Vertx vertxInstance; + private static ClusterManager mgr; + /** + * Recursively deploy all modules. + * + * @param vertx the vert.x instance + * @param configs the JSON configuration + * @param i for recursive base case + */ public static void recursiveDeploy(Vertx vertx, JsonObject configs, int i) { if (i >= configs.getJsonArray("modules").size()) { LOGGER.info("Deployed all"); @@ -51,17 +91,14 @@ public static void recursiveDeploy(Vertx vertx, JsonObject configs, int i) { deploymentOptions.setMaxWorkerExecuteTimeUnit(TimeUnit.MINUTES); } - vertx.deployVerticle( - moduleName, - deploymentOptions, - ar -> { - if (ar.succeeded()) { - LOGGER.info("Deployed " + moduleName); - recursiveDeploy(vertx, configs, i + 1); - } else { - LOGGER.fatal("Failed to deploy " + moduleName + " cause:", ar.cause()); - } - }); + vertx.deployVerticle(moduleName, deploymentOptions, ar -> { + if (ar.succeeded()) { + LOGGER.info("Deployed " + moduleName); + recursiveDeploy(vertx, configs, i + 1); + } else { + LOGGER.fatal("Failed to deploy " + moduleName + " cause:", ar.cause()); + } + }); } private static JsonObject getConfigForModule(int moduleIndex, JsonObject configurations) { @@ -72,7 +109,7 @@ private static JsonObject getConfigForModule(int moduleIndex, JsonObject configu public static void deploy(String configPath) { EventBusOptions ebOptions = new EventBusOptions(); - VertxOptions options = new VertxOptions().setEventBusOptions(ebOptions); + VertxOptions options = new VertxOptions().setEventBusOptions(ebOptions).setMetricsOptions(getMetricsOptions()); String config; try { @@ -87,34 +124,241 @@ public static void deploy(String configPath) { } JsonObject configuration = new JsonObject(config); Vertx vertx = Vertx.vertx(options); + setVertxInstance(vertx); + setJvmMetrics(); + recursiveDeploy(vertx, configuration, 0); + + } + + private static void setVertxInstance(Vertx vertx) { + vertxInstance = vertx; } public static void main(String[] args) { - CLI cli = - CLI.create("IUDX ACL APD Server") - .setSummary("A CLI to deploy the acl-apd server") - .addOption( - new Option() - .setLongName("help") - .setShortName("h") - .setFlag(true) - .setDescription("display help")) - .addOption( - new Option() - .setLongName("config") - .setShortName("c") - .setRequired(true) - .setDescription("configuration file")); + + CLI cli = CLI.create("DX ACL APD Server").setSummary("A CLI to deploy the acl-apd server") + .addOption(new Option().setLongName("help").setShortName("h").setFlag(true).setDescription("display help")) + .addOption( + new Option().setLongName("config").setShortName("c").setRequired(true).setDescription("configuration file")) + .addOption(new Option().setLongName("isClustered").setShortName("C").setRequired(true).setDefaultValue("false") + .setDescription("Is it being deployed in clustered mode")) + + .addOption(new Option().setLongName("host").setShortName("i").setRequired(true).setDefaultValue("localhost") + .setDescription("public host")) + + .addOption( + new TypedOption().setType(String.class).setLongName("modules").setShortName("m").setRequired(false) + .setDefaultValue("all").setParsedAsList(true).setDescription( + "comma separated list of verticle names to deploy. 
" + + "If omitted, or if `all` is passed, all verticles are deployed")); StringBuilder usageString = new StringBuilder(); cli.usage(usageString); CommandLine commandLine = cli.parse(Arrays.asList(args), false); + boolean isDeploymentInClusteredMode = Boolean.parseBoolean(commandLine.getOptionValue("isClustered")); + + if (commandLine.isValid() && !commandLine.isFlagEnabled("help")) { String configPath = commandLine.getOptionValue("config"); - deploy(configPath); + String host = commandLine.getOptionValue("host"); + List passedModules = commandLine.getOptionValues("modules"); + List modules = passedModules.stream().distinct().collect(Collectors.toList()); + if (!isDeploymentInClusteredMode) { + deploy(configPath); + }/* `all` is also passed by default if no -m option given.*/ else if (modules.contains("all")) { + deployInClusteredMode(configPath, host, List.of()); + } else { + deployInClusteredMode(configPath, host, modules); + } + + Runtime.getRuntime().addShutdownHook(new Thread(() -> gracefulShutdown())); } else { LOGGER.info(usageString); } } + + + /** + * Deploy clustered vert.x instance. + * + * @param configPath the path for JSON config file + * @param host String + * @param modules list of modules to deploy. If list is empty, all modules are deployed + */ + public static void deployInClusteredMode(String configPath, String host, List modules) { + String config; + try { + config = new String(Files.readAllBytes(Paths.get(configPath)), StandardCharsets.UTF_8); + } catch (Exception e) { + LOGGER.fatal("Couldn't read configuration file"); + return; + } + if (config.length() < 1) { + LOGGER.fatal("Couldn't read configuration file"); + return; + } + JsonObject configuration = new JsonObject(config); + List zookeepers = configuration.getJsonArray("zookeepers").getList(); + String clusterId = configuration.getString("clusterId"); + mgr = getClusterManager(host, zookeepers, clusterId); + EventBusOptions ebOptions = new EventBusOptions().setClusterPublicHost(host); + VertxOptions options = + new VertxOptions().setClusterManager(mgr).setEventBusOptions(ebOptions).setMetricsOptions(getMetricsOptions()); + LOGGER.debug("metrics-options" + options.getMetricsOptions()); + Vertx.clusteredVertx(options, res -> { + if (res.succeeded()) { + vertxInstance = res.result(); + LOGGER.debug(vertxInstance.isMetricsEnabled()); + setJvmMetrics(); + if (modules.isEmpty()) { + recursiveDeploy(vertxInstance, configuration, 0); + } else { + recursiveDeploy(vertxInstance, configuration, modules); + } + } else { + LOGGER.fatal("Could not join cluster"); + } + }); + } + + /** + * Recursively deploy modules/verticles (if they exist) present in the `modules` list. 
+ * + * @param vertx the vert.x instance + * @param configs the JSON configuration + * @param modules the list of modules to deploy + */ + public static void recursiveDeploy(Vertx vertx, JsonObject configs, List modules) { + if (modules.isEmpty()) { + LOGGER.info("Deployed requested verticles"); + return; + } + JsonArray configuredModules = configs.getJsonArray("modules"); + + String moduleName = modules.get(0); + JsonObject config = + configuredModules.stream().map(obj -> (JsonObject) obj).filter(obj -> obj.getString("id").equals(moduleName)) + .findFirst().orElse(new JsonObject()); + + if (config.isEmpty()) { + LOGGER.fatal("Failed to deploy " + moduleName + " cause: Not Found"); + return; + } + // get common configs and add this to config object + JsonObject commonConfigs = configs.getJsonObject("commonConfig"); + config.mergeIn(commonConfigs, true); + int numInstances = config.getInteger("verticleInstances"); + DeploymentOptions deploymentOptions = new DeploymentOptions().setInstances(numInstances).setConfig(config); + boolean isWorkerVerticle = config.getBoolean("isWorkerVerticle"); + if (isWorkerVerticle) { + LOGGER.info("worker verticle : " + config.getString("id")); + deploymentOptions.setWorkerPoolName(config.getString("threadPoolName")); + deploymentOptions.setWorkerPoolSize(config.getInteger("threadPoolSize")); + deploymentOptions.setWorker(true); + deploymentOptions.setMaxWorkerExecuteTime(30L); + deploymentOptions.setMaxWorkerExecuteTimeUnit(TimeUnit.MINUTES); + } + + vertx.deployVerticle(moduleName, deploymentOptions, ar -> { + if (ar.succeeded()) { + LOGGER.info("Deployed " + moduleName); + modules.remove(0); + recursiveDeploy(vertx, configs, modules); + } else { + LOGGER.fatal("Failed to deploy " + moduleName + " cause:", ar.cause()); + } + }); + } + + public static ClusterManager getClusterManager(String host, List zookeepers, String clusterId) { + Config config = new Config(); + config.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false); + config.getNetworkConfig().setPublicAddress(host); + config.setProperty("hazelcast.discovery.enabled", "true"); + config.setProperty("hazelcast.logging.type", "log4j2"); + DiscoveryStrategyConfig discoveryStrategyConfig = + new DiscoveryStrategyConfig(new ZookeeperDiscoveryStrategyFactory()); + discoveryStrategyConfig.addProperty(ZookeeperDiscoveryProperties.ZOOKEEPER_URL.key(), String.join(",", zookeepers)); + discoveryStrategyConfig.addProperty(ZookeeperDiscoveryProperties.GROUP.key(), clusterId); + config.getNetworkConfig().getJoin().getDiscoveryConfig().addDiscoveryStrategyConfig(discoveryStrategyConfig); + + return new HazelcastClusterManager(config); + } + + public static MetricsOptions getMetricsOptions() { + return new MicrometerMetricsOptions().setPrometheusOptions( + new VertxPrometheusOptions().setEnabled(true).setStartEmbeddedServer(true) + .setEmbeddedServerOptions(new HttpServerOptions().setPort(9000))) + .setLabels(EnumSet.of(Label.EB_ADDRESS, Label.EB_FAILURE, Label.HTTP_CODE, Label.HTTP_METHOD)).setEnabled(true); + } + + public static void setJvmMetrics() { + LOGGER.debug("Setting JVM metrics"); + MeterRegistry registry = BackendRegistries.getDefaultNow(); + LOGGER.debug(registry); + new ClassLoaderMetrics().bindTo(registry); + new JvmMemoryMetrics().bindTo(registry); + new JvmGcMetrics().bindTo(registry); + new ProcessorMetrics().bindTo(registry); + new JvmThreadMetrics().bindTo(registry); + } + + public static void gracefulShutdown() { + Set deployIdSet = vertxInstance.deploymentIDs(); + 
LOGGER.info("Shutting down the application"); + CountDownLatch latchVerticles = new CountDownLatch(deployIdSet.size()); + CountDownLatch latchCluster = new CountDownLatch(1); + CountDownLatch latchVertx = new CountDownLatch(1); + LOGGER.debug("number of verticles being undeployed are:" + deployIdSet.size()); + // shutdown verticles + for (String deploymentId : deployIdSet) { + vertxInstance.undeploy(deploymentId, handler -> { + if (handler.succeeded()) { + LOGGER.debug(deploymentId + " verticle successfully Undeployed"); + latchVerticles.countDown(); + } else { + LOGGER.warn(deploymentId + "Undeploy failed!"); + } + }); + } + + try { + latchVerticles.await(5, TimeUnit.SECONDS); + LOGGER.info("All the verticles undeployed"); + return; + + } catch (Exception e) { + e.printStackTrace(); + } + + try { + latchCluster.await(5, TimeUnit.SECONDS); + // shutdown vertx + vertxInstance.close(handler -> { + if (handler.succeeded()) { + LOGGER.info("vertx closed succesfully"); + latchVertx.countDown(); + } else { + LOGGER.warn("Vertx didn't close properly, reason:" + handler.cause()); + } + }); + } catch (Exception e) { + e.printStackTrace(); + } + + try { + latchVertx.await(5, TimeUnit.SECONDS); + // then shut down log4j + if (LogManager.getContext() instanceof LoggerContext) { + LOGGER.debug("Shutting down log4j2"); + LogManager.shutdown(LogManager.getContext()); + } else { + LOGGER.warn("Unable to shutdown log4j2"); + } + } catch (Exception e) { + e.printStackTrace(); + } + } + } From e7586d50f1866c2cf90caa4fe00b85f77a30c594 Mon Sep 17 00:00:00 2001 From: shreelakshmijoshi Date: Thu, 12 Sep 2024 12:31:33 +0530 Subject: [PATCH 2/5] Refactor: Zookeeper URL --- docs/SETUP-and-Installation.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/SETUP-and-Installation.md b/docs/SETUP-and-Installation.md index 104ecbf..eac980e 100644 --- a/docs/SETUP-and-Installation.md +++ b/docs/SETUP-and-Installation.md @@ -143,11 +143,11 @@ export LOG_LEVEL=INFO - `iudx.iudx.apd.acl.server-dev-0.0.1-SNAPSHOT-fat.jar` - non-clustered vert.x and does not contain micrometer metrics #### Running the clustered JAR -**Note**: The clustered JAR requires Zookeeper to be installed. Refer [here](https://zookeeper.apache.org/doc/r3.3.3/zookeeperStarted.html) to learn more about how to set up Zookeeper. Additionally, the `zookeepers` key in the config being used needs to be updated with the IP address/domain of the system running Zookeeper. +**Note**: The clustered JAR requires Zookeeper to be installed. Refer [here](https://zookeeper.apache.org/doc/current/zookeeperStarted.html) to learn more about how to set up Zookeeper. Additionally, the `zookeepers` key in the config being used needs to be updated with the IP address/domain of the system running Zookeeper. The JAR requires 3 runtime arguments when running: * --config/-c : path to the config file -* --hostname/-i : the hostname for clustering +* --host/-i : the hostname for clustering * --modules/-m : comma separated list of module names to deploy e.g. 
```java -jar target/iudx.iudx.apd.acl.server-cluster-0.0.1-SNAPSHOT-fat.jar --host $(hostname) From 8a6998049a5e5d05ebacbac86b577a7f24e018fa Mon Sep 17 00:00:00 2001 From: shreelakshmijoshi Date: Thu, 12 Sep 2024 12:48:21 +0530 Subject: [PATCH 3/5] Refactor: resolve checkstyle issue --- .../apd/acl/server/deploy/DeployerDev.java | 102 +++++++++--------- 1 file changed, 52 insertions(+), 50 deletions(-) diff --git a/src/main/java/iudx/apd/acl/server/deploy/DeployerDev.java b/src/main/java/iudx/apd/acl/server/deploy/DeployerDev.java index 0171b52..6069eac 100644 --- a/src/main/java/iudx/apd/acl/server/deploy/DeployerDev.java +++ b/src/main/java/iudx/apd/acl/server/deploy/DeployerDev.java @@ -101,6 +101,55 @@ public static void recursiveDeploy(Vertx vertx, JsonObject configs, int i) { }); } + /** + * Recursively deploy modules/verticles (if they exist) present in the `modules` list. + * + * @param vertx the vert.x instance + * @param configs the JSON configuration + * @param modules the list of modules to deploy + */ + public static void recursiveDeploy(Vertx vertx, JsonObject configs, List modules) { + if (modules.isEmpty()) { + LOGGER.info("Deployed requested verticles"); + return; + } + JsonArray configuredModules = configs.getJsonArray("modules"); + + String moduleName = modules.get(0); + JsonObject config = + configuredModules.stream().map(obj -> (JsonObject) obj).filter(obj -> obj.getString("id").equals(moduleName)) + .findFirst().orElse(new JsonObject()); + + if (config.isEmpty()) { + LOGGER.fatal("Failed to deploy " + moduleName + " cause: Not Found"); + return; + } + // get common configs and add this to config object + JsonObject commonConfigs = configs.getJsonObject("commonConfig"); + config.mergeIn(commonConfigs, true); + int numInstances = config.getInteger("verticleInstances"); + DeploymentOptions deploymentOptions = new DeploymentOptions().setInstances(numInstances).setConfig(config); + boolean isWorkerVerticle = config.getBoolean("isWorkerVerticle"); + if (isWorkerVerticle) { + LOGGER.info("worker verticle : " + config.getString("id")); + deploymentOptions.setWorkerPoolName(config.getString("threadPoolName")); + deploymentOptions.setWorkerPoolSize(config.getInteger("threadPoolSize")); + deploymentOptions.setWorker(true); + deploymentOptions.setMaxWorkerExecuteTime(30L); + deploymentOptions.setMaxWorkerExecuteTimeUnit(TimeUnit.MINUTES); + } + + vertx.deployVerticle(moduleName, deploymentOptions, ar -> { + if (ar.succeeded()) { + LOGGER.info("Deployed " + moduleName); + modules.remove(0); + recursiveDeploy(vertx, configs, modules); + } else { + LOGGER.fatal("Failed to deploy " + moduleName + " cause:", ar.cause()); + } + }); + } + private static JsonObject getConfigForModule(int moduleIndex, JsonObject configurations) { JsonObject commonConfigs = configurations.getJsonObject("commonConfig"); JsonObject config = configurations.getJsonArray("modules").getJsonObject(moduleIndex); @@ -150,7 +199,8 @@ public static void main(String[] args) { .addOption( new TypedOption().setType(String.class).setLongName("modules").setShortName("m").setRequired(false) .setDefaultValue("all").setParsedAsList(true).setDescription( - "comma separated list of verticle names to deploy. " + + "comma separated list of verticle names to deploy. 
" + + "If omitted, or if `all` is passed, all verticles are deployed")); StringBuilder usageString = new StringBuilder(); @@ -166,7 +216,7 @@ public static void main(String[] args) { List modules = passedModules.stream().distinct().collect(Collectors.toList()); if (!isDeploymentInClusteredMode) { deploy(configPath); - }/* `all` is also passed by default if no -m option given.*/ else if (modules.contains("all")) { + } /* `all` is also passed by default if no -m option given.*/ else if (modules.contains("all")) { deployInClusteredMode(configPath, host, List.of()); } else { deployInClusteredMode(configPath, host, modules); @@ -222,54 +272,6 @@ public static void deployInClusteredMode(String configPath, String host, List modules) { - if (modules.isEmpty()) { - LOGGER.info("Deployed requested verticles"); - return; - } - JsonArray configuredModules = configs.getJsonArray("modules"); - - String moduleName = modules.get(0); - JsonObject config = - configuredModules.stream().map(obj -> (JsonObject) obj).filter(obj -> obj.getString("id").equals(moduleName)) - .findFirst().orElse(new JsonObject()); - - if (config.isEmpty()) { - LOGGER.fatal("Failed to deploy " + moduleName + " cause: Not Found"); - return; - } - // get common configs and add this to config object - JsonObject commonConfigs = configs.getJsonObject("commonConfig"); - config.mergeIn(commonConfigs, true); - int numInstances = config.getInteger("verticleInstances"); - DeploymentOptions deploymentOptions = new DeploymentOptions().setInstances(numInstances).setConfig(config); - boolean isWorkerVerticle = config.getBoolean("isWorkerVerticle"); - if (isWorkerVerticle) { - LOGGER.info("worker verticle : " + config.getString("id")); - deploymentOptions.setWorkerPoolName(config.getString("threadPoolName")); - deploymentOptions.setWorkerPoolSize(config.getInteger("threadPoolSize")); - deploymentOptions.setWorker(true); - deploymentOptions.setMaxWorkerExecuteTime(30L); - deploymentOptions.setMaxWorkerExecuteTimeUnit(TimeUnit.MINUTES); - } - - vertx.deployVerticle(moduleName, deploymentOptions, ar -> { - if (ar.succeeded()) { - LOGGER.info("Deployed " + moduleName); - modules.remove(0); - recursiveDeploy(vertx, configs, modules); - } else { - LOGGER.fatal("Failed to deploy " + moduleName + " cause:", ar.cause()); - } - }); - } public static ClusterManager getClusterManager(String host, List zookeepers, String clusterId) { Config config = new Config(); From dd46b3b0149bcba447abc39b4c907037fd49f435 Mon Sep 17 00:00:00 2001 From: shreelakshmijoshi Date: Thu, 12 Sep 2024 16:16:06 +0530 Subject: [PATCH 4/5] Refactor: remove Deployer Removed Deployer.java class and dereferenced it from pom.xml after merging the functionalities between Deployer and DeployerDev classes as per issue #165 --- pom.xml | 1 - .../iudx/apd/acl/server/deploy/Deployer.java | 378 ------------------ 2 files changed, 379 deletions(-) delete mode 100644 src/main/java/iudx/apd/acl/server/deploy/Deployer.java diff --git a/pom.xml b/pom.xml index c192c7d..e07bee9 100644 --- a/pom.xml +++ b/pom.xml @@ -32,7 +32,6 @@ 5.10.2 1.19.7 UTF-8 - iudx.apd.acl.server.deploy.Deployer iudx.apd.acl.server.deploy.DeployerDev diff --git a/src/main/java/iudx/apd/acl/server/deploy/Deployer.java b/src/main/java/iudx/apd/acl/server/deploy/Deployer.java deleted file mode 100644 index f9a4d55..0000000 --- a/src/main/java/iudx/apd/acl/server/deploy/Deployer.java +++ /dev/null @@ -1,378 +0,0 @@ -package iudx.apd.acl.server.deploy; - -import com.hazelcast.config.Config; -import 
com.hazelcast.config.DiscoveryStrategyConfig; -import com.hazelcast.zookeeper.ZookeeperDiscoveryProperties; -import com.hazelcast.zookeeper.ZookeeperDiscoveryStrategyFactory; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.binder.jvm.ClassLoaderMetrics; -import io.micrometer.core.instrument.binder.jvm.JvmGcMetrics; -import io.micrometer.core.instrument.binder.jvm.JvmMemoryMetrics; -import io.micrometer.core.instrument.binder.jvm.JvmThreadMetrics; -import io.micrometer.core.instrument.binder.system.ProcessorMetrics; -import io.vertx.core.DeploymentOptions; -import io.vertx.core.Promise; -import io.vertx.core.Vertx; -import io.vertx.core.VertxOptions; -import io.vertx.core.cli.CLI; -import io.vertx.core.cli.CommandLine; -import io.vertx.core.cli.Option; -import io.vertx.core.cli.TypedOption; -import io.vertx.core.eventbus.EventBusOptions; -import io.vertx.core.http.HttpServerOptions; -import io.vertx.core.json.JsonArray; -import io.vertx.core.json.JsonObject; -import io.vertx.core.metrics.MetricsOptions; -import io.vertx.core.spi.cluster.ClusterManager; -import io.vertx.micrometer.Label; -import io.vertx.micrometer.MicrometerMetricsOptions; -import io.vertx.micrometer.VertxPrometheusOptions; -import io.vertx.micrometer.backends.BackendRegistries; -import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.util.Arrays; -import java.util.EnumSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.core.LoggerContext; - -/** - * Deploys clustered vert.x instance of the server. As a JAR, the application requires 3 runtime - * arguments: - * - *

- * - *

e.g. java -jar ./fatjar.jar --host $(hostname) -c configs/config.json -m - * iudx.apd.acl.server.authenticator.JwtAuthenticationVerticle - * ,iudx.apd.acl.server.apiserver.ApiServerVerticle - */ -public class Deployer { - private static final Logger logger = LogManager.getLogger(Deployer.class); - private static ClusterManager mgr; - private static Vertx vertx; - - /** - * Recursively deploy all modules. - * - * @param vertx the vert.x instance - * @param configs the JSON configuration - * @param i for recursive base case - */ - public static void recursiveDeploy(Vertx vertx, JsonObject configs, int i) { - if (i >= configs.getJsonArray("modules").size()) { - logger.info("Deployed all"); - return; - } - JsonObject moduleConfigurations = getConfigForModule(i, configs); - String moduleName = moduleConfigurations.getString("id"); - int numInstances = moduleConfigurations.getInteger("verticleInstances"); - - DeploymentOptions deploymentOptions = - new DeploymentOptions().setInstances(numInstances).setConfig(moduleConfigurations); - - boolean isWorkerVerticle = moduleConfigurations.getBoolean("isWorkerVerticle"); - if (isWorkerVerticle) { - logger.info("worker verticle : " + moduleConfigurations.getString("id")); - deploymentOptions.setWorkerPoolName(moduleConfigurations.getString("threadPoolName")); - deploymentOptions.setWorkerPoolSize(moduleConfigurations.getInteger("threadPoolSize")); - deploymentOptions.setWorker(true); - deploymentOptions.setMaxWorkerExecuteTime(30L); - deploymentOptions.setMaxWorkerExecuteTimeUnit(TimeUnit.MINUTES); - } - - vertx.deployVerticle( - moduleName, - deploymentOptions, - ar -> { - if (ar.succeeded()) { - logger.info("Deployed " + moduleName); - recursiveDeploy(vertx, configs, i + 1); - } else { - logger.fatal("Failed to deploy " + moduleName + " cause:", ar.cause()); - } - }); - } - - /** - * Recursively deploy modules/verticles (if they exist) present in the `modules` list. 
- * - * @param vertx the vert.x instance - * @param configs the JSON configuration - * @param modules the list of modules to deploy - */ - public static void recursiveDeploy(Vertx vertx, JsonObject configs, List modules) { - if (modules.isEmpty()) { - logger.info("Deployed requested verticles"); - return; - } - JsonArray configuredModules = configs.getJsonArray("modules"); - - String moduleName = modules.get(0); - JsonObject config = - configuredModules.stream() - .map(obj -> (JsonObject) obj) - .filter(obj -> obj.getString("id").equals(moduleName)) - .findFirst() - .orElse(new JsonObject()); - - if (config.isEmpty()) { - logger.fatal("Failed to deploy " + moduleName + " cause: Not Found"); - return; - } - // get common configs and add this to config object - JsonObject commonConfigs = configs.getJsonObject("commonConfig"); - config.mergeIn(commonConfigs, true); - int numInstances = config.getInteger("verticleInstances"); - DeploymentOptions deploymentOptions = - new DeploymentOptions().setInstances(numInstances).setConfig(config); - boolean isWorkerVerticle = config.getBoolean("isWorkerVerticle"); - if (isWorkerVerticle) { - logger.info("worker verticle : " + config.getString("id")); - deploymentOptions.setWorkerPoolName(config.getString("threadPoolName")); - deploymentOptions.setWorkerPoolSize(config.getInteger("threadPoolSize")); - deploymentOptions.setWorker(true); - deploymentOptions.setMaxWorkerExecuteTime(30L); - deploymentOptions.setMaxWorkerExecuteTimeUnit(TimeUnit.MINUTES); - } - - vertx.deployVerticle( - moduleName, - deploymentOptions, - ar -> { - if (ar.succeeded()) { - logger.info("Deployed " + moduleName); - modules.remove(0); - recursiveDeploy(vertx, configs, modules); - } else { - logger.fatal("Failed to deploy " + moduleName + " cause:", ar.cause()); - } - }); - } - - private static JsonObject getConfigForModule(int moduleIndex, JsonObject configurations) { - JsonObject commonConfigs = configurations.getJsonObject("commonConfig"); - JsonObject config = configurations.getJsonArray("modules").getJsonObject(moduleIndex); - return config.mergeIn(commonConfigs, true); - } - - public static ClusterManager getClusterManager( - String host, List zookeepers, String clusterId) { - Config config = new Config(); - config.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false); - config.getNetworkConfig().setPublicAddress(host); - config.setProperty("hazelcast.discovery.enabled", "true"); - config.setProperty("hazelcast.logging.type", "log4j2"); - DiscoveryStrategyConfig discoveryStrategyConfig = - new DiscoveryStrategyConfig(new ZookeeperDiscoveryStrategyFactory()); - discoveryStrategyConfig.addProperty( - ZookeeperDiscoveryProperties.ZOOKEEPER_URL.key(), String.join(",", zookeepers)); - discoveryStrategyConfig.addProperty(ZookeeperDiscoveryProperties.GROUP.key(), clusterId); - config - .getNetworkConfig() - .getJoin() - .getDiscoveryConfig() - .addDiscoveryStrategyConfig(discoveryStrategyConfig); - - return new HazelcastClusterManager(config); - } - - public static MetricsOptions getMetricsOptions() { - return new MicrometerMetricsOptions() - .setPrometheusOptions( - new VertxPrometheusOptions() - .setEnabled(true) - .setStartEmbeddedServer(true) - .setEmbeddedServerOptions(new HttpServerOptions().setPort(9000))) - // .setPublishQuantiles(true)) - .setLabels( - EnumSet.of(Label.EB_ADDRESS, Label.EB_FAILURE, Label.HTTP_CODE, Label.HTTP_METHOD)) - .setEnabled(true); - } - - public static void setJvmMetrics() { - MeterRegistry registry = BackendRegistries.getDefaultNow(); - 
logger.debug(registry); - new ClassLoaderMetrics().bindTo(registry); - new JvmMemoryMetrics().bindTo(registry); - new JvmGcMetrics().bindTo(registry); - new ProcessorMetrics().bindTo(registry); - new JvmThreadMetrics().bindTo(registry); - } - - /** - * Deploy clustered vert.x instance. - * - * @param configPath the path for JSON config file - * @param host String - * @param modules list of modules to deploy. If list is empty, all modules are deployed - */ - public static void deploy(String configPath, String host, List modules) { - String config; - try { - config = new String(Files.readAllBytes(Paths.get(configPath)), StandardCharsets.UTF_8); - } catch (Exception e) { - logger.fatal("Couldn't read configuration file"); - return; - } - if (config.length() < 1) { - logger.fatal("Couldn't read configuration file"); - return; - } - JsonObject configuration = new JsonObject(config); - List zookeepers = configuration.getJsonArray("zookeepers").getList(); - String clusterId = configuration.getString("clusterId"); - mgr = getClusterManager(host, zookeepers, clusterId); - EventBusOptions ebOptions = new EventBusOptions().setClusterPublicHost(host); - VertxOptions options = - new VertxOptions() - .setClusterManager(mgr) - .setEventBusOptions(ebOptions) - .setMetricsOptions(getMetricsOptions()); - logger.debug("metrics-options" + options.getMetricsOptions()); - Vertx.clusteredVertx( - options, - res -> { - if (res.succeeded()) { - vertx = res.result(); - logger.debug(vertx.isMetricsEnabled()); - setJvmMetrics(); - if (modules.isEmpty()) { - recursiveDeploy(vertx, configuration, 0); - } else { - recursiveDeploy(vertx, configuration, modules); - } - } else { - logger.fatal("Could not join cluster"); - } - }); - } - - public static void gracefulShutdown() { - Set deployIdSet = vertx.deploymentIDs(); - Logger logger1 = LogManager.getLogger(Deployer.class); - logger1.info("Shutting down the application"); - CountDownLatch latchVerticles = new CountDownLatch(deployIdSet.size()); - CountDownLatch latchCluster = new CountDownLatch(1); - CountDownLatch latchVertx = new CountDownLatch(1); - logger1.debug("number of verticles being undeployed are:" + deployIdSet.size()); - // shutdown verticles - for (String deploymentId : deployIdSet) { - vertx.undeploy( - deploymentId, - handler -> { - if (handler.succeeded()) { - logger1.debug(deploymentId + " verticle successfully Undeployed"); - latchVerticles.countDown(); - } else { - logger1.warn(deploymentId + "Undeploy failed!"); - } - }); - } - - try { - latchVerticles.await(5, TimeUnit.SECONDS); - logger1.info("All the verticles undeployed"); - Promise promise = Promise.promise(); - // leave the cluster - mgr.leave(promise); - logger1.info("vertx left cluster succesfully"); - } catch (Exception e) { - e.printStackTrace(); - } - - try { - latchCluster.await(5, TimeUnit.SECONDS); - // shutdown vertx - vertx.close( - handler -> { - if (handler.succeeded()) { - logger1.info("vertx closed succesfully"); - latchVertx.countDown(); - } else { - logger1.warn("Vertx didn't close properly, reason:" + handler.cause()); - } - }); - } catch (Exception e) { - e.printStackTrace(); - } - - try { - latchVertx.await(5, TimeUnit.SECONDS); - // then shut down log4j - if (LogManager.getContext() instanceof LoggerContext) { - logger1.debug("Shutting down log4j2"); - LogManager.shutdown(LogManager.getContext()); - } else { - logger1.warn("Unable to shutdown log4j2"); - } - } catch (Exception e) { - e.printStackTrace(); - } - } - - public static void main(String[] args) { - CLI cli = - 
CLI.create("IUDX Rs") - .setSummary("A CLI to deploy the acl-apd server") - .addOption( - new Option() - .setLongName("help") - .setShortName("h") - .setFlag(true) - .setDescription("display help")) - .addOption( - new Option() - .setLongName("config") - .setShortName("c") - .setRequired(true) - .setDescription("configuration file")) - .addOption( - new Option() - .setLongName("host") - .setShortName("i") - .setRequired(true) - .setDescription("public host")) - .addOption( - new TypedOption() - .setType(String.class) - .setLongName("modules") - .setShortName("m") - .setRequired(false) - .setDefaultValue("all") - .setParsedAsList(true) - .setDescription( - "comma separated list of verticle names to deploy. " - + "If omitted, or if `all` is passed, all verticles are deployed")); - - StringBuilder usageString = new StringBuilder(); - cli.usage(usageString); - CommandLine commandLine = cli.parse(Arrays.asList(args), false); - if (commandLine.isValid() && !commandLine.isFlagEnabled("help")) { - String configPath = commandLine.getOptionValue("config"); - String host = commandLine.getOptionValue("host"); - List passedModules = commandLine.getOptionValues("modules"); - List modules = passedModules.stream().distinct().collect(Collectors.toList()); - - /* `all` is also passed by default if no -m option given.*/ - if (modules.contains("all")) { - deploy(configPath, host, List.of()); - } else { - deploy(configPath, host, modules); - } - Runtime.getRuntime().addShutdownHook(new Thread(() -> gracefulShutdown())); - } else { - logger.info(usageString); - } - } -} \ No newline at end of file From 9fafc218835ac6252cbd943bbdd8b50b2c9c2e9c Mon Sep 17 00:00:00 2001 From: shreelakshmijoshi Date: Thu, 12 Sep 2024 16:35:06 +0530 Subject: [PATCH 5/5] Refactor: change filename --- pom.xml | 4 ++-- .../apd/acl/server/deploy/{DeployerDev.java => Deployer.java} | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) rename src/main/java/iudx/apd/acl/server/deploy/{DeployerDev.java => Deployer.java} (99%) diff --git a/pom.xml b/pom.xml index e07bee9..85a9a39 100644 --- a/pom.xml +++ b/pom.xml @@ -32,7 +32,7 @@ 5.10.2 1.19.7 UTF-8 - iudx.apd.acl.server.deploy.DeployerDev + iudx.apd.acl.server.deploy.Deployer @@ -543,7 +543,7 @@ acl-apd-server - iudx.apd.acl.server.deploy.DeployerDev + iudx.apd.acl.server.deploy.Deployer -jar {project.build.directory}/${project.artifactId}-dev-${project.version}-fat.jar diff --git a/src/main/java/iudx/apd/acl/server/deploy/DeployerDev.java b/src/main/java/iudx/apd/acl/server/deploy/Deployer.java similarity index 99% rename from src/main/java/iudx/apd/acl/server/deploy/DeployerDev.java rename to src/main/java/iudx/apd/acl/server/deploy/Deployer.java index 6069eac..2c19f84 100644 --- a/src/main/java/iudx/apd/acl/server/deploy/DeployerDev.java +++ b/src/main/java/iudx/apd/acl/server/deploy/Deployer.java @@ -57,8 +57,8 @@ * iudx.apd.acl.server.authenticator.JwtAuthenticationVerticle * ,iudx.apd.acl.server.apiserver.ApiServerVerticle */ -public class DeployerDev { - private static final Logger LOGGER = LogManager.getLogger(DeployerDev.class); +public class Deployer { + private static final Logger LOGGER = LogManager.getLogger(Deployer.class); private static Vertx vertxInstance; private static ClusterManager mgr;