From f3035a0640aeeaa0edceae428ce662e619587163 Mon Sep 17 00:00:00 2001
From: shantstepanian <17996546+shantstepanian@users.noreply.github.com>
Date: Wed, 4 Dec 2019 17:24:38 -0500
Subject: [PATCH] Preventing concurrent deployments against an environment
[#256] - see [#111] also
---
CHANGELOG.md | 23 +
.../com/gs/obevo/api/platform/AuditLock.java | 36 +
.../gs/obevo/api/platform/ChangeAuditDao.java | 3 +
.../java/com/gs/obevo/impl/MainDeployer.kt | 405 +++---
.../obevo/impl/changeauditdao/InMemLock.java | 35 +
.../IncrementalChangeTypeCommandCalculator.kt | 4 +-
.../FixedAbstractYAMLBasedConfiguration.java | 2 +-
.../db2/Db2ToInMemorySqlTranslator.java | 162 +--
.../obevo-db-postgresql/dbviewer.sh | 37 +
.../obevo-db-postgresql/getIpForDbviewer.sh | 29 +
.../platforms/postgresql/PostgreSqlLock.kt | 56 +
.../postgresql/PostgreSqlSqlExecutor.java | 99 +-
.../postgresql/PostgreSqlDeployerIT.java | 30 +-
.../gs/obevo/db/api/platform/SqlExecutor.java | 6 +
.../changeauditdao/NoOpChangeAuditDao.java | 121 +-
.../SameSchemaChangeAuditDao.java | 6 +
.../impl/platforms/AbstractSqlExecutor.java | 218 ++--
.../syntaxparser/UnparseVisitor.java | 1106 ++++++++---------
.../mongodb/impl/MongoDbChangeAuditDao.java | 7 +
.../src/site/markdown/app-versioning.md | 162 +++
.../src/site/markdown/dev-setup-sybase-ase.md | 2 +-
21 files changed, 1508 insertions(+), 1041 deletions(-)
create mode 100644 obevo-core/src/main/java/com/gs/obevo/api/platform/AuditLock.java
create mode 100644 obevo-core/src/main/java/com/gs/obevo/impl/changeauditdao/InMemLock.java
create mode 100755 obevo-db-impls/obevo-db-postgresql/dbviewer.sh
create mode 100755 obevo-db-impls/obevo-db-postgresql/getIpForDbviewer.sh
create mode 100644 obevo-db-impls/obevo-db-postgresql/src/main/java/com/gs/obevo/db/impl/platforms/postgresql/PostgreSqlLock.kt
create mode 100644 obevo-site/src/site/markdown/app-versioning.md
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cd51d767..5afeef00 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,9 +3,12 @@
## 8.0.0
### Functionality Improvements
+#111: Preventing concurrent deploys against a given schema
+
### Technical Improvements
Upgrading to JDK 8 bytecode
+
Upgrading to JGraphT 1.3.0 (first required JDK 8 dependency)
### Bug Fixes
@@ -16,14 +19,20 @@ Correcting error messages on graph cycles for complex databases
### Functionality Improvements
#239: MongoDB productionization: collection now treated as an incremental change type, reverse-engineering support, clean functionality built
+
#231 #233: Correct reverse-engineering of table indices to their correct tables
+
#232 #233: Support reverse-engineering of triggers
+
#231 #235: Removing redundant unique indexes for Oracle Primary Key reverse-engineering
+
#236: Support Character Set Encoding for Oracle Reverse Engineering
+
Allowing valid special characters (e.g. #) to be in object names, as certain RDBMS platforms also allow them
### Technical Improvements
DB2 build updated to use the new Docker Hub coordinates from IBM
+
#252: Optimize Images 610.60kb -> 448.30kb (26.58%) and fix typo
### Bug Fixes
@@ -34,7 +43,9 @@ DB2 build updated to use the new Docker Hub coordinates from IBM
### Functionality Improvements
#182: Adding Hibernate reverse-engineering API. See [ORM Integration docs](https://goldmansachs.github.io/obevo/orm-integration.html) for more details.
+
#221 #223 #225: Oracle reverse-engineering improvements - unicode characters, nested tables, types, comments
+
#228: PostgreSQL improvements for kata - reverse-engineering, in-memory databases
### Technical Improvements
@@ -53,6 +64,7 @@ DB2 build updated to use the new Docker Hub coordinates from IBM
### Bug Fixes
#212: Fixing inability to handle DB2 reorg exceptions during static data queries. Previously, reorg detection only worked on update statements
+
#210 #213: Oracle - ignoring DATABASE LINKs during reverse-engineering, instead of erroring out. Separate ticket #186 is there for DATABASE LINK and materialized view support
@@ -60,13 +72,18 @@ DB2 build updated to use the new Docker Hub coordinates from IBM
### Functionality Improvements
#199: Adding support for PostgreSQL roles and extensions in the environment setup step
+
#202: Add option to export graph representation to a file
+
#196: Adding UUID support for CSV data loads for PostgreSQL
+
Initial MySQL support (still in Alpha)
### Technical Improvements
Moving more of the code over to Kotlin
+
#153: Refactor of dependency implementation
+
#193: Docker onboarding for testing
### Bug Fixes
@@ -82,6 +99,7 @@ Moving more of the code over to Kotlin
### Bug Fixes
#188: Correcting the metadata retrieval for ASE and PostgreSQL
+
#184: Documentation cleanups
@@ -92,13 +110,18 @@ Moving more of the code over to Kotlin
### Technical Improvements
#173: Support YAML/JSON configurations and move away from type safe config towards commons-config v2
+
#175: Removing retrolambda, moving back to Java 7, and initial support for Kotlin
+
#150: Documentation updates
### Bug Fixes
#125: Clarify error messages when reading merge config file (missing input dirs, forward-slashes specified)
+
#165: Supporting Unicode in regular table/db object files and avoiding "TokenMgrError: Lexical error at line ..." issues
+
#169: Fixing missing quotes in deploy.sh/bat files in obevo-cli; otherwise, spaces in JAVA_HOME or OBEVO_HOME were not supported.
+
#166: Clearer error message if an invalid platform type is specified in config
diff --git a/obevo-core/src/main/java/com/gs/obevo/api/platform/AuditLock.java b/obevo-core/src/main/java/com/gs/obevo/api/platform/AuditLock.java
new file mode 100644
index 00000000..93113496
--- /dev/null
+++ b/obevo-core/src/main/java/com/gs/obevo/api/platform/AuditLock.java
@@ -0,0 +1,36 @@
+/**
+ * Copyright 2017 Goldman Sachs.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.gs.obevo.api.platform;
+
+import com.gs.obevo.api.appdata.PhysicalSchema;
+
+/**
+ * Component to ensure that only a single client can invoke a deploy on an Obevo environment.
+ * This component is mainly called from MainDeployer.
+ * As of today, the lock is environment-wide (i.e. not per {@link PhysicalSchema}); this may be refactored in the future.
+ */
+public interface AuditLock {
+ /**
+ * Acquire a lock on the environment.
+ */
+ void lock();
+
+ /**
+ * Release the lock on the environment. Okay to throw exceptions here, as MainDeployer will handle
+ * and ignore exceptions when calling this.
+ */
+ void unlock();
+}
diff --git a/obevo-core/src/main/java/com/gs/obevo/api/platform/ChangeAuditDao.java b/obevo-core/src/main/java/com/gs/obevo/api/platform/ChangeAuditDao.java
index b24d422a..56016578 100644
--- a/obevo-core/src/main/java/com/gs/obevo/api/platform/ChangeAuditDao.java
+++ b/obevo-core/src/main/java/com/gs/obevo/api/platform/ChangeAuditDao.java
@@ -18,6 +18,7 @@
import com.gs.obevo.api.appdata.Change;
import com.gs.obevo.api.appdata.DeployExecution;
import org.eclipse.collections.api.list.ImmutableList;
+import org.jetbrains.annotations.NotNull;
/**
* Interface to access the audit table for a given environment.
@@ -66,4 +67,6 @@ public interface ChangeAuditDao {
* Removes all changes related to the incoming changed object based on the {@link Change#getObjectKey()}.
*/
void deleteObjectChanges(Change change);
+
+ @NotNull AuditLock acquireLock();
}
diff --git a/obevo-core/src/main/java/com/gs/obevo/impl/MainDeployer.kt b/obevo-core/src/main/java/com/gs/obevo/impl/MainDeployer.kt
index a047124d..abbfc998 100644
--- a/obevo-core/src/main/java/com/gs/obevo/impl/MainDeployer.kt
+++ b/obevo-core/src/main/java/com/gs/obevo/impl/MainDeployer.kt
@@ -53,7 +53,17 @@ import org.eclipse.collections.impl.block.factory.Predicates
import org.eclipse.collections.impl.block.factory.StringFunctions
import org.eclipse.collections.impl.factory.Lists
import org.eclipse.collections.impl.factory.Sets
+import org.jgrapht.Graph
+import org.jgrapht.graph.DefaultEdge
+import org.jgrapht.io.ComponentNameProvider
+import org.jgrapht.io.DOTExporter
+import org.jgrapht.io.GmlExporter
+import org.jgrapht.io.GraphMLExporter
+import org.jgrapht.io.IntegerComponentNameProvider
+import org.jgrapht.io.MatrixExporter
import org.slf4j.LoggerFactory
+import java.io.FileWriter
+import java.io.Writer
import java.sql.Timestamp
import java.util.Date
import java.util.concurrent.TimeUnit
@@ -132,239 +142,252 @@ class MainDeployer
>(
val deployStrategy = getDeployMode(deployerArgs)
- val deployedChanges = readDeployedChanges(deployerArgs)
- mainInputReader.logChanges("deployed", deployedChanges)
-
- // TODO ensure that we've handled the split between static data and others properly
- val changeInputSetMap = this.textDependencyExtractor.calculateDependencies(
- changeInputs.filter { it.changeKey.changeType.isEnrichableForDependenciesInText }
- )
-
- val newChangeInputSetMap = mutableMapOf>()
- val packageChanges = changeInputs.filter { it.objectKey.changeType.name == ChangeType.PACKAGE_STR || it.objectKey.changeType.name == ChangeType.PACKAGE_BODY }
- .map { it.objectKey.objectName }.toSet()
- changeInputSetMap.onEach { entry ->
- val change = entry.key
- val dependencies = entry.value
- if (change.objectKey.changeType.name == ChangeType.PACKAGE_BODY) {
- newChangeInputSetMap.put(change, dependencies.filterNot { packageChanges.contains(it.target) }.toSet())
- } else {
- newChangeInputSetMap.put(change, dependencies)
- }
- }
+ val lock = artifactDeployerDao.acquireLock()
+ try {
+ LOG.info("Attempting to acquire deploy lock")
+ lock.lock()
+ LOG.info("Deploy lock acquired")
- var sourceChanges = changeInputs.collect { input ->
- val change: Change
- if (input.isRerunnable) {
- change = ChangeRerunnable(input.changeKey, input.contentHash, input.content)
- } else {
- change = ChangeIncremental(
- input.changeKey,
- input.orderWithinObject,
- input.contentHash,
- input.content,
- input.rollbackIfAlreadyDeployedContent,
- input.isActive
- )
- change.isDrop = input.isDrop
- change.baselinedChanges = input.baselinedChanges
- change.parallelGroup = input.parallelGroup
- change.isKeepIncrementalOrder = input.isKeepIncrementalOrder
- }
+ val deployedChanges = readDeployedChanges(deployerArgs)
+ mainInputReader.logChanges("deployed", deployedChanges)
- change.metadataSection = input.metadataSection
+ // TODO ensure that we've handled the split between static data and others properly
+ val changeInputSetMap = this.textDependencyExtractor.calculateDependencies(
+ changeInputs.filter { it.changeKey.changeType.isEnrichableForDependenciesInText }
+ )
- // TODO should not need separate converted*Content fields in Change. Should only be in ChangeInput - see GITHUB#191
- change.convertedContent = input.convertedContent
- change.rollbackContent = input.rollbackContent
- change.convertedRollbackContent = input.convertedRollbackContent
+ val newChangeInputSetMap = mutableMapOf>()
+ val packageChanges = changeInputs.filter { it.objectKey.changeType.name == ChangeType.PACKAGE_STR || it.objectKey.changeType.name == ChangeType.PACKAGE_BODY }
+ .map { it.objectKey.objectName }.toSet()
+ changeInputSetMap.onEach { entry ->
+ val change = entry.key
+ val dependencies = entry.value
+ if (change.objectKey.changeType.name == ChangeType.PACKAGE_BODY) {
+ newChangeInputSetMap.put(change, dependencies.filterNot { packageChanges.contains(it.target) }.toSet())
+ } else {
+ newChangeInputSetMap.put(change, dependencies)
+ }
+ }
- change.changeInput = input
- change.orderWithinObject = input.orderWithinObject
+ var sourceChanges = changeInputs.collect { input ->
+ val change: Change
+ if (input.isRerunnable) {
+ change = ChangeRerunnable(input.changeKey, input.contentHash, input.content)
+ } else {
+ change = ChangeIncremental(
+ input.changeKey,
+ input.orderWithinObject,
+ input.contentHash,
+ input.content,
+ input.rollbackIfAlreadyDeployedContent,
+ input.isActive
+ )
+ change.isDrop = input.isDrop
+ change.baselinedChanges = input.baselinedChanges
+ change.parallelGroup = input.parallelGroup
+ change.isKeepIncrementalOrder = input.isKeepIncrementalOrder
+ }
- change.order = input.order
- change.applyGrants = input.applyGrants
- change.changeset = input.changeset
+ change.metadataSection = input.metadataSection
- change.codeDependencies = Sets.immutable.withAll(
- newChangeInputSetMap.get(input) // option 1 - use the inputs extracted from the next if possible
- ?: input.codeDependencies // option 2 - use the pre-populated codeDependencies value
- ?: emptySet() // fallback - default to empty set
- )
+ // TODO should not need separate converted*Content fields in Change. Should only be in ChangeInput - see GITHUB#191
+ change.convertedContent = input.convertedContent
+ change.rollbackContent = input.rollbackContent
+ change.convertedRollbackContent = input.convertedRollbackContent
- change.dropContent = input.dropContent
- change.permissionScheme = input.permissionScheme
+ change.changeInput = input
+ change.orderWithinObject = input.orderWithinObject
- return@collect change
- }
+ change.order = input.order
+ change.applyGrants = input.applyGrants
+ change.changeset = input.changeset
- // add rollback scripts here
+ change.codeDependencies = Sets.immutable.withAll(
+ newChangeInputSetMap.get(input) // option 1 - use the inputs extracted from the next if possible
+ ?: input.codeDependencies // option 2 - use the pre-populated codeDependencies value
+ ?: emptySet() // fallback - default to empty set
+ )
- val changePairs = ChangesetCreator.getChangePairs(deployedChanges, sourceChanges)
+ change.dropContent = input.dropContent
+ change.permissionScheme = input.permissionScheme
- if (deployerArgs.isRollback) {
- // Add back rollback changes to the sourceList so that they can take part in the change calculation
- val rollbacksToAddBack = changePairs
- .filter { !it.changeKey.changeType.isRerunnable && it.sourceChange == null && it.deployedChange != null }
- .map { it.deployedChange as ChangeIncremental }
+ return@collect change
+ }
- rollbacksToAddBack.forEach { it.isRollbackActivated = true }
+ // add rollback scripts here
- sourceChanges = sourceChanges.newWithAll(rollbacksToAddBack)
- }
+ val changePairs = ChangesetCreator.getChangePairs(deployedChanges, sourceChanges)
- // TODO refactor into separate method
- if (env.platform.isDropOrderRequired) {
- // In this block, we set the "dependentChanges" field on the drop objects to ensure they can be sorted for dependencies later on
- val dropsToEnrich = changePairs
- .filter { it.changeKey.changeType.isRerunnable && it.sourceChange == null && it.deployedChange != null }
- .map { it.deployedChange!! }
+ if (deployerArgs.isRollback) {
+ // Add back rollback changes to the sourceList so that they can take part in the change calculation
+ val rollbacksToAddBack = changePairs
+ .filter { !it.changeKey.changeType.isRerunnable && it.sourceChange == null && it.deployedChange != null }
+ .map { it.deployedChange as ChangeIncremental }
- val dropsByObjectName = dropsToEnrich.associateBy { env.platform.convertDbObjectName().valueOf(it.objectName) }
+ rollbacksToAddBack.forEach { it.isRollbackActivated = true }
- val dropsForTextProcessing = dropsToEnrich.map { drop ->
- val sql = changeTypeBehaviorRegistry.getChangeTypeBehavior(drop.changeType).getDefinitionFromEnvironment(drop);
- LOG.debug("Found the sql from the DB for dropping: {}", sql)
- TextDependencyExtractableImpl(drop.objectName, sql ?: "", drop)
+ sourceChanges = sourceChanges.newWithAll(rollbacksToAddBack)
}
- val dropDependencies = this.textDependencyExtractor.calculateDependencies(dropsForTextProcessing)
+ // TODO refactor into separate method
+ if (env.platform.isDropOrderRequired) {
+ // In this block, we set the "dependentChanges" field on the drop objects to ensure they can be sorted for dependencies later on
+ val dropsToEnrich = changePairs
+ .filter { it.changeKey.changeType.isRerunnable && it.sourceChange == null && it.deployedChange != null }
+ .map { it.deployedChange!! }
- dropsForTextProcessing.forEach { it.codeDependencies = Sets.immutable.ofAll(dropDependencies.get(it)) }
+ val dropsByObjectName = dropsToEnrich.associateBy { env.platform.convertDbObjectName().valueOf(it.objectName) }
- for (drop in dropsForTextProcessing) {
- drop.codeDependencies?.let { deps ->
- if (deps.notEmpty()) {
- drop.payload.dependentChanges = Sets.immutable.ofAll(deps.map { dropsByObjectName[it.target] })
- }
+ val dropsForTextProcessing = dropsToEnrich.map { drop ->
+ val sql = changeTypeBehaviorRegistry.getChangeTypeBehavior(drop.changeType).getDefinitionFromEnvironment(drop);
+ LOG.debug("Found the sql from the DB for dropping: {}", sql)
+ TextDependencyExtractableImpl(drop.objectName, sql ?: "", drop)
}
- }
- }
+ val dropDependencies = this.textDependencyExtractor.calculateDependencies(dropsForTextProcessing)
- val dependencyGraph = graphEnricher.createDependencyGraph(sourceChanges, deployerArgs.isRollback)
+ dropsForTextProcessing.forEach { it.codeDependencies = Sets.immutable.ofAll(dropDependencies.get(it)) }
- deployerArgs.sourceGraphExportFile?.let { sourceGraphOutputFile ->
- val exporterFormat = deployerArgs.sourceGraphExportFormat ?: GraphExportFormat.DOT
- // TODO undo this change
-// val exporterFunc = getExporterFunc(exporterFormat)
-// FileWriter(sourceGraphOutputFile).use { exporterFunc(it, dependencyGraph) }
- }
-
- sourceChanges.each { it.dependentChanges = Sets.immutable.ofAll(GraphUtil.getDependencyNodes(dependencyGraph, it)) }
+ for (drop in dropsForTextProcessing) {
+ drop.codeDependencies?.let { deps ->
+ if (deps.notEmpty()) {
+ drop.payload.dependentChanges = Sets.immutable.ofAll(deps.map { dropsByObjectName[it.target] })
+ }
+ }
+ }
+ }
- val artifactsToProcess = changesetCreator.determineChangeset(changePairs, sourceChanges, deployStrategy.isInitAllowedOnHashExceptions)
- .applyDeferredPredicate(deployerArgs.changesetPredicate)
- validatePriorToDeployment(env, deployStrategy, sourceChanges, deployedChanges, artifactsToProcess)
- deployerPlugin.validatePriorToDeployment(env, deployStrategy, sourceChanges, deployedChanges, artifactsToProcess)
+ val dependencyGraph = graphEnricher.createDependencyGraph(sourceChanges, deployerArgs.isRollback)
- if (this.shouldProceedWithDbChange(artifactsToProcess, deployerArgs)) {
- for (schema in env.physicalSchemas) {
- deployerPlugin.initializeSchema(env, schema)
+ deployerArgs.sourceGraphExportFile?.let { sourceGraphOutputFile ->
+ val exporterFormat = deployerArgs.sourceGraphExportFormat ?: GraphExportFormat.DOT
+ // TODO undo this change
+ val exporterFunc = getExporterFunc(exporterFormat)
+ FileWriter(sourceGraphOutputFile).use { exporterFunc(it, dependencyGraph) }
}
- // Note - only init the audit table if we actually proceed w/ a deploy
- this.deployExecutionDao.init()
- this.artifactDeployerDao.init()
-
- val executionsBySchema = env.schemas.associateBy({it.name}, { schema ->
- val deployExecution = DeployExecutionImpl(
- deployerArgs.deployRequesterId,
- credential.username,
- schema.name,
- PlatformConfiguration.getInstance().toolVersion,
- Timestamp(Date().time),
- deployerArgs.isPerformInitOnly,
- deployerArgs.isRollback,
- deployerArgs.productVersion,
- deployerArgs.reason,
- deployerArgs.deployExecutionAttributes
- )
- deployExecution.status = DeployExecutionStatus.IN_PROGRESS
- deployExecutionDao.persistNew(deployExecution, env.getPhysicalSchema(schema.name))
- deployExecution
- })
-
- // If there are no deployments required, then just update the artifact tables and return
- if (!artifactsToProcess.isDeploymentNeeded) {
- LOG.info("No changes detected in the database deployment. Updating Deploy Status")
- executionsBySchema.values.forEach { deployExecution ->
- deployExecution.status = DeployExecutionStatus.SUCCEEDED
- this.deployExecutionDao.update(deployExecution)
+ sourceChanges.each { it.dependentChanges = Sets.immutable.ofAll(GraphUtil.getDependencyNodes(dependencyGraph, it)) }
+
+ val artifactsToProcess = changesetCreator.determineChangeset(changePairs, sourceChanges, deployStrategy.isInitAllowedOnHashExceptions)
+ .applyDeferredPredicate(deployerArgs.changesetPredicate)
+
+ validatePriorToDeployment(env, deployStrategy, sourceChanges, deployedChanges, artifactsToProcess)
+ deployerPlugin.validatePriorToDeployment(env, deployStrategy, sourceChanges, deployedChanges, artifactsToProcess)
+
+ if (this.shouldProceedWithDbChange(artifactsToProcess, deployerArgs)) {
+ env.physicalSchemas.forEach { deployerPlugin.initializeSchema(env, it) }
+
+ // Note - only init the audit table if we actually proceed w/ a deploy
+ this.deployExecutionDao.init()
+ this.artifactDeployerDao.init()
+
+ val executionsBySchema = env.schemas.associateBy({ it.name }, { schema ->
+ val deployExecution = DeployExecutionImpl(
+ deployerArgs.deployRequesterId,
+ credential.username,
+ schema.name,
+ PlatformConfiguration.getInstance().toolVersion,
+ Timestamp(Date().time),
+ deployerArgs.isPerformInitOnly,
+ deployerArgs.isRollback,
+ deployerArgs.productVersion,
+ deployerArgs.reason,
+ deployerArgs.deployExecutionAttributes
+ )
+ deployExecution.status = DeployExecutionStatus.IN_PROGRESS
+ deployExecutionDao.persistNew(deployExecution, env.getPhysicalSchema(schema.name))
+ deployExecution
+ })
+
+ // If there are no deployments required, then just update the artifact tables and return
+ if (!artifactsToProcess.isDeploymentNeeded) {
+ LOG.info("No changes detected in the database deployment. Updating Deploy Status")
+ executionsBySchema.values.forEach { deployExecution ->
+ deployExecution.status = DeployExecutionStatus.SUCCEEDED
+ this.deployExecutionDao.update(deployExecution)
+ }
+ return
}
- return
- }
-
- val action = if (deployerArgs.isRollback) "Rollback" else "Deployment"
- var mainDeploymentSuccess = false
- val cec = CommandExecutionContext()
- try {
- this.doExecute(artifactsToProcess, deployStrategy, onboardingStrategy, executionsBySchema, cec)
- LOG.info("$action has Completed Successfully!")
- executionsBySchema.values.forEach { deployExecution ->
- deployExecution.status = DeployExecutionStatus.SUCCEEDED
- this.deployExecutionDao.update(deployExecution)
- }
+ val action = if (deployerArgs.isRollback) "Rollback" else "Deployment"
- mainDeploymentSuccess = true
- } catch (exc: RuntimeException) {
- LOG.info("$action has Failed. We will error out, but first complete the post-deploy step")
- executionsBySchema.values.forEach { deployExecution ->
- deployExecution.status = DeployExecutionStatus.FAILED
- this.deployExecutionDao.update(deployExecution)
- }
- throw exc
- } finally {
- LOG.info("Executing the post-deploy step")
+ var mainDeploymentSuccess = false
+ val cec = CommandExecutionContext()
try {
- deployerPlugin.doPostDeployAction(env, sourceChanges)
- this.postDeployAction.value(env)
+ this.doExecute(artifactsToProcess, deployStrategy, onboardingStrategy, executionsBySchema, cec)
+ LOG.info("$action has Completed Successfully!")
+ executionsBySchema.values.forEach { deployExecution ->
+ deployExecution.status = DeployExecutionStatus.SUCCEEDED
+ this.deployExecutionDao.update(deployExecution)
+ }
+
+ mainDeploymentSuccess = true
} catch (exc: RuntimeException) {
- if (mainDeploymentSuccess) {
- LOG.info("Exception found in the post-deploy step", exc)
- throw exc
- } else {
- LOG.error("Exception found in the post-deploy step; printing it out here, but there was an exception during the regular deploy as well", exc)
+ LOG.info("$action has Failed. We will error out, but first complete the post-deploy step")
+ executionsBySchema.values.forEach { deployExecution ->
+ deployExecution.status = DeployExecutionStatus.FAILED
+ this.deployExecutionDao.update(deployExecution)
+ }
+ throw exc
+ } finally {
+ LOG.info("Executing the post-deploy step")
+ try {
+ deployerPlugin.doPostDeployAction(env, sourceChanges)
+ this.postDeployAction.value(env)
+ } catch (exc: RuntimeException) {
+ if (mainDeploymentSuccess) {
+ LOG.info("Exception found in the post-deploy step", exc)
+ throw exc
+ } else {
+ LOG.error("Exception found in the post-deploy step; printing it out here, but there was an exception during the regular deploy as well", exc)
+ }
}
- }
- LOG.info("Post-deploy step completed")
+ LOG.info("Post-deploy step completed")
- val warnings = cec.warnings
- if (warnings.notEmpty()) {
- LOG.info("")
- LOG.info("Summary of warnings from this deployment; please address:\n{}", warnings.collect(StringFunctions.prepend(" ")).makeString("\n"))
- }
+ val warnings = cec.warnings
+ if (warnings.notEmpty()) {
+ LOG.info("")
+ LOG.info("Summary of warnings from this deployment; please address:\n{}", warnings.collect(StringFunctions.prepend(" ")).makeString("\n"))
+ }
- LOG.info("Deploy complete!")
+ LOG.info("Deploy complete!")
+ }
+ }
+ } finally {
+ LOG.info("Attempting to release deploy lock")
+ try {
+ lock.unlock()
+ LOG.info("Deploy lock released")
+ } catch (_: Exception) {
+ LOG.info("Deploy lock release failed; ignoring exception")
}
}
}
-// private fun getExporterFunc(exporterFormat: Enum): (Writer, Graph) -> Unit {
-// val vertexNameProvider : ComponentNameProvider = ComponentNameProvider {
-// change : Change -> change.objectName + "." + change.changeName
-// }
-//
-// // TODO Temporary - undo this change!
-// when (exporterFormat) {
-// GraphExportFormat.DOT -> return { writer: Writer, graph: Graph ->
-// DOTExporter(IntegerComponentNameProvider(), vertexNameProvider, null).export(writer, graph)
-// }
-// GraphExportFormat.GML -> return { writer: Writer, graph: Graph ->
-// GmlExporter(IntegerComponentNameProvider(), vertexNameProvider, IntegerEdgeNameProvider(), null).export(writer, graph)
-// }
-// GraphExportFormat.GRAPHML -> return { writer: Writer, graph: Graph ->
-// GraphMLExporter(IntegerComponentNameProvider(), vertexNameProvider, IntegerEdgeNameProvider(), null).export(writer, graph)
-// }
-// GraphExportFormat.MATRIX -> return { writer: Writer, graph: Graph ->
-// MatrixExporter().exportAdjacencyMatrix(writer, graph)
-// }
-// else -> throw IllegalArgumentException("Export Format $exporterFormat is not supported here")
-// }
-// }
+ private fun getExporterFunc(exporterFormat: Enum): (Writer, Graph) -> Unit {
+ val vertexNameProvider : ComponentNameProvider = ComponentNameProvider {
+ change : Change -> change.objectName + "." + change.changeName
+ }
+
+ // TODO Temporary - undo this change!
+ when (exporterFormat) {
+ GraphExportFormat.DOT -> return { writer: Writer, graph: Graph ->
+ DOTExporter(IntegerComponentNameProvider(), vertexNameProvider, null).exportGraph(graph, writer)
+ }
+ GraphExportFormat.GML -> return { writer: Writer, graph: Graph ->
+ GmlExporter(IntegerComponentNameProvider(), vertexNameProvider, IntegerComponentNameProvider(), null).exportGraph(graph, writer)
+ }
+ GraphExportFormat.GRAPHML -> return { writer: Writer, graph: Graph ->
+ GraphMLExporter(IntegerComponentNameProvider(), vertexNameProvider, IntegerComponentNameProvider(), null).exportGraph(graph, writer)
+ }
+ GraphExportFormat.MATRIX -> return { writer: Writer, graph: Graph ->
+ MatrixExporter().exportGraph(graph, writer)
+ }
+ else -> throw IllegalArgumentException("Export Format $exporterFormat is not supported here")
+ }
+ }
private fun logArgumentMetrics(deployerArgs: MainDeployerArgs) {
deployMetricsCollector.addMetric("args.onboardingMode", deployerArgs.isOnboardingMode)
diff --git a/obevo-core/src/main/java/com/gs/obevo/impl/changeauditdao/InMemLock.java b/obevo-core/src/main/java/com/gs/obevo/impl/changeauditdao/InMemLock.java
new file mode 100644
index 00000000..061eb660
--- /dev/null
+++ b/obevo-core/src/main/java/com/gs/obevo/impl/changeauditdao/InMemLock.java
@@ -0,0 +1,35 @@
+/**
+ * Copyright 2017 Goldman Sachs.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.gs.obevo.impl.changeauditdao;
+
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import com.gs.obevo.api.platform.AuditLock;
+
+public class InMemLock implements AuditLock {
+ private final Lock lock = new ReentrantLock();
+
+ @Override
+ public void lock() {
+ lock.lock();
+ }
+
+ @Override
+ public void unlock() {
+ lock.unlock();
+ }
+}
diff --git a/obevo-core/src/main/java/com/gs/obevo/impl/changetypes/IncrementalChangeTypeCommandCalculator.kt b/obevo-core/src/main/java/com/gs/obevo/impl/changetypes/IncrementalChangeTypeCommandCalculator.kt
index ecc50e92..b16a79bf 100644
--- a/obevo-core/src/main/java/com/gs/obevo/impl/changetypes/IncrementalChangeTypeCommandCalculator.kt
+++ b/obevo-core/src/main/java/com/gs/obevo/impl/changetypes/IncrementalChangeTypeCommandCalculator.kt
@@ -148,7 +148,7 @@ class IncrementalChangeTypeCommandCalculator internal constructor(private val nu
}
} else {
if (initAllowedOnHashExceptions) {
- // SHANT handle init exceptions
+ // TODO handle init exceptions
changeset.add(changeCommandFactory.createUpdateAuditTableOnly(source, "initOnly"))
} else {
changeset.add(HashMismatchWarning(source, deployed))
@@ -244,7 +244,7 @@ class IncrementalChangeTypeCommandCalculator internal constructor(private val nu
for (baseline in baselinedDrops) {
if (!successfulBaselinedChanges.contains(baseline)) {
- // SHANT do the baseline check here (collect changes that need to be cleared out)
+ // TODO do the baseline check here (collect changes that need to be cleared out)
changeset.add(changeCommandFactory.createImproperlyRemovedWarning(baseline))
}
}
diff --git a/obevo-core/src/main/java/org/apache/commons/configuration2/FixedAbstractYAMLBasedConfiguration.java b/obevo-core/src/main/java/org/apache/commons/configuration2/FixedAbstractYAMLBasedConfiguration.java
index 2bfcbad0..5f4347cd 100644
--- a/obevo-core/src/main/java/org/apache/commons/configuration2/FixedAbstractYAMLBasedConfiguration.java
+++ b/obevo-core/src/main/java/org/apache/commons/configuration2/FixedAbstractYAMLBasedConfiguration.java
@@ -123,7 +123,7 @@ private ImmutableNode constructHierarchy(ImmutableNode.Builder parent,
constructHierarchy(subtree, (Map) value);
parent.addChild(children);
}
- // Shant added this fix for the Collection block
+ // DEVELOPER NOTE - this is the section modified by the Obevo developers to add the Collection block
else if (value instanceof Collection)
{
boolean areAllChildConfigs = true;
diff --git a/obevo-db-impls/obevo-db-db2/src/main/java/com/gs/obevo/db/impl/platforms/db2/Db2ToInMemorySqlTranslator.java b/obevo-db-impls/obevo-db-db2/src/main/java/com/gs/obevo/db/impl/platforms/db2/Db2ToInMemorySqlTranslator.java
index 31a3af87..c5c7b7ba 100644
--- a/obevo-db-impls/obevo-db-db2/src/main/java/com/gs/obevo/db/impl/platforms/db2/Db2ToInMemorySqlTranslator.java
+++ b/obevo-db-impls/obevo-db-db2/src/main/java/com/gs/obevo/db/impl/platforms/db2/Db2ToInMemorySqlTranslator.java
@@ -1,81 +1,81 @@
-/**
- * Copyright 2017 Goldman Sachs.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package com.gs.obevo.db.impl.platforms.db2;
-
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import com.gs.obevo.api.appdata.ChangeInput;
-import com.gs.obevo.db.impl.platforms.sqltranslator.PostColumnSqlTranslator;
-import com.gs.obevo.db.impl.platforms.sqltranslator.PostParsedSqlTranslator;
-import com.gs.obevo.db.impl.platforms.sqltranslator.UnparsedSqlTranslator;
-import com.gs.obevo.db.sqlparser.syntaxparser.CreateTable;
-import com.gs.obevo.db.sqlparser.syntaxparser.CreateTableColumn;
-import org.eclipse.collections.api.list.ImmutableList;
-import org.eclipse.collections.impl.factory.Lists;
-
-public class Db2ToInMemorySqlTranslator implements PostColumnSqlTranslator, PostParsedSqlTranslator, UnparsedSqlTranslator {
- private final Pattern defaultPattern = Pattern.compile("(?i)((?:not\\s+)?null)\\s+default\\s+(.*)");
-
- // SHANT add test cases for this in the integration test
- // these are allowable by db2 (i.e. to use dots instead of colons), but HSQL does not
-    public static final ImmutableList<String> ACCEPTED_DATE_FORMATS = Lists.immutable.with(
- "yyyy-MM-dd-HH.mm.ss.SSS",
- "yyyy-MM-dd HH.mm.ss.SSS"
- );
-
- static final Pattern identityPattern =
- Pattern.compile("(?i)\\bgenerated\\s+(.*)as\\s+identity\\s*(\\(.*\\))?");
-
- private final Pattern loggedPattern =
- Pattern.compile("(?i)(not\\s+)?\\blogged\\b");
- private final Pattern compactPattern =
- Pattern.compile("(?i)(not\\s+)?\\bcompact\\b");
-
- @Override
- public String handlePostColumnText(String postColumnText, CreateTableColumn column, CreateTable table) {
- // default clause seems to require a reversal in HSQL - only for DB2?
- Matcher defaultMatcher = this.defaultPattern.matcher(postColumnText);
- while (defaultMatcher.find()) {
- String nullClause = defaultMatcher.group(1);
- String defaultClause = defaultMatcher.group(2);
- postColumnText = defaultMatcher.replaceFirst("DEFAULT " + defaultClause + " " + nullClause);
- defaultMatcher = this.defaultPattern.matcher(postColumnText);
- }
-
- Matcher loggedMatcher = this.loggedPattern.matcher(postColumnText);
- if (loggedMatcher.find()) {
- postColumnText = loggedMatcher.replaceAll(" ");
- }
- Matcher compactMatcher = this.compactPattern.matcher(postColumnText);
- if (compactMatcher.find()) {
- postColumnText = compactMatcher.replaceAll(" ");
- }
-
- return postColumnText;
- }
-
- @Override
- public String handleAnySqlPostTranslation(String string, ChangeInput change) {
- return string.replaceAll("(?i)current\\s+timestamp", "current_timestamp");
- }
-
- @Override
- public String handleRawFullSql(String string, ChangeInput change) {
- // filter out specific db2 system calls like reorg
- return string.replaceAll("(?i)CALL\\s+SYSPROC.*", "");
- }
-}
+/**
+ * Copyright 2017 Goldman Sachs.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.gs.obevo.db.impl.platforms.db2;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.gs.obevo.api.appdata.ChangeInput;
+import com.gs.obevo.db.impl.platforms.sqltranslator.PostColumnSqlTranslator;
+import com.gs.obevo.db.impl.platforms.sqltranslator.PostParsedSqlTranslator;
+import com.gs.obevo.db.impl.platforms.sqltranslator.UnparsedSqlTranslator;
+import com.gs.obevo.db.sqlparser.syntaxparser.CreateTable;
+import com.gs.obevo.db.sqlparser.syntaxparser.CreateTableColumn;
+import org.eclipse.collections.api.list.ImmutableList;
+import org.eclipse.collections.impl.factory.Lists;
+
+public class Db2ToInMemorySqlTranslator implements PostColumnSqlTranslator, PostParsedSqlTranslator, UnparsedSqlTranslator {
+ private final Pattern defaultPattern = Pattern.compile("(?i)((?:not\\s+)?null)\\s+default\\s+(.*)");
+
+ // TODO add test cases for this in the integration test
+ // these are allowable by db2 (i.e. to use dots instead of colons), but HSQL does not
+    public static final ImmutableList<String> ACCEPTED_DATE_FORMATS = Lists.immutable.with(
+ "yyyy-MM-dd-HH.mm.ss.SSS",
+ "yyyy-MM-dd HH.mm.ss.SSS"
+ );
+
+ static final Pattern identityPattern =
+ Pattern.compile("(?i)\\bgenerated\\s+(.*)as\\s+identity\\s*(\\(.*\\))?");
+
+ private final Pattern loggedPattern =
+ Pattern.compile("(?i)(not\\s+)?\\blogged\\b");
+ private final Pattern compactPattern =
+ Pattern.compile("(?i)(not\\s+)?\\bcompact\\b");
+
+ @Override
+ public String handlePostColumnText(String postColumnText, CreateTableColumn column, CreateTable table) {
+ // default clause seems to require a reversal in HSQL - only for DB2?
+ Matcher defaultMatcher = this.defaultPattern.matcher(postColumnText);
+ while (defaultMatcher.find()) {
+ String nullClause = defaultMatcher.group(1);
+ String defaultClause = defaultMatcher.group(2);
+ postColumnText = defaultMatcher.replaceFirst("DEFAULT " + defaultClause + " " + nullClause);
+ defaultMatcher = this.defaultPattern.matcher(postColumnText);
+ }
+
+ Matcher loggedMatcher = this.loggedPattern.matcher(postColumnText);
+ if (loggedMatcher.find()) {
+ postColumnText = loggedMatcher.replaceAll(" ");
+ }
+ Matcher compactMatcher = this.compactPattern.matcher(postColumnText);
+ if (compactMatcher.find()) {
+ postColumnText = compactMatcher.replaceAll(" ");
+ }
+
+ return postColumnText;
+ }
+
+ @Override
+ public String handleAnySqlPostTranslation(String string, ChangeInput change) {
+ return string.replaceAll("(?i)current\\s+timestamp", "current_timestamp");
+ }
+
+ @Override
+ public String handleRawFullSql(String string, ChangeInput change) {
+ // filter out specific db2 system calls like reorg
+ return string.replaceAll("(?i)CALL\\s+SYSPROC.*", "");
+ }
+}
diff --git a/obevo-db-impls/obevo-db-postgresql/dbviewer.sh b/obevo-db-impls/obevo-db-postgresql/dbviewer.sh
new file mode 100755
index 00000000..bd8a46f5
--- /dev/null
+++ b/obevo-db-impls/obevo-db-postgresql/dbviewer.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+#
+# Copyright 2017 Goldman Sachs.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+CONTAINER_NAME=obevo-pgadmin-instance
+
+RUNNING_CONTAINER_ID=$(docker ps -aqf "name=$CONTAINER_NAME")
+if [[ ! -z "$RUNNING_CONTAINER_ID" ]]
+then
+ echo "Shutting down old container"
+ docker stop $RUNNING_CONTAINER_ID
+ docker rm $RUNNING_CONTAINER_ID
+fi
+
+PGADMIN_PORT=8080
+PGADMIN_EMAIL="katadeployer@obevo-kata.com"
+PGADMIN_PASSWORD="katadeploypass"
+docker run --name $CONTAINER_NAME -p 8080:80 -e "PGADMIN_DEFAULT_EMAIL=$PGADMIN_EMAIL" -e "PGADMIN_DEFAULT_PASSWORD=$PGADMIN_PASSWORD" -d dpage/pgadmin4
+
+echo ""
+echo "pgadmin4 setup successful"
+echo ""
+echo "Please visit http://localhost:8080 w/ username = $PGADMIN_EMAIL and password as $PGADMIN_PASSWORD to access the page"
+
diff --git a/obevo-db-impls/obevo-db-postgresql/getIpForDbviewer.sh b/obevo-db-impls/obevo-db-postgresql/getIpForDbviewer.sh
new file mode 100755
index 00000000..d7d3cc98
--- /dev/null
+++ b/obevo-db-impls/obevo-db-postgresql/getIpForDbviewer.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+#
+# Copyright 2017 Goldman Sachs.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+CONTAINER_NAME=obevo-postgresql-instance
+
+RUNNING_CONTAINER_ID=$(docker ps -aqf "name=$CONTAINER_NAME")
+
+
+if [[ ! -z "$RUNNING_CONTAINER_ID" ]]
+then
+ docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $RUNNING_CONTAINER_ID
+else
+ echo "Container is not running"
+ exit 1
+fi
diff --git a/obevo-db-impls/obevo-db-postgresql/src/main/java/com/gs/obevo/db/impl/platforms/postgresql/PostgreSqlLock.kt b/obevo-db-impls/obevo-db-postgresql/src/main/java/com/gs/obevo/db/impl/platforms/postgresql/PostgreSqlLock.kt
new file mode 100644
index 00000000..b8b1a35d
--- /dev/null
+++ b/obevo-db-impls/obevo-db-postgresql/src/main/java/com/gs/obevo/db/impl/platforms/postgresql/PostgreSqlLock.kt
@@ -0,0 +1,56 @@
+/**
+ * Copyright 2017 Goldman Sachs.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.gs.obevo.db.impl.platforms.postgresql
+
+import com.gs.obevo.api.platform.AuditLock
+import com.gs.obevo.db.impl.core.jdbc.JdbcHelper
+import org.apache.commons.dbutils.handlers.ScalarHandler
+import org.slf4j.LoggerFactory
+import java.sql.Connection
+import java.time.Duration
+
+class PostgreSqlLock internal constructor(
+ private val jdbc: JdbcHelper,
+ private val conn: Connection
+) : AuditLock {
+ private val defaultRetryDelay = Duration.ofSeconds(5L)
+ private val lockId = 5749832 // using random integer as the lock ID for the pg advisory lock to prevent collisions with others
+
+ override fun lock() {
+ var lockAcquired = false
+
+ while (!lockAcquired) {
+ val sql = "SELECT pg_try_advisory_lock($lockId)"
+ LOG.info("Attempting to acquire Postgres server lock via {}", sql)
+ lockAcquired = jdbc.query(conn, sql, ScalarHandler())
+
+ if (!lockAcquired) {
+ LOG.info("Lock not yet available; waiting for {} seconds", defaultRetryDelay.seconds)
+
+ Thread.sleep(defaultRetryDelay.toMillis())
+ }
+ }
+ }
+
+ override fun unlock() {
+ val lockReleased = jdbc.query(conn, "SELECT pg_advisory_unlock($lockId)", ScalarHandler())
+ LOG.info("Postgres lock has been {} released", if (lockReleased) "successfully" else "unsuccessfully")
+ }
+
+ companion object {
+ private val LOG = LoggerFactory.getLogger(PostgreSqlLock::class.java)
+ }
+}
diff --git a/obevo-db-impls/obevo-db-postgresql/src/main/java/com/gs/obevo/db/impl/platforms/postgresql/PostgreSqlSqlExecutor.java b/obevo-db-impls/obevo-db-postgresql/src/main/java/com/gs/obevo/db/impl/platforms/postgresql/PostgreSqlSqlExecutor.java
index ceb15e2f..16c395b2 100644
--- a/obevo-db-impls/obevo-db-postgresql/src/main/java/com/gs/obevo/db/impl/platforms/postgresql/PostgreSqlSqlExecutor.java
+++ b/obevo-db-impls/obevo-db-postgresql/src/main/java/com/gs/obevo/db/impl/platforms/postgresql/PostgreSqlSqlExecutor.java
@@ -1,44 +1,55 @@
-/**
- * Copyright 2017 Goldman Sachs.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package com.gs.obevo.db.impl.platforms.postgresql;
-
-import java.sql.Connection;
-
-import javax.sql.DataSource;
-
-import com.gs.obevo.api.appdata.PhysicalSchema;
-import com.gs.obevo.db.impl.core.jdbc.JdbcHelper;
-import com.gs.obevo.db.impl.platforms.AbstractSqlExecutor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class PostgreSqlSqlExecutor extends AbstractSqlExecutor {
- private static final Logger LOG = LoggerFactory.getLogger(PostgreSqlSqlExecutor.class);
-
- public PostgreSqlSqlExecutor(DataSource ds) {
- super(ds);
- }
-
- @Override
- public void setDataSourceSchema(Connection conn, PhysicalSchema schema) {
- // NOTE - SET SCHEMA 'schemaName' (with quotes) is only effective for PostgreSQL versions >= 8.4
- // For 8.3, we must use SET search_path TO schemaName (without quotes)
- // This is compatible w/ future versions as well; hence, we keep it
- // (unfortunately, can't easily bring up version 8.3 on an app server environment for easy testing)
- JdbcHelper jdbc = this.getJdbcTemplate();
- jdbc.update(conn, "SET search_path TO " + schema.getPhysicalName());
- }
-}
+/**
+ * Copyright 2017 Goldman Sachs.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.gs.obevo.db.impl.platforms.postgresql;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+
+import javax.sql.DataSource;
+
+import com.gs.obevo.api.appdata.PhysicalSchema;
+import com.gs.obevo.api.platform.AuditLock;
+import com.gs.obevo.db.impl.core.jdbc.JdbcHelper;
+import com.gs.obevo.db.impl.platforms.AbstractSqlExecutor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class PostgreSqlSqlExecutor extends AbstractSqlExecutor {
+ private static final Logger LOG = LoggerFactory.getLogger(PostgreSqlSqlExecutor.class);
+
+ public PostgreSqlSqlExecutor(DataSource ds) {
+ super(ds);
+ }
+
+ @Override
+ public void setDataSourceSchema(Connection conn, PhysicalSchema schema) {
+ // NOTE - SET SCHEMA 'schemaName' (with quotes) is only effective for PostgreSQL versions >= 8.4
+ // For 8.3, we must use SET search_path TO schemaName (without quotes)
+ // This is compatible w/ future versions as well; hence, we keep it
+ // (unfortunately, can't easily bring up version 8.3 on an app server environment for easy testing)
+ JdbcHelper jdbc = this.getJdbcTemplate();
+ jdbc.update(conn, "SET search_path TO " + schema.getPhysicalName());
+ }
+
+ @Override
+ public AuditLock lock(Connection conn) {
+ try {
+ return new PostgreSqlLock(this.getJdbcTemplate(), getDs().getConnection());
+ } catch (SQLException e) {
+ throw new RuntimeException(e);
+ }
+ }
+}
diff --git a/obevo-db-impls/obevo-db-postgresql/src/test/java/com/gs/obevo/db/impl/platforms/postgresql/PostgreSqlDeployerIT.java b/obevo-db-impls/obevo-db-postgresql/src/test/java/com/gs/obevo/db/impl/platforms/postgresql/PostgreSqlDeployerIT.java
index f9de46fd..94b2e82a 100644
--- a/obevo-db-impls/obevo-db-postgresql/src/test/java/com/gs/obevo/db/impl/platforms/postgresql/PostgreSqlDeployerIT.java
+++ b/obevo-db-impls/obevo-db-postgresql/src/test/java/com/gs/obevo/db/impl/platforms/postgresql/PostgreSqlDeployerIT.java
@@ -19,6 +19,10 @@
import java.sql.Timestamp;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.function.Function;
import javax.sql.DataSource;
@@ -26,6 +30,7 @@
import com.gs.obevo.db.impl.core.jdbc.JdbcHelper;
import org.apache.commons.dbutils.DbUtils;
import org.eclipse.collections.api.block.function.primitive.IntToObjectFunction;
+import org.eclipse.collections.impl.factory.Lists;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@@ -49,14 +54,31 @@ public PostgreSqlDeployerIT(IntToObjectFunction getAppCont
@Test
public void testDeploy() throws Exception {
- getAppContext.valueOf(1)
+ DbDeployerAppContext dbDeployerAppContext = getAppContext.valueOf(1)
.setupEnvInfra()
.setupEnvInfra()
- .cleanEnvironment()
- .deploy();
+ .cleanEnvironment();
+
+        Function<Integer, Object> threadInvoker = threadNumber -> {
+ System.out.println("DEPLOY THREAD " + threadNumber);
+ getAppContext.valueOf(1).deploy();
+ return null;
+ };
+
+ // Invoke the jobs in parallel to ensure that the postgresql locking works; only one deploy should go through,
+ // whereas the others will become no-ops
+ ExecutorService executorService = Executors.newFixedThreadPool(3);
+        List<Future<Object>> futures = executorService.invokeAll(Lists.mutable.of(
+ () -> threadInvoker.apply(1),
+ () -> threadInvoker.apply(2),
+ () -> threadInvoker.apply(3)
+ ));
+ for (Future