From f18c56dc202aaf3e5ba4f0a5348803093c136a67 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Fri, 8 Oct 2021 08:04:24 +0100 Subject: [PATCH 01/34] WIP --- .../config/AkkaPersistenceConfig.scala | 20 +++++++ .../journal/dao/BaseByteArrayJournalDao.scala | 2 +- .../postgres/journal/dao/FlatJournalDao.scala | 4 +- .../postgres/journal/dao/JournalQueries.scala | 9 ++- .../postgres/journal/dao/JournalTables.scala | 28 +++++++++- .../dao/NestedPartitionsJournalDao.scala | 4 +- .../journal/dao/PartitionedJournalDao.scala | 4 +- .../akka/persistence/postgres/package.scala | 6 ++ .../postgres/nested-partitions-schema.sql | 55 +++++++++++++++++++ .../schema/postgres/partitioned-schema.sql | 55 +++++++++++++++++++ .../schema/postgres/plain-schema.sql | 55 +++++++++++++++++++ .../journal/dao/JournalQueriesTest.scala | 7 ++- .../journal/dao/JournalTablesTest.scala | 24 ++++++++ .../CurrentEventsByTagWithGapsTest.scala | 4 +- .../postgres/util/DropCreate.scala | 6 +- ...1-create-journal-persistence-ids-table.sql | 28 ++++++++++ ...unction-update-journal-persistence-ids.sql | 36 ++++++++++++ ...trigger-update-journal-persistence-ids.sql | 19 +++++++ ...tion-check-persistence-id-max-sequence.sql | 19 +++++++ ...gger-check-persistence-id-max-sequence.sql | 19 +++++++ ...populate-journal-persistence-ids-table.sql | 1 + 21 files changed, 392 insertions(+), 13 deletions(-) create mode 100644 scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql create mode 100644 scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql create mode 100644 scripts/migration-0.6.0/3-create-trigger-update-journal-persistence-ids.sql create mode 100644 scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql create mode 100644 scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql create mode 100644 scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql diff --git a/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala b/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala index 7273886b..99f343d4 100644 --- a/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala +++ b/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala @@ -49,6 +49,25 @@ class JournalTableConfiguration(config: Config) { override def toString: String = s"JournalTableConfiguration($tableName,$schemaName,$columnNames)" } +class JournalPersistenceIdsTableColumnNames(config: Config) { + private val cfg = config.asConfig("tables.journalPersistenceIds.columnNames") + val persistenceId: String = cfg.as[String]("persistenceId", "persistence_id") + val maxSequenceNumber: String = cfg.as[String]("maxSequenceNumber", "max_sequence_number") + val maxOrdering: String = cfg.as[String]("maxOrdering", "max_ordering") + val minOrdering: String = cfg.as[String]("minOrdering", "min_ordering") + + override def toString: String = + s"JournalPersistenceIdsTableColumnNames($persistenceId,$maxSequenceNumber,$maxOrdering,$minOrdering)" +} + +class JournalPersistenceIdsTableConfiguration(config: Config) { + private val cfg = config.asConfig("tables.journalPersistenceIds") + val tableName: String = cfg.as[String]("tableName", "journal_persistence_ids") + val schemaName: Option[String] = cfg.as[String]("schemaName").trim + val columnNames: JournalPersistenceIdsTableColumnNames = new JournalPersistenceIdsTableColumnNames(config) + override def toString: String = 
s"JournalPersistenceIdsTableConfiguration($tableName,$schemaName,$columnNames)" +} + class SnapshotTableColumnNames(config: Config) { private val cfg = config.asConfig("tables.snapshot.columnNames") val persistenceId: String = cfg.as[String]("persistenceId", "persistence_id") @@ -122,6 +141,7 @@ class TagsConfig(config: Config) { class JournalConfig(config: Config) { val partitionsConfig = new JournalPartitionsConfiguration(config) val journalTableConfiguration = new JournalTableConfiguration(config) + val journalPersistenceIdsTableConfiguration = new JournalPersistenceIdsTableConfiguration(config) val pluginConfig = new JournalPluginConfig(config) val daoConfig = new BaseByteArrayJournalDaoConfig(config) val tagsConfig = new TagsConfig(config) diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala index 41a61845..ed0ae9f6 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala @@ -139,7 +139,7 @@ trait BaseByteArrayJournalDao extends JournalDaoWithUpdates with BaseJournalDaoW override def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = for { - maybeHighestSeqNo <- db.run(queries.highestSequenceNrForPersistenceId(persistenceId).result) + maybeHighestSeqNo <- db.run(queries.highestSequenceNrForPersistenceId(persistenceId).result.headOption) } yield maybeHighestSeqNo.getOrElse(0L) override def messages( diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/FlatJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/FlatJournalDao.scala index 12feb4c1..71dfeaf0 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/FlatJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/FlatJournalDao.scala @@ -13,7 +13,9 @@ class FlatJournalDao(val db: Database, val journalConfig: JournalConfig, seriali implicit val ec: ExecutionContext, val mat: Materializer) extends BaseByteArrayJournalDao { - val queries = new JournalQueries(FlatJournalTable(journalConfig.journalTableConfiguration)) + val queries = new JournalQueries( + FlatJournalTable(journalConfig.journalTableConfiguration), + JournalPersistenceIdsTable(journalConfig.journalPersistenceIdsTableConfiguration)) val tagDao = new SimpleTagDao(db, journalConfig.tagsTableConfiguration) val eventTagConverter = new CachedTagIdResolver(tagDao, journalConfig.tagsConfig) val serializer = new ByteArrayJournalSerializer(serialization, eventTagConverter) diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala index 345127d7..93e2cfcd 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala @@ -10,7 +10,9 @@ import io.circe.Json import slick.lifted.TableQuery import slick.sql.FixedSqlAction -class JournalQueries(journalTable: TableQuery[JournalTable]) { +class JournalQueries( + journalTable: TableQuery[JournalTable], + journalPersistenceIdsTable: TableQuery[JournalPersistenceIdsTable]) { import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ @@ -48,8 +50,9 @@ class JournalQueries(journalTable: TableQuery[JournalTable]) { .map(_.deleted) 
.update(true) - private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] = - journalTable.filter(_.persistenceId === persistenceId).map(_.sequenceNumber).max + private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]) = { + journalPersistenceIdsTable.filter(_.persistenceId === persistenceId).map(_.maxSequenceNumber).take(1) + } private def _highestMarkedSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] = journalTable.filter(_.deleted === true).filter(_.persistenceId === persistenceId).map(_.sequenceNumber).max diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalTables.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalTables.scala index 38735468..8aa0dc06 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalTables.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalTables.scala @@ -6,7 +6,7 @@ package akka.persistence.postgres package journal.dao -import akka.persistence.postgres.config.JournalTableConfiguration +import akka.persistence.postgres.config.{ JournalPersistenceIdsTableConfiguration, JournalTableConfiguration } import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ import io.circe.Json @@ -90,3 +90,29 @@ object NestedPartitionsJournalTable { def apply(journalTableCfg: JournalTableConfiguration): TableQuery[JournalTable] = FlatJournalTable.apply(journalTableCfg) } + +class JournalPersistenceIdsTable(_tableTag: Tag, journalPersistenceIdsTableCfg: JournalPersistenceIdsTableConfiguration) + extends Table[JournalPersistenceIdsRow]( + _tableTag, + _schemaName = journalPersistenceIdsTableCfg.schemaName, + _tableName = journalPersistenceIdsTableCfg.tableName) { + override def * = ( + persistenceId, + maxSequenceNumber, + minOrdering, + maxOrdering) <> (JournalPersistenceIdsRow.tupled, JournalPersistenceIdsRow.unapply) + + val persistenceId: Rep[String] = + column[String](journalPersistenceIdsTableCfg.columnNames.persistenceId, O.Length(255, varying = true)) + val maxSequenceNumber: Rep[Long] = column[Long](journalPersistenceIdsTableCfg.columnNames.maxSequenceNumber) + val minOrdering: Rep[Long] = column[Long](journalPersistenceIdsTableCfg.columnNames.minOrdering) + val maxOrdering: Rep[Long] = column[Long](journalPersistenceIdsTableCfg.columnNames.maxOrdering) + + val pk = primaryKey(s"${tableName}_pk", persistenceId) +} + +object JournalPersistenceIdsTable { + def apply( + journalPersistenceIdsTableCfg: JournalPersistenceIdsTableConfiguration): TableQuery[JournalPersistenceIdsTable] = + TableQuery(tag => new JournalPersistenceIdsTable(tag, journalPersistenceIdsTableCfg)) +} diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/NestedPartitionsJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/NestedPartitionsJournalDao.scala index 30c34d66..41065e78 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/NestedPartitionsJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/NestedPartitionsJournalDao.scala @@ -17,7 +17,9 @@ class NestedPartitionsJournalDao(db: Database, journalConfig: JournalConfig, ser implicit ec: ExecutionContext, mat: Materializer) extends FlatJournalDao(db, journalConfig, serialization) { - override val queries = new JournalQueries(NestedPartitionsJournalTable(journalConfig.journalTableConfiguration)) + override val queries = new JournalQueries( + 
NestedPartitionsJournalTable(journalConfig.journalTableConfiguration), + JournalPersistenceIdsTable(journalConfig.journalPersistenceIdsTableConfiguration)) private val journalTableCfg = journalConfig.journalTableConfiguration private val partitionSize = journalConfig.partitionsConfig.size private val partitionPrefix = journalConfig.partitionsConfig.prefix diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala index 68792bf6..07cd1a4a 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala @@ -16,7 +16,9 @@ class PartitionedJournalDao(db: Database, journalConfig: JournalConfig, serializ implicit ec: ExecutionContext, mat: Materializer) extends FlatJournalDao(db, journalConfig, serialization) { - override val queries = new JournalQueries(PartitionedJournalTable(journalConfig.journalTableConfiguration)) + override val queries = new JournalQueries( + PartitionedJournalTable(journalConfig.journalTableConfiguration), + JournalPersistenceIdsTable(journalConfig.journalPersistenceIdsTableConfiguration)) private val journalTableCfg = journalConfig.journalTableConfiguration private val partitionSize = journalConfig.partitionsConfig.size private val partitionPrefix = journalConfig.partitionsConfig.prefix diff --git a/core/src/main/scala/akka/persistence/postgres/package.scala b/core/src/main/scala/akka/persistence/postgres/package.scala index 979a44e2..8a3a8df7 100644 --- a/core/src/main/scala/akka/persistence/postgres/package.scala +++ b/core/src/main/scala/akka/persistence/postgres/package.scala @@ -16,4 +16,10 @@ package object postgres { message: Array[Byte], tags: List[Int], metadata: Json) + + final case class JournalPersistenceIdsRow( + persistenceId: String, + maxSequenceNumber: Long, + minOrdering: Long, + maxOrdering: Long) } diff --git a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql index fc31fdcd..8d3fe87f 100644 --- a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql +++ b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql @@ -64,3 +64,58 @@ CREATE TABLE IF NOT EXISTS public.snapshot metadata jsonb NOT NULL, PRIMARY KEY (persistence_id, sequence_number) ); + +DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids on public.journal; +DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number on public.journal_persistence_ids; +DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); +DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); +DROP TABLE IF EXISTS public.journal_persistence_ids; + +CREATE TABLE public.journal_persistence_ids( + persistence_id TEXT NOT NULL, + max_sequence_number BIGINT NOT NULL, + min_ordering BIGINT NOT NULL, + max_ordering BIGINT NOT NULL, + PRIMARY KEY (persistence_id) +); + + +CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGGER AS +$$ +DECLARE +BEGIN + INSERT into public.journal_persistence_ids (persistence_id, max_sequence_number, min_ordering, max_ordering) + VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) + ON CONFLICT (persistence_id) DO UPDATE + SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, min_ordering = 
LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); + + RETURN NEW; +END; +$$ +LANGUAGE plpgsql; + + +CREATE TRIGGER trig_update_journal_persistence_ids + AFTER INSERT ON public.journal + FOR EACH ROW + EXECUTE PROCEDURE public.update_journal_persistence_ids(); + + +CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS +$$ +DECLARE +BEGIN + IF NEW.max_sequence_number <= OLD.max_sequence_number THEN + RAISE EXCEPTION 'New max_sequence_number not higher than previous value'; + END IF; + + RETURN NEW; +END; +$$ +LANGUAGE plpgsql; + + +CREATE TRIGGER trig_check_persistence_id_max_sequence_number + BEFORE UPDATE ON public.journal_persistence_ids + FOR EACH ROW + EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); diff --git a/core/src/test/resources/schema/postgres/partitioned-schema.sql b/core/src/test/resources/schema/postgres/partitioned-schema.sql index dc9f20cf..d77d2d5c 100644 --- a/core/src/test/resources/schema/postgres/partitioned-schema.sql +++ b/core/src/test/resources/schema/postgres/partitioned-schema.sql @@ -65,3 +65,58 @@ CREATE TABLE IF NOT EXISTS public.snapshot metadata jsonb NOT NULL, PRIMARY KEY (persistence_id, sequence_number) ); + +DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids on public.journal; +DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number on public.journal_persistence_ids; +DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); +DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); +DROP TABLE IF EXISTS public.journal_persistence_ids; + +CREATE TABLE public.journal_persistence_ids( + persistence_id TEXT NOT NULL, + max_sequence_number BIGINT NOT NULL, + min_ordering BIGINT NOT NULL, + max_ordering BIGINT NOT NULL, + PRIMARY KEY (persistence_id) +); + + +CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGGER AS +$$ +DECLARE +BEGIN + INSERT into public.journal_persistence_ids (persistence_id, max_sequence_number, min_ordering, max_ordering) + VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) + ON CONFLICT (persistence_id) DO UPDATE + SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, min_ordering = LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); + + RETURN NEW; +END; +$$ +LANGUAGE plpgsql; + + +CREATE TRIGGER trig_update_journal_persistence_ids + AFTER INSERT ON public.journal + FOR EACH ROW + EXECUTE PROCEDURE public.update_journal_persistence_ids(); + + +CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS +$$ +DECLARE +BEGIN + IF NEW.max_sequence_number <= OLD.max_sequence_number THEN + RAISE EXCEPTION 'New max_sequence_number not higher than previous value'; + END IF; + + RETURN NEW; +END; +$$ +LANGUAGE plpgsql; + + +CREATE TRIGGER trig_check_persistence_id_max_sequence_number + BEFORE UPDATE ON public.journal_persistence_ids + FOR EACH ROW + EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); diff --git a/core/src/test/resources/schema/postgres/plain-schema.sql b/core/src/test/resources/schema/postgres/plain-schema.sql index d38aec6f..afe9bbf2 100644 --- a/core/src/test/resources/schema/postgres/plain-schema.sql +++ b/core/src/test/resources/schema/postgres/plain-schema.sql @@ -38,3 +38,58 @@ CREATE TABLE IF NOT EXISTS public.snapshot metadata jsonb NOT NULL, PRIMARY KEY (persistence_id, sequence_number) ); + +DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids on 
public.journal; +DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number on public.journal_persistence_ids; +DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); +DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); +DROP TABLE IF EXISTS public.journal_persistence_ids; + +CREATE TABLE public.journal_persistence_ids( + persistence_id TEXT NOT NULL, + max_sequence_number BIGINT NOT NULL, + min_ordering BIGINT NOT NULL, + max_ordering BIGINT NOT NULL, + PRIMARY KEY (persistence_id) +); + + +CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGGER AS +$$ +DECLARE +BEGIN + INSERT into public.journal_persistence_ids (persistence_id, max_sequence_number, min_ordering, max_ordering) + VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) + ON CONFLICT (persistence_id) DO UPDATE + SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, min_ordering = LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); + + RETURN NEW; +END; +$$ +LANGUAGE plpgsql; + + +CREATE TRIGGER trig_update_journal_persistence_ids + AFTER INSERT ON public.journal + FOR EACH ROW + EXECUTE PROCEDURE public.update_journal_persistence_ids(); + + +CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS +$$ +DECLARE +BEGIN + IF NEW.max_sequence_number <= OLD.max_sequence_number THEN + RAISE EXCEPTION 'New max_sequence_number not higher than previous value'; + END IF; + + RETURN NEW; +END; +$$ +LANGUAGE plpgsql; + + +CREATE TRIGGER trig_check_persistence_id_max_sequence_number + BEFORE UPDATE ON public.journal_persistence_ids + FOR EACH ROW + EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); diff --git a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala index 53964b00..0a16f648 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala @@ -17,7 +17,7 @@ class JournalQueriesTest extends BaseQueryTest { it should "create SQL query for highestSequenceNrForPersistenceId" in withJournalQueries { queries => queries.highestSequenceNrForPersistenceId( - "aaa") shouldBeSQL """select max("sequence_number") from "journal" where "persistence_id" = ?""" + "aaa") shouldBeSQL """select "max_sequence_number" from "journal_persistence_ids" where "persistence_id" = ? 
limit 1""" } it should "create SQL query for selectByPersistenceIdAndMaxSequenceNumber" in withJournalQueries { queries => @@ -64,7 +64,10 @@ class JournalQueriesTest extends BaseQueryTest { private def withJournalQueries(f: JournalQueries => Unit): Unit = { withActorSystem { implicit system => - f(new JournalQueries(FlatJournalTable.apply(journalConfig.journalTableConfiguration))) + f( + new JournalQueries( + FlatJournalTable.apply(journalConfig.journalTableConfiguration), + JournalPersistenceIdsTable.apply(journalConfig.journalPersistenceIdsTableConfiguration))) } } } diff --git a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalTablesTest.scala b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalTablesTest.scala index 16652847..f253cb84 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalTablesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalTablesTest.scala @@ -18,6 +18,7 @@ class JournalTablesTest extends TablesTestSpec { } { journalName should "be configured with a schema name" in { journalTable.baseTableRow.schemaName shouldBe journalTableConfiguration.schemaName + } it should "be configured with a table name" in { @@ -34,4 +35,27 @@ class JournalTablesTest extends TablesTestSpec { journalTable.baseTableRow.tags.toString shouldBe colName(journalTableConfiguration.columnNames.tags) } } + + val journalPersistenceIdsTableConfiguration = journalConfig.journalPersistenceIdsTableConfiguration + val journalPersistenceIdsTable = JournalPersistenceIdsTable(journalPersistenceIdsTableConfiguration) + + "JournalPersistenceIdsTable" should "be configured with a schema name" in { + journalPersistenceIdsTable.baseTableRow.schemaName shouldBe journalPersistenceIdsTableConfiguration.schemaName + } + + it should "be configured with a table name" in { + journalPersistenceIdsTable.baseTableRow.tableName shouldBe journalPersistenceIdsTableConfiguration.tableName + } + + it should "be configured with column names" in { + val colName = toColumnName(journalPersistenceIdsTableConfiguration.tableName)(_) + journalPersistenceIdsTable.baseTableRow.persistenceId.toString shouldBe colName( + journalPersistenceIdsTableConfiguration.columnNames.persistenceId) + journalPersistenceIdsTable.baseTableRow.maxSequenceNumber.toString shouldBe colName( + journalPersistenceIdsTableConfiguration.columnNames.maxSequenceNumber) + journalPersistenceIdsTable.baseTableRow.maxOrdering.toString shouldBe colName( + journalPersistenceIdsTableConfiguration.columnNames.maxOrdering) + journalPersistenceIdsTable.baseTableRow.minOrdering.toString shouldBe colName( + journalPersistenceIdsTableConfiguration.columnNames.minOrdering) + } } diff --git a/core/src/test/scala/akka/persistence/postgres/query/CurrentEventsByTagWithGapsTest.scala b/core/src/test/scala/akka/persistence/postgres/query/CurrentEventsByTagWithGapsTest.scala index 539c36b5..9ceedcb3 100644 --- a/core/src/test/scala/akka/persistence/postgres/query/CurrentEventsByTagWithGapsTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/query/CurrentEventsByTagWithGapsTest.scala @@ -46,7 +46,9 @@ class CurrentEventsByTagWithGapsTest }.futureValue val journalTable = schemaType.table(journalConfig.journalTableConfiguration) - val journalQueries = new JournalQueries(journalTable) + val journalPersistenceIdsTable = + schemaType.persistenceIdsTable(journalConfig.journalPersistenceIdsTableConfiguration) + val journalQueries = new JournalQueries(journalTable, journalPersistenceIdsTable) val 
journalOps = new JavaDslPostgresReadJournalOperations(system) val tag = "testTag" diff --git a/core/src/test/scala/akka/persistence/postgres/util/DropCreate.scala b/core/src/test/scala/akka/persistence/postgres/util/DropCreate.scala index 928e2f70..890c7d81 100644 --- a/core/src/test/scala/akka/persistence/postgres/util/DropCreate.scala +++ b/core/src/test/scala/akka/persistence/postgres/util/DropCreate.scala @@ -6,10 +6,10 @@ package akka.persistence.postgres.util import java.sql.Statement - -import akka.persistence.postgres.config.JournalTableConfiguration +import akka.persistence.postgres.config.{ JournalPersistenceIdsTableConfiguration, JournalTableConfiguration } import akka.persistence.postgres.journal.dao.{ FlatJournalTable, + JournalPersistenceIdsTable, JournalTable, NestedPartitionsJournalTable, PartitionedJournalTable @@ -25,6 +25,8 @@ object Schema { lazy val schema: String = s"schema/postgres/$resourceNamePrefix-schema.sql" lazy val configName: String = s"${resourceNamePrefix}-application.conf" def table(journalTableCfg: JournalTableConfiguration): TableQuery[JournalTable] + def persistenceIdsTable(journalPersistenceIdsTableCfg: JournalPersistenceIdsTableConfiguration) + : TableQuery[JournalPersistenceIdsTable] = JournalPersistenceIdsTable.apply(journalPersistenceIdsTableCfg) } case object Plain extends SchemaType { diff --git a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql new file mode 100644 index 00000000..e25d9dd3 --- /dev/null +++ b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql @@ -0,0 +1,28 @@ +DO $$ +DECLARE + -- replace with appropriate values + schema CONSTANT TEXT := 'public'; + jpi_table_name CONSTANT TEXT := 'journal_persistence_ids'; + jpi_persistence_id_column CONSTANT TEXT := 'persistence_id'; + jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; + jpi_max_ordering_column CONSTANT TEXT := 'max_ordering'; + jpi_min_ordering_column CONSTANT TEXT := 'min_ordering'; + + -- variables + jpi_table TEXT; + sql TEXT; +BEGIN + jpi_table := schema || '.' 
|| jpi_table_name; + + sql := 'CREATE TABLE IF NOT EXISTS ' || jpi_table || + '(' || + jpi_persistence_id_column || ' TEXT NOT NULL, ' || + jpi_max_sequence_number_column || ' BIGINT NOT NULL, ' || + jpi_max_ordering_column || ' BIGINT NOT NULL, ' || + jpi_min_ordering_column || ' BIGINT NOT NULL, ' || + 'PRIMARY KEY (' || jpi_persistence_id_column || ')' || + ')'; + + EXECUTE sql; +END ; +$$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql b/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql new file mode 100644 index 00000000..5b0f6406 --- /dev/null +++ b/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql @@ -0,0 +1,36 @@ +CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGGER AS +$$ +DECLARE + -- replace with appropriate values + schema CONSTANT TEXT := 'public'; + j_table_name CONSTANT TEXT := 'journal'; + j_persistence_id_column CONSTANT TEXT := 'persistence_id'; + j_sequence_number_column CONSTANT TEXT := 'sequence_number'; + j_ordering_column CONSTANT TEXT := 'ordering'; + jpi_persistence_ids_table_name CONSTANT TEXT := 'journal_persistence_ids'; + jpi_persistence_id_column CONSTANT TEXT := 'persistence_id'; + jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; + jpi_max_ordering_column CONSTANT TEXT := 'max_ordering'; + jpi_min_ordering_number_column CONSTANT TEXT := 'min_ordering'; + + -- variables + j_table TEXT; + jpi_table TEXT; + sql TEXT; +BEGIN + j_table := schema || '.' || j_table_name; + jpi_table := schema || '.' || jpi_table_name; + + sql := 'INSERT INTO ' || jpi_table || '(' || jpi_persistence_id_column || ',' || jpi_max_sequence_number_column || ',' || jpi_max_ordering_column || ',' || jpi_min_ordering_number_column || ')' || + 'VALUES (NEW.' || j_persistence_id_column || ', NEW.' || j_sequence_number_column || ', NEW.' || j_ordering_column || ', NEW.' || j_ordering_column || ')' || + 'ON CONFLICT (' || jpi_persistence_id_column || ') DO UPDATE' || + 'SET ' || + jpi_max_sequence_number_column || ' = NEW.' || j_sequence_number_column || ',' || + jpi_max_ordering_column || ' = NEW' || j_ordering_column || ',' || + jpi_min_ordering_column || ' = LEAST(' || jpi_table || '.' || jpi_min_ordering_column || ', NEW.' || j_ordering_column || ')'; + + EXECUTE sql; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/3-create-trigger-update-journal-persistence-ids.sql b/scripts/migration-0.6.0/3-create-trigger-update-journal-persistence-ids.sql new file mode 100644 index 00000000..25ee9636 --- /dev/null +++ b/scripts/migration-0.6.0/3-create-trigger-update-journal-persistence-ids.sql @@ -0,0 +1,19 @@ +DO $$ +DECLARE + -- replace with appropriate values + schema CONSTANT TEXT := 'public'; + j_table_name CONSTANT TEXT := 'journal'; + + -- variables + j_table TEXT; + sql TEXT; +BEGIN + j_table := schema || '.' 
|| j_table_name; + + sql := 'CREATE TRIGGER trig_update_journal_persistence_id + AFTER INSERT ON ' || j_table || ' FOR EACH ROW + EXECUTE PROCEDURE ' || schema || '.update_journal_persistence_ids()'; + + EXECUTE sql; +END ; +$$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql b/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql new file mode 100644 index 00000000..ad17d14c --- /dev/null +++ b/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql @@ -0,0 +1,19 @@ +-- replace schema value if required +CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS +$$ +DECLARE + -- replace with appropriate values + jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; + + -- variables + sql TEXT; +BEGIN + sql := 'IF NEW.' || jpi_max_sequence_number_column || ' <= OLD.' || jpi_max_sequence_number_column || ' THEN + RAISE EXCEPTION ''New max_sequence_number not higher than previous value''; + END IF;'; + + EXECUTE sql; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql b/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql new file mode 100644 index 00000000..2c0ed69a --- /dev/null +++ b/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql @@ -0,0 +1,19 @@ + +DO $$ +DECLARE + -- replace with appropriate values + schema CONSTANT TEXT := 'public'; + jpi_table_name CONSTANT TEXT := 'journal_persistence_ids'; + + -- variables + jpi_table TEXT; + sql TEXT; +BEGIN + jpi_table := schema || '.' || jpi_table_name; + sql := 'CREATE TRIGGER trig_check_persistence_id_max_sequence_number + BEFORE UPDATE ON ' || jpi_table || ' FOR EACH ROW + EXECUTE PROCEDURE ' || schema || '.check_persistence_id_max_sequence_number()'; + + EXECUTE sql; +END ; +$$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql b/scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql new file mode 100644 index 00000000..2126a4aa --- /dev/null +++ b/scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql @@ -0,0 +1 @@ +-- TODO From 6a9d567a42449d1ca2cd5a4e074a1f17dce3deae Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Fri, 8 Oct 2021 15:49:47 +0100 Subject: [PATCH 02/34] Add migration to populate journal_persistence_ids table --- ...1-create-journal-persistence-ids-table.sql | 22 ++++++------- ...unction-update-journal-persistence-ids.sql | 6 ++-- ...trigger-update-journal-persistence-ids.sql | 12 +++---- ...tion-check-persistence-id-max-sequence.sql | 2 +- ...gger-check-persistence-id-max-sequence.sql | 13 ++++---- ...populate-journal-persistence-ids-table.sql | 32 ++++++++++++++++++- 6 files changed, 58 insertions(+), 29 deletions(-) diff --git a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql index e25d9dd3..0f8fe4aa 100644 --- a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql +++ b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql @@ -12,17 +12,17 @@ DECLARE jpi_table TEXT; sql TEXT; BEGIN - jpi_table := schema || '.' || jpi_table_name; + jpi_table := schema || '.' 
|| jpi_table_name; - sql := 'CREATE TABLE IF NOT EXISTS ' || jpi_table || - '(' || - jpi_persistence_id_column || ' TEXT NOT NULL, ' || - jpi_max_sequence_number_column || ' BIGINT NOT NULL, ' || - jpi_max_ordering_column || ' BIGINT NOT NULL, ' || - jpi_min_ordering_column || ' BIGINT NOT NULL, ' || - 'PRIMARY KEY (' || jpi_persistence_id_column || ')' || - ')'; + sql := 'CREATE TABLE IF NOT EXISTS ' || jpi_table || + '(' || + jpi_persistence_id_column || ' TEXT NOT NULL, ' || + jpi_max_sequence_number_column || ' BIGINT NOT NULL, ' || + jpi_max_ordering_column || ' BIGINT NOT NULL, ' || + jpi_min_ordering_column || ' BIGINT NOT NULL, ' || + 'PRIMARY KEY (' || jpi_persistence_id_column || ')' || + ')'; - EXECUTE sql; -END ; + EXECUTE sql; +END; $$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql b/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql index 5b0f6406..8f621095 100644 --- a/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql +++ b/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql @@ -25,9 +25,9 @@ BEGIN 'VALUES (NEW.' || j_persistence_id_column || ', NEW.' || j_sequence_number_column || ', NEW.' || j_ordering_column || ', NEW.' || j_ordering_column || ')' || 'ON CONFLICT (' || jpi_persistence_id_column || ') DO UPDATE' || 'SET ' || - jpi_max_sequence_number_column || ' = NEW.' || j_sequence_number_column || ',' || - jpi_max_ordering_column || ' = NEW' || j_ordering_column || ',' || - jpi_min_ordering_column || ' = LEAST(' || jpi_table || '.' || jpi_min_ordering_column || ', NEW.' || j_ordering_column || ')'; + jpi_max_sequence_number_column || ' = NEW.' || j_sequence_number_column || ',' || + jpi_max_ordering_column || ' = NEW' || j_ordering_column || ',' || + jpi_min_ordering_column || ' = LEAST(' || jpi_table || '.' || jpi_min_ordering_column || ', NEW.' || j_ordering_column || ')'; EXECUTE sql; diff --git a/scripts/migration-0.6.0/3-create-trigger-update-journal-persistence-ids.sql b/scripts/migration-0.6.0/3-create-trigger-update-journal-persistence-ids.sql index 25ee9636..719115c4 100644 --- a/scripts/migration-0.6.0/3-create-trigger-update-journal-persistence-ids.sql +++ b/scripts/migration-0.6.0/3-create-trigger-update-journal-persistence-ids.sql @@ -8,12 +8,12 @@ DECLARE j_table TEXT; sql TEXT; BEGIN - j_table := schema || '.' || j_table_name; + j_table := schema || '.' 
|| j_table_name; - sql := 'CREATE TRIGGER trig_update_journal_persistence_id - AFTER INSERT ON ' || j_table || ' FOR EACH ROW - EXECUTE PROCEDURE ' || schema || '.update_journal_persistence_ids()'; + sql := 'CREATE TRIGGER trig_update_journal_persistence_id + AFTER INSERT ON ' || j_table || ' FOR EACH ROW + EXECUTE PROCEDURE ' || schema || '.update_journal_persistence_ids()'; - EXECUTE sql; -END ; + EXECUTE sql; +END; $$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql b/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql index ad17d14c..0305cbd9 100644 --- a/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql +++ b/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql @@ -13,7 +13,7 @@ BEGIN END IF;'; EXECUTE sql; - + RETURN NEW; END; $$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql b/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql index 2c0ed69a..a7704c68 100644 --- a/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql +++ b/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql @@ -1,4 +1,3 @@ - DO $$ DECLARE -- replace with appropriate values @@ -9,11 +8,11 @@ DECLARE jpi_table TEXT; sql TEXT; BEGIN - jpi_table := schema || '.' || jpi_table_name; - sql := 'CREATE TRIGGER trig_check_persistence_id_max_sequence_number - BEFORE UPDATE ON ' || jpi_table || ' FOR EACH ROW - EXECUTE PROCEDURE ' || schema || '.check_persistence_id_max_sequence_number()'; + jpi_table := schema || '.' || jpi_table_name; + sql := 'CREATE TRIGGER trig_check_persistence_id_max_sequence_number + BEFORE UPDATE ON ' || jpi_table || ' FOR EACH ROW + EXECUTE PROCEDURE ' || schema || '.check_persistence_id_max_sequence_number()'; - EXECUTE sql; -END ; + EXECUTE sql; +END; $$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql b/scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql index 2126a4aa..222c1e59 100644 --- a/scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql +++ b/scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql @@ -1 +1,31 @@ --- TODO +DO $$ +DECLARE + -- replace with appropriate values + schema CONSTANT TEXT := 'public'; + j_table_name CONSTANT TEXT := 'journal'; + j_persistence_id_column CONSTANT TEXT := 'persistence_id'; + j_sequence_number_column CONSTANT TEXT := 'sequence_number'; + j_ordering_column CONSTANT TEXT := 'ordering'; + jpi_table_name CONSTANT TEXT := 'journal_persistence_ids'; + jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; + jpi_max_ordering_column CONSTANT TEXT := 'max_ordering'; + jpi_min_ordering_column CONSTANT TEXT := 'min_ordering'; + + -- variables + j_table TEXT; + jpi_table TEXT; + sql TEXT; +BEGIN + j_table := schema || '.' || j_table_name; + jpi_table := schema || '.' 
|| jpi_table_name; + sql := 'INSERT INTO ' || jpi_table || + ' SELECT ' || + j_persistence_id_column || ', ' || + 'max(' || j_sequence_number_column || '), ' || + 'max(' || j_ordering_column || '), ' || + 'min(' || j_ordering_column || ')' || + ' FROM ' || j_table || ' GROUP BY ' || j_persistence_id_column; + + EXECUTE sql; +END; +$$ LANGUAGE plpgsql; From 8394331e1038f1836ef5f9f5aad37f30cba2450f Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Mon, 11 Oct 2021 15:39:58 +0100 Subject: [PATCH 03/34] Only update journal_persistence_ids when necessary --- .../journal/dao/PartitionedJournalDao.scala | 3 +- .../postgres/nested-partitions-schema.sql | 31 +++---------------- .../schema/postgres/partitioned-schema.sql | 31 +++---------------- .../schema/postgres/plain-schema.sql | 31 +++---------------- ...unction-update-journal-persistence-ids.sql | 25 ++++++++------- ...tion-check-persistence-id-max-sequence.sql | 19 ------------ ...opulate-journal-persistence-ids-table.sql} | 0 ...gger-check-persistence-id-max-sequence.sql | 18 ----------- 8 files changed, 30 insertions(+), 128 deletions(-) delete mode 100644 scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql rename scripts/migration-0.6.0/{6-populate-journal-persistence-ids-table.sql => 4-populate-journal-persistence-ids-table.sql} (100%) delete mode 100644 scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala index 07cd1a4a..297005ae 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala @@ -1,7 +1,5 @@ package akka.persistence.postgres.journal.dao -import java.util.concurrent.atomic.AtomicReference - import akka.persistence.postgres.JournalRow import akka.persistence.postgres.config.JournalConfig import akka.persistence.postgres.db.DbErrors.{ withHandledIndexErrors, withHandledPartitionErrors } @@ -9,6 +7,7 @@ import akka.serialization.Serialization import akka.stream.Materializer import slick.jdbc.JdbcBackend.Database +import java.util.concurrent.atomic.AtomicReference import scala.collection.immutable.{ Nil, Seq } import scala.concurrent.{ ExecutionContext, Future } diff --git a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql index 8d3fe87f..af2f1fbb 100644 --- a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql +++ b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql @@ -66,8 +66,6 @@ CREATE TABLE IF NOT EXISTS public.snapshot ); DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids on public.journal; -DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number on public.journal_persistence_ids; -DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS public.journal_persistence_ids; @@ -79,43 +77,24 @@ CREATE TABLE public.journal_persistence_ids( PRIMARY KEY (persistence_id) ); - CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGGER AS $$ DECLARE BEGIN - INSERT into public.journal_persistence_ids (persistence_id, max_sequence_number, min_ordering, max_ordering) + INSERT into 
public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE - SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, min_ordering = LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); + SET + max_sequence_number = GREATEST(public.journal_persistence_ids.max_sequence_number, NEW.sequence_number), + max_ordering = GREATEST(public.journal_persistence_ids.max_ordering, NEW.ordering), + min_ordering = LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); RETURN NEW; END; $$ LANGUAGE plpgsql; - CREATE TRIGGER trig_update_journal_persistence_ids AFTER INSERT ON public.journal FOR EACH ROW EXECUTE PROCEDURE public.update_journal_persistence_ids(); - - -CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS -$$ -DECLARE -BEGIN - IF NEW.max_sequence_number <= OLD.max_sequence_number THEN - RAISE EXCEPTION 'New max_sequence_number not higher than previous value'; - END IF; - - RETURN NEW; -END; -$$ -LANGUAGE plpgsql; - - -CREATE TRIGGER trig_check_persistence_id_max_sequence_number - BEFORE UPDATE ON public.journal_persistence_ids - FOR EACH ROW - EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); diff --git a/core/src/test/resources/schema/postgres/partitioned-schema.sql b/core/src/test/resources/schema/postgres/partitioned-schema.sql index d77d2d5c..48021542 100644 --- a/core/src/test/resources/schema/postgres/partitioned-schema.sql +++ b/core/src/test/resources/schema/postgres/partitioned-schema.sql @@ -67,8 +67,6 @@ CREATE TABLE IF NOT EXISTS public.snapshot ); DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids on public.journal; -DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number on public.journal_persistence_ids; -DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS public.journal_persistence_ids; @@ -80,43 +78,24 @@ CREATE TABLE public.journal_persistence_ids( PRIMARY KEY (persistence_id) ); - CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGGER AS $$ DECLARE BEGIN - INSERT into public.journal_persistence_ids (persistence_id, max_sequence_number, min_ordering, max_ordering) + INSERT into public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE - SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, min_ordering = LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); + SET + max_sequence_number = GREATEST(public.journal_persistence_ids.max_sequence_number, NEW.sequence_number), + max_ordering = GREATEST(public.journal_persistence_ids.max_ordering, NEW.ordering), + min_ordering = LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); RETURN NEW; END; $$ LANGUAGE plpgsql; - CREATE TRIGGER trig_update_journal_persistence_ids AFTER INSERT ON public.journal FOR EACH ROW EXECUTE PROCEDURE public.update_journal_persistence_ids(); - - -CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS -$$ -DECLARE -BEGIN - IF NEW.max_sequence_number <= OLD.max_sequence_number THEN - RAISE EXCEPTION 'New max_sequence_number not higher than previous value'; - END 
IF; - - RETURN NEW; -END; -$$ -LANGUAGE plpgsql; - - -CREATE TRIGGER trig_check_persistence_id_max_sequence_number - BEFORE UPDATE ON public.journal_persistence_ids - FOR EACH ROW - EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); diff --git a/core/src/test/resources/schema/postgres/plain-schema.sql b/core/src/test/resources/schema/postgres/plain-schema.sql index afe9bbf2..86ba3175 100644 --- a/core/src/test/resources/schema/postgres/plain-schema.sql +++ b/core/src/test/resources/schema/postgres/plain-schema.sql @@ -40,8 +40,6 @@ CREATE TABLE IF NOT EXISTS public.snapshot ); DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids on public.journal; -DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number on public.journal_persistence_ids; -DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS public.journal_persistence_ids; @@ -53,43 +51,24 @@ CREATE TABLE public.journal_persistence_ids( PRIMARY KEY (persistence_id) ); - CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGGER AS $$ DECLARE BEGIN - INSERT into public.journal_persistence_ids (persistence_id, max_sequence_number, min_ordering, max_ordering) + INSERT into public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE - SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, min_ordering = LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); + SET + max_sequence_number = GREATEST(public.journal_persistence_ids.max_sequence_number, NEW.sequence_number), + max_ordering = GREATEST(public.journal_persistence_ids.max_ordering, NEW.ordering), + min_ordering = LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); RETURN NEW; END; $$ LANGUAGE plpgsql; - CREATE TRIGGER trig_update_journal_persistence_ids AFTER INSERT ON public.journal FOR EACH ROW EXECUTE PROCEDURE public.update_journal_persistence_ids(); - - -CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS -$$ -DECLARE -BEGIN - IF NEW.max_sequence_number <= OLD.max_sequence_number THEN - RAISE EXCEPTION 'New max_sequence_number not higher than previous value'; - END IF; - - RETURN NEW; -END; -$$ -LANGUAGE plpgsql; - - -CREATE TRIGGER trig_check_persistence_id_max_sequence_number - BEFORE UPDATE ON public.journal_persistence_ids - FOR EACH ROW - EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); diff --git a/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql b/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql index 8f621095..ec65a32a 100644 --- a/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql +++ b/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql @@ -1,3 +1,4 @@ +-- replace schema value if required CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGGER AS $$ DECLARE @@ -7,30 +8,32 @@ DECLARE j_persistence_id_column CONSTANT TEXT := 'persistence_id'; j_sequence_number_column CONSTANT TEXT := 'sequence_number'; j_ordering_column CONSTANT TEXT := 'ordering'; - jpi_persistence_ids_table_name CONSTANT TEXT := 'journal_persistence_ids'; + jpi_table_name CONSTANT TEXT := 'journal_persistence_ids'; jpi_persistence_id_column CONSTANT 
TEXT := 'persistence_id'; jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; jpi_max_ordering_column CONSTANT TEXT := 'max_ordering'; - jpi_min_ordering_number_column CONSTANT TEXT := 'min_ordering'; + jpi_min_ordering_column CONSTANT TEXT := 'min_ordering'; -- variables j_table TEXT; jpi_table TEXT; + cols TEXT; + vals TEXT; + upds TEXT; sql TEXT; BEGIN j_table := schema || '.' || j_table_name; jpi_table := schema || '.' || jpi_table_name; + cols := jpi_persistence_id_column || ', ' || jpi_max_sequence_number_column || ', ' || jpi_max_ordering_column || ', ' || jpi_min_ordering_column; + vals := '($1).' || j_persistence_id_column || ', ($1).' || j_sequence_number_column || ', ($1).' || j_ordering_column || ',($1).' || j_ordering_column; + upds := jpi_max_sequence_number_column || ' = GREATEST(' || jpi_table || '.' || jpi_max_sequence_number_column || ', ($1).' || j_sequence_number_column || '), ' || + jpi_max_ordering_column || ' = GREATEST(' || jpi_table || '.' || jpi_max_ordering_column || ', ($1).' || j_ordering_column || '), ' || + jpi_min_ordering_column || ' = LEAST(' || jpi_table || '.' || jpi_min_ordering_column || ', ($1).' || j_ordering_column || ')'; - sql := 'INSERT INTO ' || jpi_table || '(' || jpi_persistence_id_column || ',' || jpi_max_sequence_number_column || ',' || jpi_max_ordering_column || ',' || jpi_min_ordering_number_column || ')' || - 'VALUES (NEW.' || j_persistence_id_column || ', NEW.' || j_sequence_number_column || ', NEW.' || j_ordering_column || ', NEW.' || j_ordering_column || ')' || - 'ON CONFLICT (' || jpi_persistence_id_column || ') DO UPDATE' || - 'SET ' || - jpi_max_sequence_number_column || ' = NEW.' || j_sequence_number_column || ',' || - jpi_max_ordering_column || ' = NEW' || j_ordering_column || ',' || - jpi_min_ordering_column || ' = LEAST(' || jpi_table || '.' || jpi_min_ordering_column || ', NEW.' || j_ordering_column || ')'; + sql := 'INSERT INTO ' || jpi_table || ' (' || cols || ') VALUES (' || vals || ') ' || + 'ON CONFLICT (' || jpi_persistence_id_column || ') DO UPDATE SET ' || upds; - EXECUTE sql; - + EXECUTE sql USING NEW; RETURN NEW; END; $$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql b/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql deleted file mode 100644 index 0305cbd9..00000000 --- a/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql +++ /dev/null @@ -1,19 +0,0 @@ --- replace schema value if required -CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS -$$ -DECLARE - -- replace with appropriate values - jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; - - -- variables - sql TEXT; -BEGIN - sql := 'IF NEW.' || jpi_max_sequence_number_column || ' <= OLD.' 
|| jpi_max_sequence_number_column || ' THEN - RAISE EXCEPTION ''New max_sequence_number not higher than previous value''; - END IF;'; - - EXECUTE sql; - - RETURN NEW; -END; -$$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql b/scripts/migration-0.6.0/4-populate-journal-persistence-ids-table.sql similarity index 100% rename from scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql rename to scripts/migration-0.6.0/4-populate-journal-persistence-ids-table.sql diff --git a/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql b/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql deleted file mode 100644 index a7704c68..00000000 --- a/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql +++ /dev/null @@ -1,18 +0,0 @@ -DO $$ -DECLARE - -- replace with appropriate values - schema CONSTANT TEXT := 'public'; - jpi_table_name CONSTANT TEXT := 'journal_persistence_ids'; - - -- variables - jpi_table TEXT; - sql TEXT; -BEGIN - jpi_table := schema || '.' || jpi_table_name; - sql := 'CREATE TRIGGER trig_check_persistence_id_max_sequence_number - BEFORE UPDATE ON ' || jpi_table || ' FOR EACH ROW - EXECUTE PROCEDURE ' || schema || '.check_persistence_id_max_sequence_number()'; - - EXECUTE sql; -END; -$$ LANGUAGE plpgsql; From 181b520bdf42ec91b8a64eb49c8aeb91c5d78aa0 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Mon, 11 Oct 2021 17:36:28 +0100 Subject: [PATCH 04/34] Increase patience config timeout --- .../akka/persistence/postgres/SharedActorSystemTestSpec.scala | 4 ++-- .../persistence/postgres/SingleActorSystemPerTestSpec.scala | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/core/src/test/scala/akka/persistence/postgres/SharedActorSystemTestSpec.scala b/core/src/test/scala/akka/persistence/postgres/SharedActorSystemTestSpec.scala index b4de7585..c6cf78d7 100644 --- a/core/src/test/scala/akka/persistence/postgres/SharedActorSystemTestSpec.scala +++ b/core/src/test/scala/akka/persistence/postgres/SharedActorSystemTestSpec.scala @@ -29,8 +29,8 @@ abstract class SharedActorSystemTestSpec(val config: Config) extends SimpleSpec implicit lazy val mat: Materializer = SystemMaterializer(system).materializer implicit lazy val ec: ExecutionContext = system.dispatcher - implicit val pc: PatienceConfig = PatienceConfig(timeout = 1.minute) - implicit val timeout: Timeout = Timeout(1.minute) + implicit val pc: PatienceConfig = PatienceConfig(timeout = 2.minutes) + implicit val timeout = Timeout(1.minute) lazy val serialization = SerializationExtension(system) diff --git a/core/src/test/scala/akka/persistence/postgres/SingleActorSystemPerTestSpec.scala b/core/src/test/scala/akka/persistence/postgres/SingleActorSystemPerTestSpec.scala index d3287e68..129aef75 100644 --- a/core/src/test/scala/akka/persistence/postgres/SingleActorSystemPerTestSpec.scala +++ b/core/src/test/scala/akka/persistence/postgres/SingleActorSystemPerTestSpec.scala @@ -26,8 +26,8 @@ abstract class SingleActorSystemPerTestSpec(val config: Config) conf.withValue(path, configValue) }) - implicit val pc: PatienceConfig = PatienceConfig(timeout = 1.minute) - implicit val timeout: Timeout = Timeout(1.minute) + implicit val pc: PatienceConfig = PatienceConfig(timeout = 2.minutes) + implicit val timeout: Timeout = Timeout(1.minutes) val cfg: Config = config.getConfig("postgres-journal") val journalConfig = new JournalConfig(cfg) From 34a9757ee590220a55ec6e96c5f449612a546f91 Mon Sep 17 
00:00:00 2001 From: Tiago Mota Date: Tue, 12 Oct 2021 17:29:22 +0100 Subject: [PATCH 05/34] Styling --- .../resources/schema/postgres/nested-partitions-schema.sql | 4 ++-- .../src/test/resources/schema/postgres/partitioned-schema.sql | 4 ++-- core/src/test/resources/schema/postgres/plain-schema.sql | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql index af2f1fbb..c207a029 100644 --- a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql +++ b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql @@ -65,7 +65,7 @@ CREATE TABLE IF NOT EXISTS public.snapshot PRIMARY KEY (persistence_id, sequence_number) ); -DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids on public.journal; +DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids ON public.journal; DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS public.journal_persistence_ids; @@ -81,7 +81,7 @@ CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGG $$ DECLARE BEGIN - INSERT into public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) + INSERT INTO public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE SET diff --git a/core/src/test/resources/schema/postgres/partitioned-schema.sql b/core/src/test/resources/schema/postgres/partitioned-schema.sql index 48021542..3745692d 100644 --- a/core/src/test/resources/schema/postgres/partitioned-schema.sql +++ b/core/src/test/resources/schema/postgres/partitioned-schema.sql @@ -66,7 +66,7 @@ CREATE TABLE IF NOT EXISTS public.snapshot PRIMARY KEY (persistence_id, sequence_number) ); -DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids on public.journal; +DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids ON public.journal; DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS public.journal_persistence_ids; @@ -82,7 +82,7 @@ CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGG $$ DECLARE BEGIN - INSERT into public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) + INSERT INTO public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE SET diff --git a/core/src/test/resources/schema/postgres/plain-schema.sql b/core/src/test/resources/schema/postgres/plain-schema.sql index 86ba3175..b80579e7 100644 --- a/core/src/test/resources/schema/postgres/plain-schema.sql +++ b/core/src/test/resources/schema/postgres/plain-schema.sql @@ -39,7 +39,7 @@ CREATE TABLE IF NOT EXISTS public.snapshot PRIMARY KEY (persistence_id, sequence_number) ); -DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids on public.journal; +DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids ON public.journal; DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS public.journal_persistence_ids; @@ -55,7 +55,7 @@ CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGG $$ DECLARE BEGIN - INSERT into public.journal_persistence_ids (persistence_id, 
max_sequence_number, max_ordering, min_ordering) + INSERT INTO public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE SET From b170202178d53f2a1c7dc7aaafbd0e3adc84eb7f Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Tue, 12 Oct 2021 17:30:49 +0100 Subject: [PATCH 06/34] Use journal_persistence_ids on ReadJournal#allPersistenceIds query. Add required changes to migration artifact to support journal_persistence_ids table --- .../config/AkkaPersistenceConfig.scala | 1 + .../postgres/journal/dao/JournalQueries.scala | 13 ---- .../query/dao/ByteArrayReadJournalDao.scala | 2 +- .../query/dao/ReadJournalQueries.scala | 20 ++++-- .../journal/dao/JournalQueriesTest.scala | 10 --- .../query/dao/ReadJournalQueriesTest.scala | 2 +- .../journal/Jdbc4JournalMigration.scala | 2 + .../migration/journal/JournalSchema.scala | 61 +++++++++++++++++++ .../src/test/resources/base-migration.conf | 10 +++ .../postgres/migration/MigrationTest.scala | 6 ++ 10 files changed, 97 insertions(+), 30 deletions(-) diff --git a/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala b/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala index 99f343d4..fe92acbc 100644 --- a/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala +++ b/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala @@ -176,6 +176,7 @@ case class JournalSequenceRetrievalConfig( class ReadJournalConfig(config: Config) { val journalTableConfiguration = new JournalTableConfiguration(config) + val journalPersistenceIdsTableConfiguration = new JournalPersistenceIdsTableConfiguration(config) val journalSequenceRetrievalConfiguration = JournalSequenceRetrievalConfig(config) val pluginConfig = new ReadJournalPluginConfig(config) val tagsConfig = new TagsConfig(config) diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala index 93e2cfcd..5359ce5a 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala @@ -21,9 +21,6 @@ class JournalQueries( def writeJournalRows(xs: Seq[JournalRow]): FixedSqlAction[Option[Int], NoStream, slick.dbio.Effect.Write] = compiledJournalTable ++= xs.sortBy(_.sequenceNumber) - private def selectAllJournalForPersistenceId(persistenceId: Rep[String]) = - journalTable.filter(_.persistenceId === persistenceId).sortBy(_.sequenceNumber.desc) - def delete(persistenceId: String, toSequenceNr: Long): FixedSqlAction[Int, NoStream, slick.dbio.Effect.Write] = { journalTable.filter(_.persistenceId === persistenceId).filter(_.sequenceNumber <= toSequenceNr).delete } @@ -61,16 +58,6 @@ class JournalQueries( val highestMarkedSequenceNrForPersistenceId = Compiled(_highestMarkedSequenceNrForPersistenceId _) - private def _selectByPersistenceIdAndMaxSequenceNumber(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) = - selectAllJournalForPersistenceId(persistenceId).filter(_.sequenceNumber <= maxSequenceNr) - - val selectByPersistenceIdAndMaxSequenceNumber = Compiled(_selectByPersistenceIdAndMaxSequenceNumber _) - - private def _allPersistenceIdsDistinct: Query[Rep[String], String, Seq] = - journalTable.map(_.persistenceId).distinct - - val 
allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct) - private def _messagesQuery( persistenceId: Rep[String], fromSequenceNr: Rep[Long], diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala index 778ba038..be728730 100644 --- a/core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala @@ -32,7 +32,7 @@ trait BaseByteArrayReadJournalDao extends ReadJournalDao with BaseJournalDaoWith import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ override def allPersistenceIdsSource(max: Long): Source[String, NotUsed] = - Source.fromPublisher(db.stream(queries.allPersistenceIdsDistinct(max).result)) + Source.fromPublisher(db.stream(queries.allPersistenceIds(max).result)) override def eventsByTag( tag: String, diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala index 380b9b71..539a0ff4 100644 --- a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala @@ -7,22 +7,32 @@ package akka.persistence.postgres package query.dao import akka.persistence.postgres.config.ReadJournalConfig -import akka.persistence.postgres.journal.dao.{ FlatJournalTable, JournalTable } +import akka.persistence.postgres.journal.dao.{ FlatJournalTable, JournalPersistenceIdsTable, JournalTable } class ReadJournalQueries(val readJournalConfig: ReadJournalConfig) { import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ private val journalTable: TableQuery[JournalTable] = FlatJournalTable(readJournalConfig.journalTableConfiguration) + private val journalPersistenceIdsTable: TableQuery[JournalPersistenceIdsTable] = JournalPersistenceIdsTable( + readJournalConfig.journalPersistenceIdsTableConfiguration) - private def _allPersistenceIdsDistinct(max: ConstColumn[Long]): Query[Rep[String], String, Seq] = - baseTableQuery().map(_.persistenceId).distinct.take(max) + private def _allPersistenceIds(max: ConstColumn[Long]): Query[Rep[String], String, Seq] = + if (readJournalConfig.includeDeleted) + journalPersistenceIdsTable.map(_.persistenceId).take(max) + else + journalPersistenceIdsTable + .joinLeft(journalTable.filter(_.deleted === false)) + .on(_.persistenceId === _.persistenceId) + .filter(_._2.isDefined) + .map(_._1.persistenceId) + .take(max) + + val allPersistenceIds = Compiled(_allPersistenceIds _) private def baseTableQuery() = if (readJournalConfig.includeDeleted) journalTable else journalTable.filter(_.deleted === false) - val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct _) - private def _messagesQuery( persistenceId: Rep[String], fromSequenceNr: Rep[Long], diff --git a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala index 0a16f648..c3cc7d64 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala @@ -6,10 +6,6 @@ import io.circe.{ Json, JsonObject } class JournalQueriesTest extends BaseQueryTest { - it should "produce SQL query for distinct 
persistenceID" in withJournalQueries { queries => - queries.allPersistenceIdsDistinct shouldBeSQL """select distinct "persistence_id" from "journal"""" - } - it should "create SQL query for highestMarkedSequenceNrForPersistenceId" in withJournalQueries { queries => queries.highestMarkedSequenceNrForPersistenceId( "aaa") shouldBeSQL """select max("sequence_number") from "journal" where ("deleted" = true) and ("persistence_id" = ?)""" @@ -20,12 +16,6 @@ class JournalQueriesTest extends BaseQueryTest { "aaa") shouldBeSQL """select "max_sequence_number" from "journal_persistence_ids" where "persistence_id" = ? limit 1""" } - it should "create SQL query for selectByPersistenceIdAndMaxSequenceNumber" in withJournalQueries { queries => - queries.selectByPersistenceIdAndMaxSequenceNumber( - "aaa", - 11L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where ("persistence_id" = ?) and ("sequence_number" <= ?) order by "sequence_number" desc""" - } - it should "create SQL query for messagesQuery" in withJournalQueries { queries => queries.messagesQuery( "aaa", diff --git a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala index 54c27721..18f09db5 100644 --- a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala @@ -5,7 +5,7 @@ import akka.persistence.postgres.util.BaseQueryTest class ReadJournalQueriesTest extends BaseQueryTest { it should "create SQL query for allPersistenceIdsDistinct" in withReadJournalQueries { queries => - queries.allPersistenceIdsDistinct(23L) shouldBeSQL """select distinct "persistence_id" from "journal" limit ?""" + queries.allPersistenceIds(23L) shouldBeSQL """select "persistence_id" from "journal_persistence_ids" limit ?""" } it should "create SQL query for messagesQuery" in withReadJournalQueries { queries => diff --git a/migration/src/main/scala/akka/persistence/postgres/migration/journal/Jdbc4JournalMigration.scala b/migration/src/main/scala/akka/persistence/postgres/migration/journal/Jdbc4JournalMigration.scala index ed8e9808..9fbb33b8 100644 --- a/migration/src/main/scala/akka/persistence/postgres/migration/journal/Jdbc4JournalMigration.scala +++ b/migration/src/main/scala/akka/persistence/postgres/migration/journal/Jdbc4JournalMigration.scala @@ -69,6 +69,7 @@ class Jdbc4JournalMigration(globalConfig: Config, tempTableName: String = "tmp_j for { _ <- journalSchema.createTable _ <- journalSchema.createTagsTable + _ <- journalSchema.createJournalPersistenceIdsTable } yield () } @@ -106,6 +107,7 @@ class Jdbc4JournalMigration(globalConfig: Config, tempTableName: String = "tmp_j val fut = for { _ <- db.run(createTables.transactionally) + _ <- db.run(journalSchema.createTriggers.transactionally) cnt <- dml.runReduce(_ + _) _ <- db.run(journalSchema.createSequence.transactionally) _ <- db.run(journalSchema.createIndexes.transactionally) diff --git a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala index c59b91df..77a9dbf9 100644 --- a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala +++ b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala @@ -24,6 
+24,23 @@ private[journal] trait JournalSchema { def getTable: TableQuery[TempJournalTable] def createTable: DBIOAction[Unit, NoStream, Effect.Write] + def createJournalPersistenceIdsTable: DBIOAction[Unit, NoStream, Effect.Write] = { + val journalPersistenceIdsTableCfg = journalCfg.journalPersistenceIdsTableConfiguration + val fullTableName = + s"${journalPersistenceIdsTableCfg.schemaName.getOrElse("public")}.${journalPersistenceIdsTableCfg.tableName}" + + import journalPersistenceIdsTableCfg.columnNames._ + for { + _ <- sqlu"""CREATE TABLE #$fullTableName ( + #$persistenceId TEXT NOT NULL, + #$maxSequenceNumber BIGINT NOT NULL, + #$maxOrdering BIGINT NOT NULL, + #$minOrdering BIGINT NOT NULL, + PRIMARY KEY (#$persistenceId) + )""" + } yield () + } + def createTagsTable: DBIOAction[Unit, NoStream, Effect.Write] = { val tagsTableConfig = journalCfg.tagsTableConfiguration import tagsTableConfig.columnNames._ @@ -71,6 +88,50 @@ private[journal] trait JournalSchema { _ <- sqlu"""ALTER INDEX #${fullTmpTableName}_#${tags}_idx RENAME TO #${journalTableCfg.tableName}_#${tags}_idx""" _ <- sqlu"""ALTER INDEX #${fullTmpTableName}_pkey RENAME TO #${journalTableCfg.tableName}_pkey""" } yield () + + def createTriggers: DBIOAction[Unit, NoStream, Effect.Write] = { + val journalTableCfg = journalCfg.journalTableConfiguration + val journalPersistenceIdsTableCfg = journalCfg.journalPersistenceIdsTableConfiguration + val schema = journalPersistenceIdsTableCfg.schemaName.getOrElse("public") + val fullTableName = s"$schema.${journalPersistenceIdsTableCfg.tableName}" + val journalFullTableName = s"$schema.${journalTableCfg.tableName}" + + import journalPersistenceIdsTableCfg.columnNames._ + import journalTableCfg.columnNames.{ persistenceId => jPersistenceId, _ } + + for { + _ <- sqlu""" + CREATE OR REPLACE FUNCTION #$schema.update_journal_persistence_ids() RETURNS TRIGGER AS $$$$ + DECLARE + BEGIN + INSERT INTO #$fullTableName (#$persistenceId, #$maxSequenceNumber, #$maxOrdering, #$minOrdering) + VALUES (NEW.#$jPersistenceId, NEW.#$sequenceNumber, NEW.#$ordering, NEW.#$ordering) + ON CONFLICT (#$persistenceId) DO UPDATE + SET + #$maxSequenceNumber = GREATEST(#$fullTableName.#$maxSequenceNumber, NEW.#$sequenceNumber), + #$maxOrdering = GREATEST(#$fullTableName.#$maxOrdering, NEW.#$ordering), + #$minOrdering = LEAST(#$fullTableName.#$minOrdering, NEW.#$ordering); + + RETURN NEW; + END; + $$$$ LANGUAGE plpgsql; + """ + + _ <- sqlu""" + CREATE TRIGGER trig_update_journal_persistence_ids + AFTER INSERT ON #$journalFullTableName + FOR EACH ROW + EXECUTE PROCEDURE #$schema.update_journal_persistence_ids(); + """ + + _ <- sqlu""" + CREATE TRIGGER trig_update_journal_persistence_ids + AFTER INSERT ON #$fullTmpTableName + FOR EACH ROW + EXECUTE PROCEDURE #$schema.update_journal_persistence_ids(); + """ + } yield () + } } private[journal] object JournalSchema { diff --git a/migration/src/test/resources/base-migration.conf b/migration/src/test/resources/base-migration.conf index 13380519..c104d026 100644 --- a/migration/src/test/resources/base-migration.conf +++ b/migration/src/test/resources/base-migration.conf @@ -63,6 +63,16 @@ postgres-journal { size = 50 } } + journalPersistenceIds { + schemaName = "migration" + tableName = "fancy_journal_persistence_ids" + columnNames = { + persistenceId = "jpi_per_id" + maxSequenceNumber = "jpi_max_seq_num" + maxOrdering = "jpi_max_ord" + minOrdering = "jpi_min_ord" + } + } tags { schemaName = "migration" tableName = "fancy_tags" diff --git 
a/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala b/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala index fe36fd11..2fa8e3a3 100644 --- a/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala +++ b/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala @@ -177,6 +177,9 @@ trait PrepareDatabase extends BeforeAndAfterEach with BeforeAndAfterAll with Sca val journalTableConfig = journalConfig.journalTableConfiguration val journalTableName = journalTableConfig.tableName + val journalPersistenceIdsTableConfig = journalConfig.journalPersistenceIdsTableConfiguration + val journalPersistenceIdsTableName = journalPersistenceIdsTableConfig.tableName + val tagsTableConfig = journalConfig.tagsTableConfiguration import journalTableConfig.columnNames.{ tags => tagsCol, _ } for { @@ -185,6 +188,9 @@ trait PrepareDatabase extends BeforeAndAfterEach with BeforeAndAfterAll with Sca _ <- sqlu"""DROP TABLE IF EXISTS migration.old_#$journalTableName""" _ <- sqlu"""DROP TABLE IF EXISTS migration.#$tempJournalTableName""" _ <- sqlu"""DROP TABLE IF EXISTS migration.#$journalTableName""" + _ <- sqlu"""DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids ON migration.#$journalTableName""" + _ <- sqlu"""DROP FUNCTION IF EXISTS migration.update_journal_persistence_ids()""" + _ <- sqlu"""DROP TABLE IF EXISTS migration.#$journalPersistenceIdsTableName""" _ <- sqlu"""CREATE TABLE IF NOT EXISTS migration.#$journalTableName ( #$ordering BIGSERIAL, From cf781c0496569f95d94b35d77cb37992e9ae53b1 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Wed, 13 Oct 2021 08:36:43 +0100 Subject: [PATCH 07/34] Use journal_persistence_ids for maxJournalSequenceQuery --- .../persistence/postgres/query/dao/ReadJournalQueries.scala | 2 +- .../persistence/postgres/query/dao/ReadJournalQueriesTest.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala index 539a0ff4..6aa36128 100644 --- a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala @@ -65,6 +65,6 @@ class ReadJournalQueries(val readJournalConfig: ReadJournalConfig) { val orderingByOrdering = Compiled(_journalSequenceQuery _) val maxOrdering = Compiled { - journalTable.map(_.ordering).max.getOrElse(0L) + journalPersistenceIdsTable.map(_.maxOrdering).max.getOrElse(0L) } } diff --git a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala index 18f09db5..d90a943a 100644 --- a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala @@ -30,7 +30,7 @@ class ReadJournalQueriesTest extends BaseQueryTest { } it should "create SQL query for maxJournalSequenceQuery" in withReadJournalQueries { queries => - queries.maxOrdering shouldBeSQL """select max("ordering") from "journal"""" + queries.maxOrdering shouldBeSQL """select max("max_ordering") from "journal_persistence_ids"""" } private def withReadJournalQueries(f: ReadJournalQueries => Unit): Unit = { From 949383e9a6b057a588f7ad846ea510bc1eb75f49 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: 
Wed, 13 Oct 2021 15:04:30 +0100 Subject: [PATCH 08/34] Revert usage of journal_persistence_ids on other queries besides highestSequenceNrForPersistenceId --- .../query/dao/ByteArrayReadJournalDao.scala | 2 +- .../query/dao/ReadJournalQueries.scala | 18 ++++-------------- .../query/dao/ReadJournalQueriesTest.scala | 4 ++-- ...populate-journal-persistence-ids-table.sql} | 0 4 files changed, 7 insertions(+), 17 deletions(-) rename scripts/migration-0.6.0/{4-populate-journal-persistence-ids-table.sql => 6-populate-journal-persistence-ids-table.sql} (100%) diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala index be728730..778ba038 100644 --- a/core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala @@ -32,7 +32,7 @@ trait BaseByteArrayReadJournalDao extends ReadJournalDao with BaseJournalDaoWith import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ override def allPersistenceIdsSource(max: Long): Source[String, NotUsed] = - Source.fromPublisher(db.stream(queries.allPersistenceIds(max).result)) + Source.fromPublisher(db.stream(queries.allPersistenceIdsDistinct(max).result)) override def eventsByTag( tag: String, diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala index 6aa36128..d0e3b5b0 100644 --- a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala @@ -13,21 +13,11 @@ class ReadJournalQueries(val readJournalConfig: ReadJournalConfig) { import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ private val journalTable: TableQuery[JournalTable] = FlatJournalTable(readJournalConfig.journalTableConfiguration) - private val journalPersistenceIdsTable: TableQuery[JournalPersistenceIdsTable] = JournalPersistenceIdsTable( - readJournalConfig.journalPersistenceIdsTableConfiguration) - private def _allPersistenceIds(max: ConstColumn[Long]): Query[Rep[String], String, Seq] = - if (readJournalConfig.includeDeleted) - journalPersistenceIdsTable.map(_.persistenceId).take(max) - else - journalPersistenceIdsTable - .joinLeft(journalTable.filter(_.deleted === false)) - .on(_.persistenceId === _.persistenceId) - .filter(_._2.isDefined) - .map(_._1.persistenceId) - .take(max) + private def _allPersistenceIdsDistinct(max: ConstColumn[Long]): Query[Rep[String], String, Seq] = + baseTableQuery().map(_.persistenceId).distinct.take(max) - val allPersistenceIds = Compiled(_allPersistenceIds _) + val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct _) private def baseTableQuery() = if (readJournalConfig.includeDeleted) journalTable @@ -65,6 +55,6 @@ class ReadJournalQueries(val readJournalConfig: ReadJournalConfig) { val orderingByOrdering = Compiled(_journalSequenceQuery _) val maxOrdering = Compiled { - journalPersistenceIdsTable.map(_.maxOrdering).max.getOrElse(0L) + journalTable.map(_.ordering).max.getOrElse(0L) } } diff --git a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala index d90a943a..54c27721 100644 --- 
a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala @@ -5,7 +5,7 @@ import akka.persistence.postgres.util.BaseQueryTest class ReadJournalQueriesTest extends BaseQueryTest { it should "create SQL query for allPersistenceIdsDistinct" in withReadJournalQueries { queries => - queries.allPersistenceIds(23L) shouldBeSQL """select "persistence_id" from "journal_persistence_ids" limit ?""" + queries.allPersistenceIdsDistinct(23L) shouldBeSQL """select distinct "persistence_id" from "journal" limit ?""" } it should "create SQL query for messagesQuery" in withReadJournalQueries { queries => @@ -30,7 +30,7 @@ class ReadJournalQueriesTest extends BaseQueryTest { } it should "create SQL query for maxJournalSequenceQuery" in withReadJournalQueries { queries => - queries.maxOrdering shouldBeSQL """select max("max_ordering") from "journal_persistence_ids"""" + queries.maxOrdering shouldBeSQL """select max("ordering") from "journal"""" } private def withReadJournalQueries(f: ReadJournalQueries => Unit): Unit = { diff --git a/scripts/migration-0.6.0/4-populate-journal-persistence-ids-table.sql b/scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql similarity index 100% rename from scripts/migration-0.6.0/4-populate-journal-persistence-ids-table.sql rename to scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql From 09bea4557bb31801650577fe3ef1c7e20438d620 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Wed, 13 Oct 2021 15:05:32 +0100 Subject: [PATCH 09/34] Keep highestSequenceNrForPersistenceId journal query and comment the new one which uses journal_persistence_ids table --- .../postgres/journal/dao/BaseByteArrayJournalDao.scala | 2 +- .../akka/persistence/postgres/journal/dao/JournalQueries.scala | 3 ++- .../persistence/postgres/journal/dao/JournalQueriesTest.scala | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala index ed0ae9f6..e776619a 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala @@ -139,7 +139,7 @@ trait BaseByteArrayJournalDao extends JournalDaoWithUpdates with BaseJournalDaoW override def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = for { - maybeHighestSeqNo <- db.run(queries.highestSequenceNrForPersistenceId(persistenceId).result.headOption) + maybeHighestSeqNo <- db.run(queries.highestSequenceNrForPersistenceId(persistenceId).result) //.headOption) } yield maybeHighestSeqNo.getOrElse(0L) override def messages( diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala index 5359ce5a..f7a90e95 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala @@ -48,7 +48,8 @@ class JournalQueries( .update(true) private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]) = { - journalPersistenceIdsTable.filter(_.persistenceId === persistenceId).map(_.maxSequenceNumber).take(1) + journalTable.filter(_.persistenceId === 
persistenceId).map(_.sequenceNumber).max + // journalPersistenceIdsTable.filter(_.persistenceId === persistenceId).map(_.maxSequenceNumber).take(1) } private def _highestMarkedSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] = diff --git a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala index c3cc7d64..7a334223 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala @@ -13,7 +13,8 @@ class JournalQueriesTest extends BaseQueryTest { it should "create SQL query for highestSequenceNrForPersistenceId" in withJournalQueries { queries => queries.highestSequenceNrForPersistenceId( - "aaa") shouldBeSQL """select "max_sequence_number" from "journal_persistence_ids" where "persistence_id" = ? limit 1""" + "aaa") shouldBeSQL """select max("sequence_number") from "journal" where "persistence_id" = ?""" + // queries.highestSequenceNrForPersistenceId("aaa") shouldBeSQL """select "max_sequence_number" from "journal_persistence_ids" where "persistence_id" = ? limit 1""" } it should "create SQL query for messagesQuery" in withJournalQueries { queries => From 41cee5df59e8bf9d30188b47b92894a6e8c4df40 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Wed, 13 Oct 2021 15:06:52 +0100 Subject: [PATCH 10/34] Re-add trigger that checks if max-sequence-number on journal_persistence_ids is bigger, before updating the table. --- .../postgres/nested-partitions-schema.sql | 25 +++++++++++++++++-- .../schema/postgres/partitioned-schema.sql | 25 +++++++++++++++++-- .../schema/postgres/plain-schema.sql | 25 +++++++++++++++++-- .../migration/journal/JournalSchema.scala | 24 ++++++++++++++++-- .../postgres/migration/MigrationTest.scala | 3 +++ ...unction-update-journal-persistence-ids.sql | 4 +-- ...tion-check-persistence-id-max-sequence.sql | 18 +++++++++++++ ...gger-check-persistence-id-max-sequence.sql | 19 ++++++++++++++ 8 files changed, 133 insertions(+), 10 deletions(-) create mode 100644 scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql create mode 100644 scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql diff --git a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql index c207a029..fa4f347b 100644 --- a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql +++ b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql @@ -65,6 +65,8 @@ CREATE TABLE IF NOT EXISTS public.snapshot PRIMARY KEY (persistence_id, sequence_number) ); +DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON public.journal_persistence_ids; +DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids ON public.journal; DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS public.journal_persistence_ids; @@ -85,8 +87,8 @@ BEGIN VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE SET - max_sequence_number = GREATEST(public.journal_persistence_ids.max_sequence_number, NEW.sequence_number), - max_ordering = GREATEST(public.journal_persistence_ids.max_ordering, NEW.ordering), + max_sequence_number = NEW.sequence_number, + 
max_ordering = NEW.ordering, min_ordering = LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); RETURN NEW; @@ -98,3 +100,22 @@ CREATE TRIGGER trig_update_journal_persistence_ids AFTER INSERT ON public.journal FOR EACH ROW EXECUTE PROCEDURE public.update_journal_persistence_ids(); + +CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS +$$ +DECLARE +BEGIN + IF NEW.max_sequence_number <= OLD.max_sequence_number THEN + RAISE EXCEPTION 'New max_sequence_number not higher than previous value'; + END IF; + + RETURN NEW; +END; +$$ +LANGUAGE plpgsql; + + +CREATE TRIGGER trig_check_persistence_id_max_sequence_number + BEFORE UPDATE ON public.journal_persistence_ids + FOR EACH ROW + EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); diff --git a/core/src/test/resources/schema/postgres/partitioned-schema.sql b/core/src/test/resources/schema/postgres/partitioned-schema.sql index 3745692d..ce28662e 100644 --- a/core/src/test/resources/schema/postgres/partitioned-schema.sql +++ b/core/src/test/resources/schema/postgres/partitioned-schema.sql @@ -66,6 +66,8 @@ CREATE TABLE IF NOT EXISTS public.snapshot PRIMARY KEY (persistence_id, sequence_number) ); +DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON public.journal_persistence_ids; +DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids ON public.journal; DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS public.journal_persistence_ids; @@ -86,8 +88,8 @@ BEGIN VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE SET - max_sequence_number = GREATEST(public.journal_persistence_ids.max_sequence_number, NEW.sequence_number), - max_ordering = GREATEST(public.journal_persistence_ids.max_ordering, NEW.ordering), + max_sequence_number = NEW.sequence_number, + max_ordering = NEW.ordering, min_ordering = LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); RETURN NEW; @@ -99,3 +101,22 @@ CREATE TRIGGER trig_update_journal_persistence_ids AFTER INSERT ON public.journal FOR EACH ROW EXECUTE PROCEDURE public.update_journal_persistence_ids(); + +CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS +$$ +DECLARE +BEGIN + IF NEW.max_sequence_number <= OLD.max_sequence_number THEN + RAISE EXCEPTION 'New max_sequence_number not higher than previous value'; + END IF; + + RETURN NEW; +END; +$$ +LANGUAGE plpgsql; + + +CREATE TRIGGER trig_check_persistence_id_max_sequence_number + BEFORE UPDATE ON public.journal_persistence_ids + FOR EACH ROW + EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); diff --git a/core/src/test/resources/schema/postgres/plain-schema.sql b/core/src/test/resources/schema/postgres/plain-schema.sql index b80579e7..7faaf53e 100644 --- a/core/src/test/resources/schema/postgres/plain-schema.sql +++ b/core/src/test/resources/schema/postgres/plain-schema.sql @@ -39,6 +39,8 @@ CREATE TABLE IF NOT EXISTS public.snapshot PRIMARY KEY (persistence_id, sequence_number) ); +DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON public.journal_persistence_ids; +DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids ON public.journal; DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS 
public.journal_persistence_ids; @@ -59,8 +61,8 @@ BEGIN VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE SET - max_sequence_number = GREATEST(public.journal_persistence_ids.max_sequence_number, NEW.sequence_number), - max_ordering = GREATEST(public.journal_persistence_ids.max_ordering, NEW.ordering), + max_sequence_number = NEW.sequence_number, + max_ordering = NEW.ordering, min_ordering = LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); RETURN NEW; @@ -72,3 +74,22 @@ CREATE TRIGGER trig_update_journal_persistence_ids AFTER INSERT ON public.journal FOR EACH ROW EXECUTE PROCEDURE public.update_journal_persistence_ids(); + +CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS +$$ +DECLARE +BEGIN + IF NEW.max_sequence_number <= OLD.max_sequence_number THEN + RAISE EXCEPTION 'New max_sequence_number not higher than previous value'; + END IF; + + RETURN NEW; +END; +$$ +LANGUAGE plpgsql; + + +CREATE TRIGGER trig_check_persistence_id_max_sequence_number + BEFORE UPDATE ON public.journal_persistence_ids + FOR EACH ROW + EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); diff --git a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala index 77a9dbf9..06754505 100644 --- a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala +++ b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala @@ -108,8 +108,8 @@ private[journal] trait JournalSchema { VALUES (NEW.#$jPersistenceId, NEW.#$sequenceNumber, NEW.#$ordering, NEW.#$ordering) ON CONFLICT (#$persistenceId) DO UPDATE SET - #$maxSequenceNumber = GREATEST(#$fullTableName.#$maxSequenceNumber, NEW.#$sequenceNumber), - #$maxOrdering = GREATEST(#$fullTableName.#$maxOrdering, NEW.#$ordering), + #$maxSequenceNumber = NEW.#$sequenceNumber, + #$maxOrdering = NEW.#$ordering, #$minOrdering = LEAST(#$fullTableName.#$minOrdering, NEW.#$ordering); RETURN NEW; @@ -130,6 +130,26 @@ private[journal] trait JournalSchema { FOR EACH ROW EXECUTE PROCEDURE #$schema.update_journal_persistence_ids(); """ + + _ <- sqlu""" + CREATE OR REPLACE FUNCTION #$schema.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS $$$$ + DECLARE + BEGIN + IF NEW.#$maxSequenceNumber <= OLD.#$maxSequenceNumber THEN + RAISE EXCEPTION 'New max_sequence_number not higher than previous value'; + END IF; + + RETURN NEW; + END; + $$$$ LANGUAGE plpgsql; + """ + + _ <- sqlu""" + CREATE TRIGGER trig_check_persistence_id_max_sequence_number + BEFORE UPDATE ON #$fullTableName + FOR EACH ROW + EXECUTE PROCEDURE #$schema.check_persistence_id_max_sequence_number(); + """ } yield () } } diff --git a/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala b/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala index 2fa8e3a3..bd4da7bc 100644 --- a/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala +++ b/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala @@ -190,6 +190,9 @@ trait PrepareDatabase extends BeforeAndAfterEach with BeforeAndAfterAll with Sca _ <- sqlu"""DROP TABLE IF EXISTS migration.#$journalTableName""" _ <- sqlu"""DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids ON migration.#$journalTableName""" _ <- sqlu"""DROP FUNCTION IF EXISTS 
migration.update_journal_persistence_ids()""" + _ <- + sqlu"""DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON migration.#$journalPersistenceIdsTableName""" + _ <- sqlu"""DROP FUNCTION IF EXISTS migration.check_persistence_id_max_sequence_number()""" _ <- sqlu"""DROP TABLE IF EXISTS migration.#$journalPersistenceIdsTableName""" _ <- sqlu"""CREATE TABLE IF NOT EXISTS migration.#$journalTableName ( diff --git a/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql b/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql index ec65a32a..61a11de1 100644 --- a/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql +++ b/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql @@ -26,8 +26,8 @@ BEGIN jpi_table := schema || '.' || jpi_table_name; cols := jpi_persistence_id_column || ', ' || jpi_max_sequence_number_column || ', ' || jpi_max_ordering_column || ', ' || jpi_min_ordering_column; vals := '($1).' || j_persistence_id_column || ', ($1).' || j_sequence_number_column || ', ($1).' || j_ordering_column || ',($1).' || j_ordering_column; - upds := jpi_max_sequence_number_column || ' = GREATEST(' || jpi_table || '.' || jpi_max_sequence_number_column || ', ($1).' || j_sequence_number_column || '), ' || - jpi_max_ordering_column || ' = GREATEST(' || jpi_table || '.' || jpi_max_ordering_column || ', ($1).' || j_ordering_column || '), ' || + upds := jpi_max_sequence_number_column || ' = ($1).' || j_sequence_number_column || ', ' || + jpi_max_ordering_column || ' = ($1).' || j_ordering_column || ', ' || jpi_min_ordering_column || ' = LEAST(' || jpi_table || '.' || jpi_min_ordering_column || ', ($1).' || j_ordering_column || ')'; sql := 'INSERT INTO ' || jpi_table || ' (' || cols || ') VALUES (' || vals || ') ' || diff --git a/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql b/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql new file mode 100644 index 00000000..3846e1ce --- /dev/null +++ b/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql @@ -0,0 +1,18 @@ +-- replace schema value if required +CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS +$$ +DECLARE + -- replace with appropriate values + jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; + + -- variables + sql TEXT; +BEGIN + sql := 'IF NEW.' || jpi_max_sequence_number_column || ' <= OLD.' || jpi_max_sequence_number_column || ' THEN + RAISE EXCEPTION ''New max_sequence_number not higher than previous value''; + END IF;'; + + EXECUTE sql USING NEW; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql b/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql new file mode 100644 index 00000000..f20618d0 --- /dev/null +++ b/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql @@ -0,0 +1,19 @@ +DO $$ +DECLARE + -- replace with appropriate values + schema CONSTANT TEXT := 'public'; + jpi_table_name CONSTANT TEXT := 'journal_persistence_ids'; + + -- variables + jpi_table TEXT; + sql TEXT; +BEGIN + jpi_table := schema || '.' 
|| jpi_table_name; + + sql := 'CREATE TRIGGER trig_check_persistence_id_max_sequence_number + BEFORE UPDATE ON ' || jpi_table || ' FOR EACH ROW + EXECUTE PROCEDURE ' || schema || '.check_persistence_id_max_sequence_number()'; + + EXECUTE sql; +END ; +$$ LANGUAGE plpgsql; From dfb426a9e8c846124ff89d40f320d5dff7541880 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Thu, 14 Oct 2021 11:45:20 +0100 Subject: [PATCH 11/34] Fix tests by making them respect sequence_number order --- .../journal/dao/BaseByteArrayJournalDao.scala | 2 +- .../journal/PostgresJournalSpec.scala | 35 ++++++++++--------- .../query/JournalSequenceActorTest.scala | 8 ++--- 3 files changed, 23 insertions(+), 22 deletions(-) diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala index e776619a..f6513de8 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala @@ -139,7 +139,7 @@ trait BaseByteArrayJournalDao extends JournalDaoWithUpdates with BaseJournalDaoW override def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = for { - maybeHighestSeqNo <- db.run(queries.highestSequenceNrForPersistenceId(persistenceId).result) //.headOption) + maybeHighestSeqNo <- db.run(queries.highestSequenceNrForPersistenceId(persistenceId).result) // .headOption) } yield maybeHighestSeqNo.getOrElse(0L) override def messages( diff --git a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala index c4126879..bad3148d 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala @@ -62,7 +62,6 @@ trait PartitionedJournalSpecTestCases { "A journal" must { "store events concurrently without any gaps or duplicates among ordering (offset) values" in { // given - val perId = "perId-1" val numOfSenders = 5 val batchSize = 1000 val senders = List.fill(numOfSenders)(TestProbe()).zipWithIndex @@ -72,29 +71,31 @@ trait PartitionedJournalSpecTestCases { .sequence { senders.map { case (sender, idx) => Future { - writeMessages((idx * batchSize) + 1, (idx + 1) * batchSize, perId, sender.ref, writerUuid) + writeMessages((idx * batchSize) + 1, (idx + 1) * batchSize, s"perId-${idx + 1}", sender.ref, writerUuid) } } } .futureValue(Timeout(Span(1, Minute))) // then - val journalOps = new ScalaPostgresReadJournalOperations(system) - journalOps.withCurrentEventsByPersistenceId()(perId) { tp => - tp.request(Long.MaxValue) - val replayedMessages = (1 to batchSize * numOfSenders).map { _ => - tp.expectNext() - } - tp.expectComplete() - val orderings = replayedMessages.map(_.offset).collect { case Sequence(value) => - value - } - orderings.size should equal(batchSize * numOfSenders) - val minOrd = orderings.min - val maxOrd = orderings.max - val expectedOrderings = (minOrd to maxOrd).toList + senders.foreach { case (_, idx) => + val journalOps = new ScalaPostgresReadJournalOperations(system) + journalOps.withCurrentEventsByPersistenceId()(s"perId-${idx + 1}") { tp => + tp.request(Long.MaxValue) + val replayedMessages = (1 to batchSize).map { _ => + tp.expectNext() + } + tp.expectComplete() + val orderings = replayedMessages.map(_.offset).collect { case 
Sequence(value) => + value + } + orderings.size should equal(batchSize) + val minOrd = orderings.min + val maxOrd = orderings.max + val expectedOrderings = (minOrd to maxOrd).toList - (orderings.sorted should contain).theSameElementsInOrderAs(expectedOrderings) + (orderings.sorted should contain).theSameElementsInOrderAs(expectedOrderings) + } } } } diff --git a/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala b/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala index be8babf4..b7173c75 100644 --- a/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala @@ -6,7 +6,6 @@ package akka.persistence.postgres.query import java.util.concurrent.atomic.AtomicLong - import akka.actor.{ ActorRef, ActorSystem } import akka.pattern.ask import akka.persistence.postgres.config.JournalSequenceRetrievalConfig @@ -27,6 +26,7 @@ import slick.jdbc.{ JdbcBackend, JdbcCapabilities } import scala.concurrent.Future import scala.concurrent.duration._ +import scala.util.Random abstract class JournalSequenceActorTest(val schemaType: SchemaType) extends QueryTestSpec(schemaType.configName) { private val log = LoggerFactory.getLogger(classOf[JournalSequenceActorTest]) @@ -76,7 +76,7 @@ abstract class JournalSequenceActorTest(val schemaType: SchemaType) extends Quer JournalRow(id, deleted = false, "id", id, Array(0.toByte), Nil, emptyJson) } .grouped(10000) - .mapAsync(4) { rows => + .mapAsync(1) { rows => db.run(journalTable.forceInsertAll(rows)) } .runWith(Sink.ignore) @@ -112,7 +112,7 @@ abstract class JournalSequenceActorTest(val schemaType: SchemaType) extends Quer JournalRow(id, deleted = false, "id", id, Array(0.toByte), Nil, emptyJson) } .grouped(10000) - .mapAsync(4) { rows => + .mapAsync(1) { rows => db.run(journalTable.forceInsertAll(rows)) } .runWith(Sink.ignore) @@ -145,7 +145,7 @@ abstract class JournalSequenceActorTest(val schemaType: SchemaType) extends Quer JournalRow(id, deleted = false, "id", id, Array(0.toByte), Nil, emptyJson) } .grouped(10000) - .mapAsync(4) { rows => + .mapAsync(1) { rows => db.run(journalTable.forceInsertAll(rows)) } .runWith(Sink.ignore) From de1e319659bd7ed7d24b013dcbdd2bf75c7ee27c Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Mon, 25 Oct 2021 17:00:03 +0100 Subject: [PATCH 12/34] Remove unused artifacts and add comment on journal metadata migration table --- .../postgres/journal/dao/BaseByteArrayJournalDao.scala | 2 +- .../akka/persistence/postgres/journal/dao/JournalQueries.scala | 1 - .../migration-0.6.0/1-create-journal-persistence-ids-table.sql | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala index f6513de8..41a61845 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala @@ -139,7 +139,7 @@ trait BaseByteArrayJournalDao extends JournalDaoWithUpdates with BaseJournalDaoW override def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = for { - maybeHighestSeqNo <- db.run(queries.highestSequenceNrForPersistenceId(persistenceId).result) // .headOption) + maybeHighestSeqNo <- 
db.run(queries.highestSequenceNrForPersistenceId(persistenceId).result) } yield maybeHighestSeqNo.getOrElse(0L) override def messages( diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala index f7a90e95..518892c5 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala @@ -49,7 +49,6 @@ class JournalQueries( private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]) = { journalTable.filter(_.persistenceId === persistenceId).map(_.sequenceNumber).max - // journalPersistenceIdsTable.filter(_.persistenceId === persistenceId).map(_.maxSequenceNumber).take(1) } private def _highestMarkedSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] = diff --git a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql index 0f8fe4aa..a2fe869c 100644 --- a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql +++ b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql @@ -1,3 +1,4 @@ +-- Depending on your use case consider partitioning this table. DO $$ DECLARE -- replace with appropriate values From 085424dafa61fd6ad759418d361bfc744af728f9 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Wed, 3 Nov 2021 16:19:43 +0000 Subject: [PATCH 13/34] Partition journal_persistence_ids table by hash --- .../postgres/config/AkkaPersistenceConfig.scala | 3 ++- .../postgres/journal/dao/JournalTables.scala | 2 ++ .../akka/persistence/postgres/package.scala | 1 + .../schema/postgres/nested-partitions-schema.sql | 10 +++++++--- .../schema/postgres/partitioned-schema.sql | 10 +++++++--- .../resources/schema/postgres/plain-schema.sql | 10 +++++++--- .../migration/journal/JournalSchema.scala | 11 ++++++++--- .../1-create-journal-persistence-ids-table.sql | 16 +++++++++++++--- ...e-function-update-journal-persistence-ids.sql | 3 ++- 9 files changed, 49 insertions(+), 17 deletions(-) diff --git a/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala b/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala index fe92acbc..2c53c6a2 100644 --- a/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala +++ b/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala @@ -51,13 +51,14 @@ class JournalTableConfiguration(config: Config) { class JournalPersistenceIdsTableColumnNames(config: Config) { private val cfg = config.asConfig("tables.journalPersistenceIds.columnNames") + val id: String = cfg.as[String]("id", "id") val persistenceId: String = cfg.as[String]("persistenceId", "persistence_id") val maxSequenceNumber: String = cfg.as[String]("maxSequenceNumber", "max_sequence_number") val maxOrdering: String = cfg.as[String]("maxOrdering", "max_ordering") val minOrdering: String = cfg.as[String]("minOrdering", "min_ordering") override def toString: String = - s"JournalPersistenceIdsTableColumnNames($persistenceId,$maxSequenceNumber,$maxOrdering,$minOrdering)" + s"JournalPersistenceIdsTableColumnNames($id,$persistenceId,$maxSequenceNumber,$maxOrdering,$minOrdering)" } class JournalPersistenceIdsTableConfiguration(config: Config) { diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalTables.scala 
b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalTables.scala index 8aa0dc06..cc137a18 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalTables.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalTables.scala @@ -97,11 +97,13 @@ class JournalPersistenceIdsTable(_tableTag: Tag, journalPersistenceIdsTableCfg: _schemaName = journalPersistenceIdsTableCfg.schemaName, _tableName = journalPersistenceIdsTableCfg.tableName) { override def * = ( + id, persistenceId, maxSequenceNumber, minOrdering, maxOrdering) <> (JournalPersistenceIdsRow.tupled, JournalPersistenceIdsRow.unapply) + val id: Rep[Long] = column[Long](journalPersistenceIdsTableCfg.columnNames.id) val persistenceId: Rep[String] = column[String](journalPersistenceIdsTableCfg.columnNames.persistenceId, O.Length(255, varying = true)) val maxSequenceNumber: Rep[Long] = column[Long](journalPersistenceIdsTableCfg.columnNames.maxSequenceNumber) diff --git a/core/src/main/scala/akka/persistence/postgres/package.scala b/core/src/main/scala/akka/persistence/postgres/package.scala index 8a3a8df7..ae439cef 100644 --- a/core/src/main/scala/akka/persistence/postgres/package.scala +++ b/core/src/main/scala/akka/persistence/postgres/package.scala @@ -18,6 +18,7 @@ package object postgres { metadata: Json) final case class JournalPersistenceIdsRow( + id: Long, persistenceId: String, maxSequenceNumber: Long, minOrdering: Long, diff --git a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql index fa4f347b..d1719733 100644 --- a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql +++ b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql @@ -72,12 +72,16 @@ DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS public.journal_persistence_ids; CREATE TABLE public.journal_persistence_ids( + id BIGSERIAL, persistence_id TEXT NOT NULL, max_sequence_number BIGINT NOT NULL, min_ordering BIGINT NOT NULL, max_ordering BIGINT NOT NULL, - PRIMARY KEY (persistence_id) -); + PRIMARY KEY (id, persistence_id) +) PARTITION BY HASH(id); + +CREATE TABLE public.journal_persistence_ids_0 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE public.journal_persistence_ids_1 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 1); CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGGER AS $$ @@ -85,7 +89,7 @@ DECLARE BEGIN INSERT INTO public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) - ON CONFLICT (persistence_id) DO UPDATE + ON CONFLICT (id, persistence_id) DO UPDATE SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, diff --git a/core/src/test/resources/schema/postgres/partitioned-schema.sql b/core/src/test/resources/schema/postgres/partitioned-schema.sql index ce28662e..4d256edc 100644 --- a/core/src/test/resources/schema/postgres/partitioned-schema.sql +++ b/core/src/test/resources/schema/postgres/partitioned-schema.sql @@ -73,12 +73,16 @@ DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS public.journal_persistence_ids; CREATE TABLE public.journal_persistence_ids( + id BIGSERIAL, persistence_id TEXT NOT NULL, max_sequence_number BIGINT NOT NULL, min_ordering BIGINT NOT 
NULL, max_ordering BIGINT NOT NULL, - PRIMARY KEY (persistence_id) -); + PRIMARY KEY (id, persistence_id) +) PARTITION BY HASH(id); + +CREATE TABLE public.journal_persistence_ids_0 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE public.journal_persistence_ids_1 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 1); CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGGER AS $$ @@ -86,7 +90,7 @@ DECLARE BEGIN INSERT INTO public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) - ON CONFLICT (persistence_id) DO UPDATE + ON CONFLICT (id, persistence_id) DO UPDATE SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, diff --git a/core/src/test/resources/schema/postgres/plain-schema.sql b/core/src/test/resources/schema/postgres/plain-schema.sql index 7faaf53e..c05a931f 100644 --- a/core/src/test/resources/schema/postgres/plain-schema.sql +++ b/core/src/test/resources/schema/postgres/plain-schema.sql @@ -46,12 +46,16 @@ DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS public.journal_persistence_ids; CREATE TABLE public.journal_persistence_ids( + id BIGSERIAL, persistence_id TEXT NOT NULL, max_sequence_number BIGINT NOT NULL, min_ordering BIGINT NOT NULL, max_ordering BIGINT NOT NULL, - PRIMARY KEY (persistence_id) -); + PRIMARY KEY (id, persistence_id) +) PARTITION BY HASH(id); + +CREATE TABLE public.journal_persistence_ids_0 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE public.journal_persistence_ids_1 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 1); CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGGER AS $$ @@ -59,7 +63,7 @@ DECLARE BEGIN INSERT INTO public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) - ON CONFLICT (persistence_id) DO UPDATE + ON CONFLICT (id, persistence_id) DO UPDATE SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, diff --git a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala index 06754505..1d2a3b3f 100644 --- a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala +++ b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala @@ -32,12 +32,17 @@ private[journal] trait JournalSchema { import journalPersistenceIdsTableCfg.columnNames._ for { _ <- sqlu"""CREATE TABLE #$fullTableName ( + #$id BIGSERIAL, #$persistenceId TEXT NOT NULL, #$maxSequenceNumber BIGINT NOT NULL, #$maxOrdering BIGINT NOT NULL, #$minOrdering BIGINT NOT NULL, - PRIMARY KEY (#$persistenceId) - )""" + PRIMARY KEY (#$id, #$persistenceId) + ) PARTITION BY HASH(#$id)""" + _ <- + sqlu"""CREATE TABLE #${fullTableName}_0 PARTITION OF #$fullTableName FOR VALUES WITH (MODULUS 2, REMAINDER 0)""" + _ <- + sqlu"""CREATE TABLE #${fullTableName}_1 PARTITION OF #$fullTableName FOR VALUES WITH (MODULUS 2, REMAINDER 1)""" } yield () } @@ -106,7 +111,7 @@ private[journal] trait JournalSchema { BEGIN INSERT INTO #$fullTableName (#$persistenceId, #$maxSequenceNumber, #$maxOrdering, 
#$minOrdering) VALUES (NEW.#$jPersistenceId, NEW.#$sequenceNumber, NEW.#$ordering, NEW.#$ordering) - ON CONFLICT (#$persistenceId) DO UPDATE + ON CONFLICT (#$id, #$persistenceId) DO UPDATE SET #$maxSequenceNumber = NEW.#$sequenceNumber, #$maxOrdering = NEW.#$ordering, diff --git a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql index a2fe869c..d67e953b 100644 --- a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql +++ b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql @@ -1,13 +1,16 @@ --- Depending on your use case consider partitioning this table. +-- Creates table and the amount of partitions defined by jpi_partitions_number. Default is 10. DO $$ DECLARE -- replace with appropriate values schema CONSTANT TEXT := 'public'; jpi_table_name CONSTANT TEXT := 'journal_persistence_ids'; + jpi_id_column CONSTANT TEXT := 'id'; jpi_persistence_id_column CONSTANT TEXT := 'persistence_id'; jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; jpi_max_ordering_column CONSTANT TEXT := 'max_ordering'; jpi_min_ordering_column CONSTANT TEXT := 'min_ordering'; + jpi_partitions_table_name_perfix CONSTANT TEXT := 'journal_persistence_ids_'; + jpi_partitions_number CONSTANT INTEGER := 10; -- variables jpi_table TEXT; @@ -17,13 +20,20 @@ BEGIN sql := 'CREATE TABLE IF NOT EXISTS ' || jpi_table || '(' || + jpi_id_column || ' BIGSERIAL, ' || jpi_persistence_id_column || ' TEXT NOT NULL, ' || jpi_max_sequence_number_column || ' BIGINT NOT NULL, ' || jpi_max_ordering_column || ' BIGINT NOT NULL, ' || jpi_min_ordering_column || ' BIGINT NOT NULL, ' || - 'PRIMARY KEY (' || jpi_persistence_id_column || ')' || - ')'; + 'PRIMARY KEY (' || jpi_id_column || ', ' || jpi_persistence_id_column || ')' || + ') PARTITION BY HASH(' || jpi_id_column || ')'; EXECUTE sql; + + FOR i IN 0..(jpi_partitions_number - 1) LOOP + EXECUTE 'CREATE TABLE IF NOT EXISTS ' || jpi_partitions_table_name_perfix || i || + ' PARTITION OF ' || jpi_table || + ' FOR VALUES WITH (MODULUS 10, REMAINDER ' || i || ')'; + END LOOP; END; $$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql b/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql index 61a11de1..02c70403 100644 --- a/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql +++ b/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql @@ -9,6 +9,7 @@ DECLARE j_sequence_number_column CONSTANT TEXT := 'sequence_number'; j_ordering_column CONSTANT TEXT := 'ordering'; jpi_table_name CONSTANT TEXT := 'journal_persistence_ids'; + jpi_id_column CONSTANT TEXT := 'id'; jpi_persistence_id_column CONSTANT TEXT := 'persistence_id'; jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; jpi_max_ordering_column CONSTANT TEXT := 'max_ordering'; @@ -31,7 +32,7 @@ BEGIN jpi_min_ordering_column || ' = LEAST(' || jpi_table || '.' || jpi_min_ordering_column || ', ($1).' 
|| j_ordering_column || ')'; sql := 'INSERT INTO ' || jpi_table || ' (' || cols || ') VALUES (' || vals || ') ' || - 'ON CONFLICT (' || jpi_persistence_id_column || ') DO UPDATE SET ' || upds; + 'ON CONFLICT (' || jpi_id_column || ', ' || jpi_persistence_id_column || ') DO UPDATE SET ' || upds; EXECUTE sql USING NEW; RETURN NEW; From 76dafe58dad189660f68472e5338edb2a3f808bb Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Wed, 3 Nov 2021 18:54:16 +0000 Subject: [PATCH 14/34] Use correct value for hash partitioning modulus --- .../migration-0.6.0/1-create-journal-persistence-ids-table.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql index d67e953b..b1ad161a 100644 --- a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql +++ b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql @@ -33,7 +33,7 @@ BEGIN FOR i IN 0..(jpi_partitions_number - 1) LOOP EXECUTE 'CREATE TABLE IF NOT EXISTS ' || jpi_partitions_table_name_perfix || i || ' PARTITION OF ' || jpi_table || - ' FOR VALUES WITH (MODULUS 10, REMAINDER ' || i || ')'; + ' FOR VALUES WITH (MODULUS ' || jpi_partitions_number || ', REMAINDER ' || i || ')'; END LOOP; END; $$ LANGUAGE plpgsql; From b2736d119ed482494650742df8b0924d4015fb78 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Mon, 8 Nov 2021 15:39:40 +0000 Subject: [PATCH 15/34] Hash by persistence_id only --- .../schema/postgres/nested-partitions-schema.sql | 6 +++--- .../test/resources/schema/postgres/partitioned-schema.sql | 6 +++--- core/src/test/resources/schema/postgres/plain-schema.sql | 6 +++--- .../postgres/migration/journal/JournalSchema.scala | 6 +++--- .../1-create-journal-persistence-ids-table.sql | 8 ++++---- .../2-create-function-update-journal-persistence-ids.sql | 3 +-- 6 files changed, 17 insertions(+), 18 deletions(-) diff --git a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql index d1719733..952e5fdb 100644 --- a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql +++ b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql @@ -77,8 +77,8 @@ CREATE TABLE public.journal_persistence_ids( max_sequence_number BIGINT NOT NULL, min_ordering BIGINT NOT NULL, max_ordering BIGINT NOT NULL, - PRIMARY KEY (id, persistence_id) -) PARTITION BY HASH(id); + PRIMARY KEY (persistence_id) +) PARTITION BY HASH(persistence_id); CREATE TABLE public.journal_persistence_ids_0 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 0); CREATE TABLE public.journal_persistence_ids_1 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 1); @@ -89,7 +89,7 @@ DECLARE BEGIN INSERT INTO public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) - ON CONFLICT (id, persistence_id) DO UPDATE + ON CONFLICT (persistence_id) DO UPDATE SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, diff --git a/core/src/test/resources/schema/postgres/partitioned-schema.sql b/core/src/test/resources/schema/postgres/partitioned-schema.sql index 4d256edc..875dfa70 100644 --- a/core/src/test/resources/schema/postgres/partitioned-schema.sql +++ b/core/src/test/resources/schema/postgres/partitioned-schema.sql @@ -78,8 +78,8 @@ 
CREATE TABLE public.journal_persistence_ids( max_sequence_number BIGINT NOT NULL, min_ordering BIGINT NOT NULL, max_ordering BIGINT NOT NULL, - PRIMARY KEY (id, persistence_id) -) PARTITION BY HASH(id); + PRIMARY KEY (persistence_id) +) PARTITION BY HASH(persistence_id); CREATE TABLE public.journal_persistence_ids_0 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 0); CREATE TABLE public.journal_persistence_ids_1 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 1); @@ -90,7 +90,7 @@ DECLARE BEGIN INSERT INTO public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) - ON CONFLICT (id, persistence_id) DO UPDATE + ON CONFLICT (persistence_id) DO UPDATE SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, diff --git a/core/src/test/resources/schema/postgres/plain-schema.sql b/core/src/test/resources/schema/postgres/plain-schema.sql index c05a931f..81b7fdf3 100644 --- a/core/src/test/resources/schema/postgres/plain-schema.sql +++ b/core/src/test/resources/schema/postgres/plain-schema.sql @@ -51,8 +51,8 @@ CREATE TABLE public.journal_persistence_ids( max_sequence_number BIGINT NOT NULL, min_ordering BIGINT NOT NULL, max_ordering BIGINT NOT NULL, - PRIMARY KEY (id, persistence_id) -) PARTITION BY HASH(id); + PRIMARY KEY (persistence_id) +) PARTITION BY HASH(persistence_id); CREATE TABLE public.journal_persistence_ids_0 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 0); CREATE TABLE public.journal_persistence_ids_1 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 1); @@ -63,7 +63,7 @@ DECLARE BEGIN INSERT INTO public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) - ON CONFLICT (id, persistence_id) DO UPDATE + ON CONFLICT (persistence_id) DO UPDATE SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, diff --git a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala index 1d2a3b3f..3e4555e2 100644 --- a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala +++ b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala @@ -37,8 +37,8 @@ private[journal] trait JournalSchema { #$maxSequenceNumber BIGINT NOT NULL, #$maxOrdering BIGINT NOT NULL, #$minOrdering BIGINT NOT NULL, - PRIMARY KEY (#$id, #$persistenceId) - ) PARTITION BY HASH(#$id)""" + PRIMARY KEY (#$persistenceId) + ) PARTITION BY HASH(#$persistenceId)""" _ <- sqlu"""CREATE TABLE #${fullTableName}_0 PARTITION OF #$fullTableName FOR VALUES WITH (MODULUS 2, REMAINDER 0)""" _ <- @@ -111,7 +111,7 @@ private[journal] trait JournalSchema { BEGIN INSERT INTO #$fullTableName (#$persistenceId, #$maxSequenceNumber, #$maxOrdering, #$minOrdering) VALUES (NEW.#$jPersistenceId, NEW.#$sequenceNumber, NEW.#$ordering, NEW.#$ordering) - ON CONFLICT (#$id, #$persistenceId) DO UPDATE + ON CONFLICT (#$persistenceId) DO UPDATE SET #$maxSequenceNumber = NEW.#$sequenceNumber, #$maxOrdering = NEW.#$ordering, diff --git a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql index 
b1ad161a..e5322534 100644 --- a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql +++ b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql @@ -9,7 +9,7 @@ DECLARE jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; jpi_max_ordering_column CONSTANT TEXT := 'max_ordering'; jpi_min_ordering_column CONSTANT TEXT := 'min_ordering'; - jpi_partitions_table_name_perfix CONSTANT TEXT := 'journal_persistence_ids_'; + jpi_partitions_table_name_prefix CONSTANT TEXT := 'journal_persistence_ids_'; jpi_partitions_number CONSTANT INTEGER := 10; -- variables @@ -25,13 +25,13 @@ BEGIN jpi_max_sequence_number_column || ' BIGINT NOT NULL, ' || jpi_max_ordering_column || ' BIGINT NOT NULL, ' || jpi_min_ordering_column || ' BIGINT NOT NULL, ' || - 'PRIMARY KEY (' || jpi_id_column || ', ' || jpi_persistence_id_column || ')' || - ') PARTITION BY HASH(' || jpi_id_column || ')'; + 'PRIMARY KEY (' || jpi_persistence_id_column || ')' || + ') PARTITION BY HASH(' || jpi_persistence_id_column || ')'; EXECUTE sql; FOR i IN 0..(jpi_partitions_number - 1) LOOP - EXECUTE 'CREATE TABLE IF NOT EXISTS ' || jpi_partitions_table_name_perfix || i || + EXECUTE 'CREATE TABLE IF NOT EXISTS ' || jpi_partitions_table_name_prefix || i || ' PARTITION OF ' || jpi_table || ' FOR VALUES WITH (MODULUS ' || jpi_partitions_number || ', REMAINDER ' || i || ')'; END LOOP; diff --git a/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql b/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql index 02c70403..61a11de1 100644 --- a/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql +++ b/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql @@ -9,7 +9,6 @@ DECLARE j_sequence_number_column CONSTANT TEXT := 'sequence_number'; j_ordering_column CONSTANT TEXT := 'ordering'; jpi_table_name CONSTANT TEXT := 'journal_persistence_ids'; - jpi_id_column CONSTANT TEXT := 'id'; jpi_persistence_id_column CONSTANT TEXT := 'persistence_id'; jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; jpi_max_ordering_column CONSTANT TEXT := 'max_ordering'; @@ -32,7 +31,7 @@ BEGIN jpi_min_ordering_column || ' = LEAST(' || jpi_table || '.' || jpi_min_ordering_column || ', ($1).' || j_ordering_column || ')'; sql := 'INSERT INTO ' || jpi_table || ' (' || cols || ') VALUES (' || vals || ') ' || - 'ON CONFLICT (' || jpi_id_column || ', ' || jpi_persistence_id_column || ') DO UPDATE SET ' || upds; + 'ON CONFLICT (' || jpi_persistence_id_column || ') DO UPDATE SET ' || upds; EXECUTE sql USING NEW; RETURN NEW; From f3771e76f1e9f0f3f9e54eb8a82d354cae0ddd1d Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Wed, 10 Nov 2021 10:33:52 +0000 Subject: [PATCH 16/34] Solve flaky test on PartitionedJournalSpecTestCases. With the addition of triggers that can refuse an insert on the journal, concurrent inserts on the journal for the same persistence_id can fail due to the sequence number being smaller than what is already recorded. Therefore, not having gaps among ordering can only be verified across all persistence_ids and not for a single one. 
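For context, the guard that makes these concurrent inserts fail is the BEFORE UPDATE trigger on journal_persistence_ids. A minimal sketch of such a check follows; the trigger wiring matches the schema scripts, but the function body here is only an illustration of the idea, not the exact code shipped in 4-create-function-check-persistence-id-max-sequence.sql:

    -- Illustrative sketch only: assumes the default journal_persistence_ids layout.
    -- Reject any metadata update whose sequence number is not higher than the stored maximum.
    CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS
    $$
    BEGIN
        IF NEW.max_sequence_number <= OLD.max_sequence_number THEN
            RAISE EXCEPTION 'sequence number % is not higher than the stored maximum %',
                NEW.max_sequence_number, OLD.max_sequence_number;
        END IF;
        RETURN NEW;
    END;
    $$
    LANGUAGE plpgsql;

    CREATE TRIGGER trig_check_persistence_id_max_sequence_number
        BEFORE UPDATE ON public.journal_persistence_ids
        FOR EACH ROW
        EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number();

Since the AFTER INSERT trigger on the journal performs that metadata update in the same transaction, a writer that loses the race has its journal insert rejected as well, which is why the gap check has to run over the orderings of all persistence_ids together.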
--- .../journal/PostgresJournalSpec.scala | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala index bad3148d..74d7143f 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala @@ -60,7 +60,7 @@ trait PartitionedJournalSpecTestCases { this: PostgresJournalSpec => "A journal" must { - "store events concurrently without any gaps or duplicates among ordering (offset) values" in { + "store events concurrently for different persistence ids without creating duplicates or gaps among journal ordering (offset)" in { // given val numOfSenders = 5 val batchSize = 1000 @@ -77,26 +77,29 @@ trait PartitionedJournalSpecTestCases { } .futureValue(Timeout(Span(1, Minute))) - // then + val journalOps = new ScalaPostgresReadJournalOperations(system) + var orderings: IndexedSeq[Long] = IndexedSeq.empty + senders.foreach { case (_, idx) => - val journalOps = new ScalaPostgresReadJournalOperations(system) journalOps.withCurrentEventsByPersistenceId()(s"perId-${idx + 1}") { tp => tp.request(Long.MaxValue) val replayedMessages = (1 to batchSize).map { _ => tp.expectNext() } tp.expectComplete() - val orderings = replayedMessages.map(_.offset).collect { case Sequence(value) => + orderings = orderings ++ replayedMessages.map(_.offset).collect { case Sequence(value) => value } - orderings.size should equal(batchSize) - val minOrd = orderings.min - val maxOrd = orderings.max - val expectedOrderings = (minOrd to maxOrd).toList - - (orderings.sorted should contain).theSameElementsInOrderAs(expectedOrderings) } } + + // then + orderings.size should equal(batchSize * numOfSenders) + val minOrd = orderings.min + val maxOrd = orderings.max + val expectedOrderings = (minOrd to maxOrd).toList + + (orderings.sorted should contain).theSameElementsInOrderAs(expectedOrderings) } } From 12843e00f1d51ffa4b45cbff10b2cd6253c74b1b Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Wed, 10 Nov 2021 16:24:26 +0000 Subject: [PATCH 17/34] Add test that verifies the execution of the new triggers --- .../journal/PostgresJournalSpec.scala | 38 +++++++++++++++---- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala index 74d7143f..fb584a0c 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala @@ -6,24 +6,24 @@ package akka.persistence.postgres.journal import akka.actor.Actor -import akka.persistence.JournalProtocol.ReplayedMessage +import akka.persistence.JournalProtocol.{ReplayedMessage, WriteMessages, WriteMessagesFailed} import akka.persistence.journal.JournalSpec import akka.persistence.postgres.config._ import akka.persistence.postgres.db.SlickExtension import akka.persistence.postgres.query.ScalaPostgresReadJournalOperations import akka.persistence.postgres.util.Schema._ -import akka.persistence.postgres.util.{ ClasspathResources, DropCreate } +import akka.persistence.postgres.util.{ClasspathResources, DropCreate} import akka.persistence.query.Sequence -import akka.persistence.{ CapabilityFlag, PersistentImpl } +import 
akka.persistence.{AtomicWrite, CapabilityFlag, PersistentImpl, PersistentRepr} import akka.testkit.TestProbe -import com.typesafe.config.{ Config, ConfigFactory } +import com.typesafe.config.{Config, ConfigFactory} import org.scalatest.concurrent.PatienceConfiguration.Timeout import org.scalatest.concurrent.ScalaFutures -import org.scalatest.time.{ Minute, Span } -import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } +import org.scalatest.time.{Minute, Span} +import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach} import scala.concurrent.duration._ -import scala.concurrent.{ ExecutionContext, Future } +import scala.concurrent.{ExecutionContext, Future} abstract class PostgresJournalSpec(config: String, schemaType: SchemaType) extends JournalSpec(ConfigFactory.load(config)) @@ -54,6 +54,30 @@ abstract class PostgresJournalSpec(config: String, schemaType: SchemaType) super.afterAll() } + "A journal" must { + "not allow to store events with sequence number lower than what is already stored for the same persistence id" in { + // given + val perId = "perId" + val sender = TestProbe() + val repeatedSnr = 5 + + // when + writeMessages(1, repeatedSnr + 1, perId, sender.ref, writerUuid) + + // then + val msg = AtomicWrite(PersistentRepr( + payload = s"a-$repeatedSnr", + sequenceNr = repeatedSnr, + persistenceId = pid, + sender = sender.ref, + writerUuid = writerUuid + )) + + val probe = TestProbe() + journal ! WriteMessages(Seq(msg), probe.ref, actorInstanceId) + probe.expectMsgType[WriteMessagesFailed] + } + } } trait PartitionedJournalSpecTestCases { From 9d5fb07c3da742e1d63d06abef2fd94422475acb Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Thu, 11 Nov 2021 14:18:41 +0000 Subject: [PATCH 18/34] Prefer IDENTITY column to SERIAL --- .../test/resources/schema/postgres/nested-partitions-schema.sql | 2 +- core/src/test/resources/schema/postgres/partitioned-schema.sql | 2 +- core/src/test/resources/schema/postgres/plain-schema.sql | 2 +- .../persistence/postgres/migration/journal/JournalSchema.scala | 2 +- .../migration-0.6.0/1-create-journal-persistence-ids-table.sql | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql index 952e5fdb..ce110fa6 100644 --- a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql +++ b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql @@ -72,7 +72,7 @@ DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS public.journal_persistence_ids; CREATE TABLE public.journal_persistence_ids( - id BIGSERIAL, + id BIGINT GENERATED ALWAYS AS IDENTITY, persistence_id TEXT NOT NULL, max_sequence_number BIGINT NOT NULL, min_ordering BIGINT NOT NULL, diff --git a/core/src/test/resources/schema/postgres/partitioned-schema.sql b/core/src/test/resources/schema/postgres/partitioned-schema.sql index 875dfa70..68f66015 100644 --- a/core/src/test/resources/schema/postgres/partitioned-schema.sql +++ b/core/src/test/resources/schema/postgres/partitioned-schema.sql @@ -73,7 +73,7 @@ DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS public.journal_persistence_ids; CREATE TABLE public.journal_persistence_ids( - id BIGSERIAL, + id BIGINT GENERATED ALWAYS AS IDENTITY, persistence_id TEXT NOT NULL, max_sequence_number BIGINT NOT NULL, min_ordering BIGINT NOT NULL, diff --git a/core/src/test/resources/schema/postgres/plain-schema.sql 
b/core/src/test/resources/schema/postgres/plain-schema.sql index 81b7fdf3..66953e47 100644 --- a/core/src/test/resources/schema/postgres/plain-schema.sql +++ b/core/src/test/resources/schema/postgres/plain-schema.sql @@ -46,7 +46,7 @@ DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); DROP TABLE IF EXISTS public.journal_persistence_ids; CREATE TABLE public.journal_persistence_ids( - id BIGSERIAL, + id BIGINT GENERATED ALWAYS AS IDENTITY, persistence_id TEXT NOT NULL, max_sequence_number BIGINT NOT NULL, min_ordering BIGINT NOT NULL, diff --git a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala index 3e4555e2..0f5dd7e8 100644 --- a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala +++ b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala @@ -32,7 +32,7 @@ private[journal] trait JournalSchema { import journalPersistenceIdsTableCfg.columnNames._ for { _ <- sqlu"""CREATE TABLE #$fullTableName ( - #$id BIGSERIAL, + #$id BIGINT GENERATED ALWAYS AS IDENTITY, #$persistenceId TEXT NOT NULL, #$maxSequenceNumber BIGINT NOT NULL, #$maxOrdering BIGINT NOT NULL, diff --git a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql index e5322534..1b86a521 100644 --- a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql +++ b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql @@ -20,7 +20,7 @@ BEGIN sql := 'CREATE TABLE IF NOT EXISTS ' || jpi_table || '(' || - jpi_id_column || ' BIGSERIAL, ' || + jpi_id_column || ' BIGINT GENERATED ALWAYS AS IDENTITY, ' || jpi_persistence_id_column || ' TEXT NOT NULL, ' || jpi_max_sequence_number_column || ' BIGINT NOT NULL, ' || jpi_max_ordering_column || ' BIGINT NOT NULL, ' || From dcc1f710cea084347e309fae6586249f770b5d6e Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Tue, 4 Jul 2023 15:01:53 +0100 Subject: [PATCH 19/34] Rename new table to journal_metadata --- .../config/AkkaPersistenceConfig.scala | 24 +++++------ .../postgres/journal/dao/FlatJournalDao.scala | 2 +- .../postgres/journal/dao/JournalQueries.scala | 24 +++++++++-- .../postgres/journal/dao/JournalTables.scala | 28 ++++++------- .../dao/NestedPartitionsJournalDao.scala | 2 +- .../journal/dao/PartitionedJournalDao.scala | 2 +- .../akka/persistence/postgres/package.scala | 2 +- .../query/dao/ReadJournalQueries.scala | 6 +-- .../postgres/nested-partitions-schema.sql | 26 ++++++------ .../schema/postgres/partitioned-schema.sql | 26 ++++++------ .../schema/postgres/plain-schema.sql | 26 ++++++------ .../postgres/SharedActorSystemTestSpec.scala | 2 +- .../SingleActorSystemPerTestSpec.scala | 2 +- .../journal/dao/JournalQueriesTest.scala | 14 ++++++- .../journal/dao/JournalTablesTest.scala | 28 ++++++------- .../CurrentEventsByTagWithGapsTest.scala | 6 +-- .../postgres/util/DropCreate.scala | 8 ++-- .../journal/Jdbc4JournalMigration.scala | 2 +- .../migration/journal/JournalSchema.scala | 26 ++++++------ .../src/test/resources/base-migration.conf | 12 +++--- .../postgres/migration/MigrationTest.scala | 12 +++--- .../1-create-journal-metadata-table.sql | 41 +++++++++++++++++++ ...1-create-journal-persistence-ids-table.sql | 39 ------------------ ...reate-function-update-journal-metadata.sql | 39 ++++++++++++++++++ ...unction-update-journal-persistence-ids.sql | 39 ------------------ 
...reate-trigger-update-journal-metadata.sql} | 4 +- ...tion-check-persistence-id-max-sequence.sql | 4 +- ...gger-check-persistence-id-max-sequence.sql | 8 ++-- ...populate-journal-persistence-ids-table.sql | 31 -------------- 29 files changed, 241 insertions(+), 244 deletions(-) create mode 100644 scripts/migration-0.6.0/1-create-journal-metadata-table.sql delete mode 100644 scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql create mode 100644 scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql delete mode 100644 scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql rename scripts/migration-0.6.0/{3-create-trigger-update-journal-persistence-ids.sql => 3-create-trigger-update-journal-metadata.sql} (70%) delete mode 100644 scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql diff --git a/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala b/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala index 2c53c6a2..6d471427 100644 --- a/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala +++ b/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala @@ -49,8 +49,8 @@ class JournalTableConfiguration(config: Config) { override def toString: String = s"JournalTableConfiguration($tableName,$schemaName,$columnNames)" } -class JournalPersistenceIdsTableColumnNames(config: Config) { - private val cfg = config.asConfig("tables.journalPersistenceIds.columnNames") +class JournalMetadataTableColumnNames(config: Config) { + private val cfg = config.asConfig("tables.journalMetadata.columnNames") val id: String = cfg.as[String]("id", "id") val persistenceId: String = cfg.as[String]("persistenceId", "persistence_id") val maxSequenceNumber: String = cfg.as[String]("maxSequenceNumber", "max_sequence_number") @@ -58,15 +58,15 @@ class JournalPersistenceIdsTableColumnNames(config: Config) { val minOrdering: String = cfg.as[String]("minOrdering", "min_ordering") override def toString: String = - s"JournalPersistenceIdsTableColumnNames($id,$persistenceId,$maxSequenceNumber,$maxOrdering,$minOrdering)" + s"JournalMetadataTableColumnNames($id,$persistenceId,$maxSequenceNumber,$maxOrdering,$minOrdering)" } -class JournalPersistenceIdsTableConfiguration(config: Config) { - private val cfg = config.asConfig("tables.journalPersistenceIds") - val tableName: String = cfg.as[String]("tableName", "journal_persistence_ids") +class JournalMetadataTableConfiguration(config: Config) { + private val cfg = config.asConfig("tables.journalMetadata") + val tableName: String = cfg.as[String]("tableName", "journal_metadata") val schemaName: Option[String] = cfg.as[String]("schemaName").trim - val columnNames: JournalPersistenceIdsTableColumnNames = new JournalPersistenceIdsTableColumnNames(config) - override def toString: String = s"JournalPersistenceIdsTableConfiguration($tableName,$schemaName,$columnNames)" + val columnNames: JournalMetadataTableColumnNames = new JournalMetadataTableColumnNames(config) + override def toString: String = s"JournalMetadataTableConfiguration($tableName,$schemaName,$columnNames)" } class SnapshotTableColumnNames(config: Config) { @@ -142,14 +142,14 @@ class TagsConfig(config: Config) { class JournalConfig(config: Config) { val partitionsConfig = new JournalPartitionsConfiguration(config) val journalTableConfiguration = new JournalTableConfiguration(config) - val journalPersistenceIdsTableConfiguration = new 
JournalPersistenceIdsTableConfiguration(config) + val journalMetadataTableConfiguration = new JournalMetadataTableConfiguration(config) val pluginConfig = new JournalPluginConfig(config) val daoConfig = new BaseByteArrayJournalDaoConfig(config) val tagsConfig = new TagsConfig(config) val tagsTableConfiguration = new TagsTableConfiguration(config) val useSharedDb: Option[String] = config.asOptionalNonEmptyString(ConfigKeys.useSharedDb) override def toString: String = - s"JournalConfig($journalTableConfiguration,$pluginConfig,$tagsConfig,$partitionsConfig,$useSharedDb)" + s"JournalConfig($journalTableConfiguration,$journalMetadataTableConfiguration,$pluginConfig,$tagsConfig,$partitionsConfig,$useSharedDb)" } class SnapshotConfig(config: Config) { @@ -177,7 +177,7 @@ case class JournalSequenceRetrievalConfig( class ReadJournalConfig(config: Config) { val journalTableConfiguration = new JournalTableConfiguration(config) - val journalPersistenceIdsTableConfiguration = new JournalPersistenceIdsTableConfiguration(config) + val journalMetadataTableConfiguration = new JournalMetadataTableConfiguration(config) val journalSequenceRetrievalConfiguration = JournalSequenceRetrievalConfig(config) val pluginConfig = new ReadJournalPluginConfig(config) val tagsConfig = new TagsConfig(config) @@ -188,5 +188,5 @@ class ReadJournalConfig(config: Config) { val includeDeleted: Boolean = config.as[Boolean]("includeLogicallyDeleted", true) override def toString: String = - s"ReadJournalConfig($journalTableConfiguration,$pluginConfig,$refreshInterval,$maxBufferSize,$addShutdownHook,$includeDeleted)" + s"ReadJournalConfig($journalTableConfiguration,$journalMetadataTableConfiguration,$pluginConfig,$refreshInterval,$maxBufferSize,$addShutdownHook,$includeDeleted)" } diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/FlatJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/FlatJournalDao.scala index 71dfeaf0..502eda7b 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/FlatJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/FlatJournalDao.scala @@ -15,7 +15,7 @@ class FlatJournalDao(val db: Database, val journalConfig: JournalConfig, seriali extends BaseByteArrayJournalDao { val queries = new JournalQueries( FlatJournalTable(journalConfig.journalTableConfiguration), - JournalPersistenceIdsTable(journalConfig.journalPersistenceIdsTableConfiguration)) + JournalMetadataTable(journalConfig.journalMetadataTableConfiguration)) val tagDao = new SimpleTagDao(db, journalConfig.tagsTableConfiguration) val eventTagConverter = new CachedTagIdResolver(tagDao, journalConfig.tagsConfig) val serializer = new ByteArrayJournalSerializer(serialization, eventTagConverter) diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala index 518892c5..d6b859fe 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala @@ -11,8 +11,8 @@ import slick.lifted.TableQuery import slick.sql.FixedSqlAction class JournalQueries( - journalTable: TableQuery[JournalTable], - journalPersistenceIdsTable: TableQuery[JournalPersistenceIdsTable]) { + journalTable: TableQuery[JournalTable], + journalMetadataTable: TableQuery[JournalMetadataTable]) { import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ @@ -21,6 +21,9 @@ class 
JournalQueries( def writeJournalRows(xs: Seq[JournalRow]): FixedSqlAction[Option[Int], NoStream, slick.dbio.Effect.Write] = compiledJournalTable ++= xs.sortBy(_.sequenceNumber) + private def selectAllJournalForPersistenceId(persistenceId: Rep[String]) = + journalTable.filter(_.persistenceId === persistenceId).sortBy(_.sequenceNumber.desc) + def delete(persistenceId: String, toSequenceNr: Long): FixedSqlAction[Int, NoStream, slick.dbio.Effect.Write] = { journalTable.filter(_.persistenceId === persistenceId).filter(_.sequenceNumber <= toSequenceNr).delete } @@ -47,9 +50,12 @@ class JournalQueries( .map(_.deleted) .update(true) - private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]) = { + private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] = journalTable.filter(_.persistenceId === persistenceId).map(_.sequenceNumber).max - } +// journalMetadataTable +// .filter(_.persistenceId === persistenceId) +// .map(_.maxSequenceNumber) +// .max // TODO replace with more appropriate combinator? private def _highestMarkedSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] = journalTable.filter(_.deleted === true).filter(_.persistenceId === persistenceId).map(_.sequenceNumber).max @@ -58,6 +64,16 @@ class JournalQueries( val highestMarkedSequenceNrForPersistenceId = Compiled(_highestMarkedSequenceNrForPersistenceId _) + private def _selectByPersistenceIdAndMaxSequenceNumber(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) = + selectAllJournalForPersistenceId(persistenceId).filter(_.sequenceNumber <= maxSequenceNr) + + val selectByPersistenceIdAndMaxSequenceNumber = Compiled(_selectByPersistenceIdAndMaxSequenceNumber _) + + private def _allPersistenceIdsDistinct: Query[Rep[String], String, Seq] = + journalTable.map(_.persistenceId).distinct + + val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct) + private def _messagesQuery( persistenceId: Rep[String], fromSequenceNr: Rep[Long], diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalTables.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalTables.scala index cc137a18..dc80cf79 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalTables.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalTables.scala @@ -6,7 +6,7 @@ package akka.persistence.postgres package journal.dao -import akka.persistence.postgres.config.{ JournalPersistenceIdsTableConfiguration, JournalTableConfiguration } +import akka.persistence.postgres.config.{ JournalMetadataTableConfiguration, JournalTableConfiguration } import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ import io.circe.Json @@ -91,30 +91,30 @@ object NestedPartitionsJournalTable { FlatJournalTable.apply(journalTableCfg) } -class JournalPersistenceIdsTable(_tableTag: Tag, journalPersistenceIdsTableCfg: JournalPersistenceIdsTableConfiguration) - extends Table[JournalPersistenceIdsRow]( +class JournalMetadataTable(_tableTag: Tag, journalMetadataTableCfg: JournalMetadataTableConfiguration) + extends Table[JournalMetadataRow]( _tableTag, - _schemaName = journalPersistenceIdsTableCfg.schemaName, - _tableName = journalPersistenceIdsTableCfg.tableName) { + _schemaName = journalMetadataTableCfg.schemaName, + _tableName = journalMetadataTableCfg.tableName) { override def * = ( id, persistenceId, maxSequenceNumber, minOrdering, - maxOrdering) <> (JournalPersistenceIdsRow.tupled, JournalPersistenceIdsRow.unapply) + maxOrdering) 
<> (JournalMetadataRow.tupled, JournalMetadataRow.unapply) - val id: Rep[Long] = column[Long](journalPersistenceIdsTableCfg.columnNames.id) + val id: Rep[Long] = column[Long](journalMetadataTableCfg.columnNames.id) val persistenceId: Rep[String] = - column[String](journalPersistenceIdsTableCfg.columnNames.persistenceId, O.Length(255, varying = true)) - val maxSequenceNumber: Rep[Long] = column[Long](journalPersistenceIdsTableCfg.columnNames.maxSequenceNumber) - val minOrdering: Rep[Long] = column[Long](journalPersistenceIdsTableCfg.columnNames.minOrdering) - val maxOrdering: Rep[Long] = column[Long](journalPersistenceIdsTableCfg.columnNames.maxOrdering) + column[String](journalMetadataTableCfg.columnNames.persistenceId, O.Length(255, varying = true)) + val maxSequenceNumber: Rep[Long] = column[Long](journalMetadataTableCfg.columnNames.maxSequenceNumber) + val minOrdering: Rep[Long] = column[Long](journalMetadataTableCfg.columnNames.minOrdering) + val maxOrdering: Rep[Long] = column[Long](journalMetadataTableCfg.columnNames.maxOrdering) val pk = primaryKey(s"${tableName}_pk", persistenceId) } -object JournalPersistenceIdsTable { +object JournalMetadataTable { def apply( - journalPersistenceIdsTableCfg: JournalPersistenceIdsTableConfiguration): TableQuery[JournalPersistenceIdsTable] = - TableQuery(tag => new JournalPersistenceIdsTable(tag, journalPersistenceIdsTableCfg)) + journalMetadataTableCfg: JournalMetadataTableConfiguration): TableQuery[JournalMetadataTable] = + TableQuery(tag => new JournalMetadataTable(tag, journalMetadataTableCfg)) } diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/NestedPartitionsJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/NestedPartitionsJournalDao.scala index 41065e78..bea9f471 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/NestedPartitionsJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/NestedPartitionsJournalDao.scala @@ -19,7 +19,7 @@ class NestedPartitionsJournalDao(db: Database, journalConfig: JournalConfig, ser extends FlatJournalDao(db, journalConfig, serialization) { override val queries = new JournalQueries( NestedPartitionsJournalTable(journalConfig.journalTableConfiguration), - JournalPersistenceIdsTable(journalConfig.journalPersistenceIdsTableConfiguration)) + JournalMetadataTable(journalConfig.journalMetadataTableConfiguration)) private val journalTableCfg = journalConfig.journalTableConfiguration private val partitionSize = journalConfig.partitionsConfig.size private val partitionPrefix = journalConfig.partitionsConfig.prefix diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala index 297005ae..5ff862f4 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala @@ -17,7 +17,7 @@ class PartitionedJournalDao(db: Database, journalConfig: JournalConfig, serializ extends FlatJournalDao(db, journalConfig, serialization) { override val queries = new JournalQueries( PartitionedJournalTable(journalConfig.journalTableConfiguration), - JournalPersistenceIdsTable(journalConfig.journalPersistenceIdsTableConfiguration)) + JournalMetadataTable(journalConfig.journalMetadataTableConfiguration)) private val journalTableCfg = journalConfig.journalTableConfiguration private val partitionSize = 
journalConfig.partitionsConfig.size private val partitionPrefix = journalConfig.partitionsConfig.prefix diff --git a/core/src/main/scala/akka/persistence/postgres/package.scala b/core/src/main/scala/akka/persistence/postgres/package.scala index ae439cef..145d8c9e 100644 --- a/core/src/main/scala/akka/persistence/postgres/package.scala +++ b/core/src/main/scala/akka/persistence/postgres/package.scala @@ -17,7 +17,7 @@ package object postgres { tags: List[Int], metadata: Json) - final case class JournalPersistenceIdsRow( + final case class JournalMetadataRow( id: Long, persistenceId: String, maxSequenceNumber: Long, diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala index d0e3b5b0..23dd3c37 100644 --- a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala @@ -7,7 +7,7 @@ package akka.persistence.postgres package query.dao import akka.persistence.postgres.config.ReadJournalConfig -import akka.persistence.postgres.journal.dao.{ FlatJournalTable, JournalPersistenceIdsTable, JournalTable } +import akka.persistence.postgres.journal.dao.{ FlatJournalTable, JournalMetadataTable, JournalTable } class ReadJournalQueries(val readJournalConfig: ReadJournalConfig) { import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ @@ -17,12 +17,12 @@ class ReadJournalQueries(val readJournalConfig: ReadJournalConfig) { private def _allPersistenceIdsDistinct(max: ConstColumn[Long]): Query[Rep[String], String, Seq] = baseTableQuery().map(_.persistenceId).distinct.take(max) - val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct _) - private def baseTableQuery() = if (readJournalConfig.includeDeleted) journalTable else journalTable.filter(_.deleted === false) + val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct _) + private def _messagesQuery( persistenceId: Rep[String], fromSequenceNr: Rep[Long], diff --git a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql index ce110fa6..e7eb4ecf 100644 --- a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql +++ b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql @@ -65,13 +65,13 @@ CREATE TABLE IF NOT EXISTS public.snapshot PRIMARY KEY (persistence_id, sequence_number) ); -DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON public.journal_persistence_ids; +DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON public.journal_metadata; DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); -DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids ON public.journal; -DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); -DROP TABLE IF EXISTS public.journal_persistence_ids; +DROP TRIGGER IF EXISTS trig_update_journal_metadata ON public.journal; +DROP FUNCTION IF EXISTS public.update_journal_metadata(); +DROP TABLE IF EXISTS public.journal_metadata; -CREATE TABLE public.journal_persistence_ids( +CREATE TABLE public.journal_metadata( id BIGINT GENERATED ALWAYS AS IDENTITY, persistence_id TEXT NOT NULL, max_sequence_number BIGINT NOT NULL, @@ -80,30 +80,30 @@ CREATE TABLE public.journal_persistence_ids( PRIMARY KEY (persistence_id) ) PARTITION BY HASH(persistence_id); -CREATE TABLE public.journal_persistence_ids_0 
PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 0); -CREATE TABLE public.journal_persistence_ids_1 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE public.journal_metadata_0 PARTITION OF public.journal_metadata FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE public.journal_metadata_1 PARTITION OF public.journal_metadata FOR VALUES WITH (MODULUS 2, REMAINDER 1); -CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGGER AS +CREATE OR REPLACE FUNCTION public.update_journal_metadata() RETURNS TRIGGER AS $$ DECLARE BEGIN - INSERT INTO public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) + INSERT INTO public.journal_metadata (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, - min_ordering = LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); + min_ordering = LEAST(public.journal_metadata.min_ordering, NEW.ordering); RETURN NEW; END; $$ LANGUAGE plpgsql; -CREATE TRIGGER trig_update_journal_persistence_ids +CREATE TRIGGER trig_update_journal_metadata AFTER INSERT ON public.journal FOR EACH ROW - EXECUTE PROCEDURE public.update_journal_persistence_ids(); + EXECUTE PROCEDURE public.update_journal_metadata(); CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS $$ @@ -120,6 +120,6 @@ LANGUAGE plpgsql; CREATE TRIGGER trig_check_persistence_id_max_sequence_number - BEFORE UPDATE ON public.journal_persistence_ids + BEFORE UPDATE ON public.journal_metadata FOR EACH ROW EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); diff --git a/core/src/test/resources/schema/postgres/partitioned-schema.sql b/core/src/test/resources/schema/postgres/partitioned-schema.sql index 68f66015..e1877326 100644 --- a/core/src/test/resources/schema/postgres/partitioned-schema.sql +++ b/core/src/test/resources/schema/postgres/partitioned-schema.sql @@ -66,13 +66,13 @@ CREATE TABLE IF NOT EXISTS public.snapshot PRIMARY KEY (persistence_id, sequence_number) ); -DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON public.journal_persistence_ids; +DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON public.journal_metadata; DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); -DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids ON public.journal; -DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); -DROP TABLE IF EXISTS public.journal_persistence_ids; +DROP TRIGGER IF EXISTS trig_update_journal_metadata ON public.journal; +DROP FUNCTION IF EXISTS public.update_journal_metadata(); +DROP TABLE IF EXISTS public.journal_metadata; -CREATE TABLE public.journal_persistence_ids( +CREATE TABLE public.journal_metadata( id BIGINT GENERATED ALWAYS AS IDENTITY, persistence_id TEXT NOT NULL, max_sequence_number BIGINT NOT NULL, @@ -81,30 +81,30 @@ CREATE TABLE public.journal_persistence_ids( PRIMARY KEY (persistence_id) ) PARTITION BY HASH(persistence_id); -CREATE TABLE public.journal_persistence_ids_0 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 0); -CREATE TABLE public.journal_persistence_ids_1 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE 
public.journal_metadata_0 PARTITION OF public.journal_metadata FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE public.journal_metadata_1 PARTITION OF public.journal_metadata FOR VALUES WITH (MODULUS 2, REMAINDER 1); -CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGGER AS +CREATE OR REPLACE FUNCTION public.update_journal_metadata() RETURNS TRIGGER AS $$ DECLARE BEGIN - INSERT INTO public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) + INSERT INTO public.journal_metadata (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, - min_ordering = LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); + min_ordering = LEAST(public.journal_metadata.min_ordering, NEW.ordering); RETURN NEW; END; $$ LANGUAGE plpgsql; -CREATE TRIGGER trig_update_journal_persistence_ids +CREATE TRIGGER trig_update_journal_metadata AFTER INSERT ON public.journal FOR EACH ROW - EXECUTE PROCEDURE public.update_journal_persistence_ids(); + EXECUTE PROCEDURE public.update_journal_metadata(); CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS $$ @@ -121,6 +121,6 @@ LANGUAGE plpgsql; CREATE TRIGGER trig_check_persistence_id_max_sequence_number - BEFORE UPDATE ON public.journal_persistence_ids + BEFORE UPDATE ON public.journal_metadata FOR EACH ROW EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); diff --git a/core/src/test/resources/schema/postgres/plain-schema.sql b/core/src/test/resources/schema/postgres/plain-schema.sql index 66953e47..7a45c7f1 100644 --- a/core/src/test/resources/schema/postgres/plain-schema.sql +++ b/core/src/test/resources/schema/postgres/plain-schema.sql @@ -39,13 +39,13 @@ CREATE TABLE IF NOT EXISTS public.snapshot PRIMARY KEY (persistence_id, sequence_number) ); -DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON public.journal_persistence_ids; +DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON public.journal_metadata; DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); -DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids ON public.journal; -DROP FUNCTION IF EXISTS public.update_journal_persistence_ids(); -DROP TABLE IF EXISTS public.journal_persistence_ids; +DROP TRIGGER IF EXISTS trig_update_journal_metadata ON public.journal; +DROP FUNCTION IF EXISTS public.update_journal_metadata(); +DROP TABLE IF EXISTS public.journal_metadata; -CREATE TABLE public.journal_persistence_ids( +CREATE TABLE public.journal_metadata( id BIGINT GENERATED ALWAYS AS IDENTITY, persistence_id TEXT NOT NULL, max_sequence_number BIGINT NOT NULL, @@ -54,30 +54,30 @@ CREATE TABLE public.journal_persistence_ids( PRIMARY KEY (persistence_id) ) PARTITION BY HASH(persistence_id); -CREATE TABLE public.journal_persistence_ids_0 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 0); -CREATE TABLE public.journal_persistence_ids_1 PARTITION OF public.journal_persistence_ids FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE public.journal_metadata_0 PARTITION OF public.journal_metadata FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE public.journal_metadata_1 PARTITION OF public.journal_metadata FOR VALUES WITH (MODULUS 2, REMAINDER 1); -CREATE OR REPLACE FUNCTION 
public.update_journal_persistence_ids() RETURNS TRIGGER AS +CREATE OR REPLACE FUNCTION public.update_journal_metadata() RETURNS TRIGGER AS $$ DECLARE BEGIN - INSERT INTO public.journal_persistence_ids (persistence_id, max_sequence_number, max_ordering, min_ordering) + INSERT INTO public.journal_metadata (persistence_id, max_sequence_number, max_ordering, min_ordering) VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE SET max_sequence_number = NEW.sequence_number, max_ordering = NEW.ordering, - min_ordering = LEAST(public.journal_persistence_ids.min_ordering, NEW.ordering); + min_ordering = LEAST(public.journal_metadata.min_ordering, NEW.ordering); RETURN NEW; END; $$ LANGUAGE plpgsql; -CREATE TRIGGER trig_update_journal_persistence_ids +CREATE TRIGGER trig_update_journal_metadata AFTER INSERT ON public.journal FOR EACH ROW - EXECUTE PROCEDURE public.update_journal_persistence_ids(); + EXECUTE PROCEDURE public.update_journal_metadata(); CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS $$ @@ -94,6 +94,6 @@ LANGUAGE plpgsql; CREATE TRIGGER trig_check_persistence_id_max_sequence_number - BEFORE UPDATE ON public.journal_persistence_ids + BEFORE UPDATE ON public.journal_metadata FOR EACH ROW EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); diff --git a/core/src/test/scala/akka/persistence/postgres/SharedActorSystemTestSpec.scala b/core/src/test/scala/akka/persistence/postgres/SharedActorSystemTestSpec.scala index c6cf78d7..41b346b9 100644 --- a/core/src/test/scala/akka/persistence/postgres/SharedActorSystemTestSpec.scala +++ b/core/src/test/scala/akka/persistence/postgres/SharedActorSystemTestSpec.scala @@ -30,7 +30,7 @@ abstract class SharedActorSystemTestSpec(val config: Config) extends SimpleSpec implicit lazy val ec: ExecutionContext = system.dispatcher implicit val pc: PatienceConfig = PatienceConfig(timeout = 2.minutes) - implicit val timeout = Timeout(1.minute) + implicit val timeout: Timeout = Timeout(1.minute) lazy val serialization = SerializationExtension(system) diff --git a/core/src/test/scala/akka/persistence/postgres/SingleActorSystemPerTestSpec.scala b/core/src/test/scala/akka/persistence/postgres/SingleActorSystemPerTestSpec.scala index 129aef75..2232cd9a 100644 --- a/core/src/test/scala/akka/persistence/postgres/SingleActorSystemPerTestSpec.scala +++ b/core/src/test/scala/akka/persistence/postgres/SingleActorSystemPerTestSpec.scala @@ -27,7 +27,7 @@ abstract class SingleActorSystemPerTestSpec(val config: Config) }) implicit val pc: PatienceConfig = PatienceConfig(timeout = 2.minutes) - implicit val timeout: Timeout = Timeout(1.minutes) + implicit val timeout: Timeout = Timeout(1.minute) val cfg: Config = config.getConfig("postgres-journal") val journalConfig = new JournalConfig(cfg) diff --git a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala index 7a334223..29a87db8 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala @@ -6,6 +6,10 @@ import io.circe.{ Json, JsonObject } class JournalQueriesTest extends BaseQueryTest { + it should "produce SQL query for distinct persistenceID" in withJournalQueries { queries => + queries.allPersistenceIdsDistinct shouldBeSQL """select distinct "persistence_id" from 
"journal"""" + } + it should "create SQL query for highestMarkedSequenceNrForPersistenceId" in withJournalQueries { queries => queries.highestMarkedSequenceNrForPersistenceId( "aaa") shouldBeSQL """select max("sequence_number") from "journal" where ("deleted" = true) and ("persistence_id" = ?)""" @@ -14,7 +18,13 @@ class JournalQueriesTest extends BaseQueryTest { it should "create SQL query for highestSequenceNrForPersistenceId" in withJournalQueries { queries => queries.highestSequenceNrForPersistenceId( "aaa") shouldBeSQL """select max("sequence_number") from "journal" where "persistence_id" = ?""" - // queries.highestSequenceNrForPersistenceId("aaa") shouldBeSQL """select "max_sequence_number" from "journal_persistence_ids" where "persistence_id" = ? limit 1""" + // queries.highestSequenceNrForPersistenceId("aaa") shouldBeSQL """select "max_sequence_number" from "journal_metadata" where "persistence_id" = ? limit 1""" + } + + it should "create SQL query for selectByPersistenceIdAndMaxSequenceNumber" in withJournalQueries { queries => + queries.selectByPersistenceIdAndMaxSequenceNumber( + "aaa", + 11L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where ("persistence_id" = ?) and ("sequence_number" <= ?) order by "sequence_number" desc""" } it should "create SQL query for messagesQuery" in withJournalQueries { queries => @@ -58,7 +68,7 @@ class JournalQueriesTest extends BaseQueryTest { f( new JournalQueries( FlatJournalTable.apply(journalConfig.journalTableConfiguration), - JournalPersistenceIdsTable.apply(journalConfig.journalPersistenceIdsTableConfiguration))) + JournalMetadataTable.apply(journalConfig.journalMetadataTableConfiguration))) } } } diff --git a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalTablesTest.scala b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalTablesTest.scala index f253cb84..d57ed78f 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalTablesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalTablesTest.scala @@ -36,26 +36,26 @@ class JournalTablesTest extends TablesTestSpec { } } - val journalPersistenceIdsTableConfiguration = journalConfig.journalPersistenceIdsTableConfiguration - val journalPersistenceIdsTable = JournalPersistenceIdsTable(journalPersistenceIdsTableConfiguration) + val journalMetadataTableConfiguration = journalConfig.journalMetadataTableConfiguration + val journalMetadataTable = JournalMetadataTable(journalMetadataTableConfiguration) - "JournalPersistenceIdsTable" should "be configured with a schema name" in { - journalPersistenceIdsTable.baseTableRow.schemaName shouldBe journalPersistenceIdsTableConfiguration.schemaName + "JournalMetadataTable" should "be configured with a schema name" in { + journalMetadataTable.baseTableRow.schemaName shouldBe journalMetadataTableConfiguration.schemaName } it should "be configured with a table name" in { - journalPersistenceIdsTable.baseTableRow.tableName shouldBe journalPersistenceIdsTableConfiguration.tableName + journalMetadataTable.baseTableRow.tableName shouldBe journalMetadataTableConfiguration.tableName } it should "be configured with column names" in { - val colName = toColumnName(journalPersistenceIdsTableConfiguration.tableName)(_) - journalPersistenceIdsTable.baseTableRow.persistenceId.toString shouldBe colName( - journalPersistenceIdsTableConfiguration.columnNames.persistenceId) - 
journalPersistenceIdsTable.baseTableRow.maxSequenceNumber.toString shouldBe colName( - journalPersistenceIdsTableConfiguration.columnNames.maxSequenceNumber) - journalPersistenceIdsTable.baseTableRow.maxOrdering.toString shouldBe colName( - journalPersistenceIdsTableConfiguration.columnNames.maxOrdering) - journalPersistenceIdsTable.baseTableRow.minOrdering.toString shouldBe colName( - journalPersistenceIdsTableConfiguration.columnNames.minOrdering) + val colName = toColumnName(journalMetadataTableConfiguration.tableName)(_) + journalMetadataTable.baseTableRow.persistenceId.toString shouldBe colName( + journalMetadataTableConfiguration.columnNames.persistenceId) + journalMetadataTable.baseTableRow.maxSequenceNumber.toString shouldBe colName( + journalMetadataTableConfiguration.columnNames.maxSequenceNumber) + journalMetadataTable.baseTableRow.maxOrdering.toString shouldBe colName( + journalMetadataTableConfiguration.columnNames.maxOrdering) + journalMetadataTable.baseTableRow.minOrdering.toString shouldBe colName( + journalMetadataTableConfiguration.columnNames.minOrdering) } } diff --git a/core/src/test/scala/akka/persistence/postgres/query/CurrentEventsByTagWithGapsTest.scala b/core/src/test/scala/akka/persistence/postgres/query/CurrentEventsByTagWithGapsTest.scala index 9ceedcb3..19f9869e 100644 --- a/core/src/test/scala/akka/persistence/postgres/query/CurrentEventsByTagWithGapsTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/query/CurrentEventsByTagWithGapsTest.scala @@ -46,9 +46,9 @@ class CurrentEventsByTagWithGapsTest }.futureValue val journalTable = schemaType.table(journalConfig.journalTableConfiguration) - val journalPersistenceIdsTable = - schemaType.persistenceIdsTable(journalConfig.journalPersistenceIdsTableConfiguration) - val journalQueries = new JournalQueries(journalTable, journalPersistenceIdsTable) + val journalMetadataTable = + schemaType.metadataTable(journalConfig.journalMetadataTableConfiguration) + val journalQueries = new JournalQueries(journalTable, journalMetadataTable) val journalOps = new JavaDslPostgresReadJournalOperations(system) val tag = "testTag" diff --git a/core/src/test/scala/akka/persistence/postgres/util/DropCreate.scala b/core/src/test/scala/akka/persistence/postgres/util/DropCreate.scala index 890c7d81..84fa1024 100644 --- a/core/src/test/scala/akka/persistence/postgres/util/DropCreate.scala +++ b/core/src/test/scala/akka/persistence/postgres/util/DropCreate.scala @@ -6,10 +6,10 @@ package akka.persistence.postgres.util import java.sql.Statement -import akka.persistence.postgres.config.{ JournalPersistenceIdsTableConfiguration, JournalTableConfiguration } +import akka.persistence.postgres.config.{ JournalMetadataTableConfiguration, JournalTableConfiguration } import akka.persistence.postgres.journal.dao.{ FlatJournalTable, - JournalPersistenceIdsTable, + JournalMetadataTable, JournalTable, NestedPartitionsJournalTable, PartitionedJournalTable @@ -25,8 +25,8 @@ object Schema { lazy val schema: String = s"schema/postgres/$resourceNamePrefix-schema.sql" lazy val configName: String = s"${resourceNamePrefix}-application.conf" def table(journalTableCfg: JournalTableConfiguration): TableQuery[JournalTable] - def persistenceIdsTable(journalPersistenceIdsTableCfg: JournalPersistenceIdsTableConfiguration) - : TableQuery[JournalPersistenceIdsTable] = JournalPersistenceIdsTable.apply(journalPersistenceIdsTableCfg) + def metadataTable(journalMetadataTableCfg: JournalMetadataTableConfiguration) + : TableQuery[JournalMetadataTable] = 
JournalMetadataTable.apply(journalMetadataTableCfg) } case object Plain extends SchemaType { diff --git a/migration/src/main/scala/akka/persistence/postgres/migration/journal/Jdbc4JournalMigration.scala b/migration/src/main/scala/akka/persistence/postgres/migration/journal/Jdbc4JournalMigration.scala index 9fbb33b8..5ebd7831 100644 --- a/migration/src/main/scala/akka/persistence/postgres/migration/journal/Jdbc4JournalMigration.scala +++ b/migration/src/main/scala/akka/persistence/postgres/migration/journal/Jdbc4JournalMigration.scala @@ -69,7 +69,7 @@ class Jdbc4JournalMigration(globalConfig: Config, tempTableName: String = "tmp_j for { _ <- journalSchema.createTable _ <- journalSchema.createTagsTable - _ <- journalSchema.createJournalPersistenceIdsTable + _ <- journalSchema.createJournalMetadataTable } yield () } diff --git a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala index 0f5dd7e8..2c0a3475 100644 --- a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala +++ b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala @@ -24,12 +24,12 @@ private[journal] trait JournalSchema { def getTable: TableQuery[TempJournalTable] def createTable: DBIOAction[Unit, NoStream, Effect.Write] - def createJournalPersistenceIdsTable: DBIOAction[Unit, NoStream, Effect.Write] = { - val journalPersistenceIdsTableCfg = journalCfg.journalPersistenceIdsTableConfiguration + def createJournalMetadataTable: DBIOAction[Unit, NoStream, Effect.Write] = { + val journalMetadataTableCfg = journalCfg.journalMetadataTableConfiguration val fullTableName = - s"${journalPersistenceIdsTableCfg.schemaName.getOrElse("public")}.${journalPersistenceIdsTableCfg.tableName}" + s"${journalMetadataTableCfg.schemaName.getOrElse("public")}.${journalMetadataTableCfg.tableName}" - import journalPersistenceIdsTableCfg.columnNames._ + import journalMetadataTableCfg.columnNames._ for { _ <- sqlu"""CREATE TABLE #$fullTableName ( #$id BIGINT GENERATED ALWAYS AS IDENTITY, @@ -96,17 +96,17 @@ private[journal] trait JournalSchema { def createTriggers: DBIOAction[Unit, NoStream, Effect.Write] = { val journalTableCfg = journalCfg.journalTableConfiguration - val journalPersistenceIdsTableCfg = journalCfg.journalPersistenceIdsTableConfiguration - val schema = journalPersistenceIdsTableCfg.schemaName.getOrElse("public") - val fullTableName = s"$schema.${journalPersistenceIdsTableCfg.tableName}" + val journalMetadataTableCfg = journalCfg.journalMetadataTableConfiguration + val schema = journalMetadataTableCfg.schemaName.getOrElse("public") + val fullTableName = s"$schema.${journalMetadataTableCfg.tableName}" val journalFullTableName = s"$schema.${journalTableCfg.tableName}" - import journalPersistenceIdsTableCfg.columnNames._ + import journalMetadataTableCfg.columnNames._ import journalTableCfg.columnNames.{ persistenceId => jPersistenceId, _ } for { _ <- sqlu""" - CREATE OR REPLACE FUNCTION #$schema.update_journal_persistence_ids() RETURNS TRIGGER AS $$$$ + CREATE OR REPLACE FUNCTION #$schema.update_journal_metadata() RETURNS TRIGGER AS $$$$ DECLARE BEGIN INSERT INTO #$fullTableName (#$persistenceId, #$maxSequenceNumber, #$maxOrdering, #$minOrdering) @@ -123,17 +123,17 @@ private[journal] trait JournalSchema { """ _ <- sqlu""" - CREATE TRIGGER trig_update_journal_persistence_ids + CREATE TRIGGER trig_update_journal_metadata AFTER INSERT ON 
#$journalFullTableName FOR EACH ROW - EXECUTE PROCEDURE #$schema.update_journal_persistence_ids(); + EXECUTE PROCEDURE #$schema.update_journal_metadata(); """ _ <- sqlu""" - CREATE TRIGGER trig_update_journal_persistence_ids + CREATE TRIGGER trig_update_journal_metadata AFTER INSERT ON #$fullTmpTableName FOR EACH ROW - EXECUTE PROCEDURE #$schema.update_journal_persistence_ids(); + EXECUTE PROCEDURE #$schema.update_journal_metadata(); """ _ <- sqlu""" diff --git a/migration/src/test/resources/base-migration.conf b/migration/src/test/resources/base-migration.conf index c104d026..f156738e 100644 --- a/migration/src/test/resources/base-migration.conf +++ b/migration/src/test/resources/base-migration.conf @@ -63,14 +63,14 @@ postgres-journal { size = 50 } } - journalPersistenceIds { + journalMetadata { schemaName = "migration" - tableName = "fancy_journal_persistence_ids" + tableName = "fancy_journal_metadata" columnNames = { - persistenceId = "jpi_per_id" - maxSequenceNumber = "jpi_max_seq_num" - maxOrdering = "jpi_max_ord" - minOrdering = "jpi_min_ord" + persistenceId = "jm_per_id" + maxSequenceNumber = "jm_max_seq_num" + maxOrdering = "jm_max_ord" + minOrdering = "jm_min_ord" } } tags { diff --git a/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala b/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala index bd4da7bc..77ef1da8 100644 --- a/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala +++ b/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala @@ -177,8 +177,8 @@ trait PrepareDatabase extends BeforeAndAfterEach with BeforeAndAfterAll with Sca val journalTableConfig = journalConfig.journalTableConfiguration val journalTableName = journalTableConfig.tableName - val journalPersistenceIdsTableConfig = journalConfig.journalPersistenceIdsTableConfiguration - val journalPersistenceIdsTableName = journalPersistenceIdsTableConfig.tableName + val journalMetadataTableConfig = journalConfig.journalMetadataTableConfiguration + val journalMetadataTableName = journalMetadataTableConfig.tableName val tagsTableConfig = journalConfig.tagsTableConfiguration import journalTableConfig.columnNames.{ tags => tagsCol, _ } @@ -188,12 +188,12 @@ trait PrepareDatabase extends BeforeAndAfterEach with BeforeAndAfterAll with Sca _ <- sqlu"""DROP TABLE IF EXISTS migration.old_#$journalTableName""" _ <- sqlu"""DROP TABLE IF EXISTS migration.#$tempJournalTableName""" _ <- sqlu"""DROP TABLE IF EXISTS migration.#$journalTableName""" - _ <- sqlu"""DROP TRIGGER IF EXISTS trig_update_journal_persistence_ids ON migration.#$journalTableName""" - _ <- sqlu"""DROP FUNCTION IF EXISTS migration.update_journal_persistence_ids()""" + _ <- sqlu"""DROP TRIGGER IF EXISTS trig_update_journal_metadata ON migration.#$journalTableName""" + _ <- sqlu"""DROP FUNCTION IF EXISTS migration.update_journal_metadata()""" _ <- - sqlu"""DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON migration.#$journalPersistenceIdsTableName""" + sqlu"""DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON migration.#$journalMetadataTableName""" _ <- sqlu"""DROP FUNCTION IF EXISTS migration.check_persistence_id_max_sequence_number()""" - _ <- sqlu"""DROP TABLE IF EXISTS migration.#$journalPersistenceIdsTableName""" + _ <- sqlu"""DROP TABLE IF EXISTS migration.#$journalMetadataTableName""" _ <- sqlu"""CREATE TABLE IF NOT EXISTS migration.#$journalTableName ( #$ordering BIGSERIAL, diff --git 
a/scripts/migration-0.6.0/1-create-journal-metadata-table.sql b/scripts/migration-0.6.0/1-create-journal-metadata-table.sql new file mode 100644 index 00000000..660ec641 --- /dev/null +++ b/scripts/migration-0.6.0/1-create-journal-metadata-table.sql @@ -0,0 +1,41 @@ +-- Creates table and the amount of partitions defined by jm_partitions_number. Default is 10. +DO $$ +DECLARE + -- replace with appropriate values + schema CONSTANT TEXT := 'public'; + jm_table_name CONSTANT TEXT := 'journal_metadata'; + jm_id_column CONSTANT TEXT := 'id'; + jm_persistence_id_column CONSTANT TEXT := 'persistence_id'; + jm_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; + jm_max_ordering_column CONSTANT TEXT := 'max_ordering'; + jm_min_ordering_column CONSTANT TEXT := 'min_ordering'; + jm_partitions_table_name_prefix CONSTANT TEXT := 'journal_metadata_'; + jm_partitions_number CONSTANT INTEGER := 10; + + -- variables + jm_table TEXT; + jm_partition_table TEXT; + sql TEXT; +BEGIN + jm_table := schema || '.' || jm_table_name; + jm_partition_table := schema || '.' || jm_partitions_table_name_prefix; + + sql := 'CREATE TABLE IF NOT EXISTS ' || jm_table || + '(' || + jm_id_column || ' BIGINT GENERATED ALWAYS AS IDENTITY, ' || + jm_persistence_id_column || ' TEXT NOT NULL, ' || + jm_max_sequence_number_column || ' BIGINT NOT NULL, ' || + jm_max_ordering_column || ' BIGINT NOT NULL, ' || + jm_min_ordering_column || ' BIGINT NOT NULL, ' || + 'PRIMARY KEY (' || jm_persistence_id_column || ')' || + ') PARTITION BY HASH(' || jm_persistence_id_column || ')'; + + EXECUTE sql; + + FOR i IN 0..(jm_partitions_number - 1) LOOP + EXECUTE 'CREATE TABLE IF NOT EXISTS ' || jm_partition_table || i || + ' PARTITION OF ' || jm_table || + ' FOR VALUES WITH (MODULUS ' || jm_partitions_number || ', REMAINDER ' || i || ')'; + END LOOP; +END; +$$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql b/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql deleted file mode 100644 index 1b86a521..00000000 --- a/scripts/migration-0.6.0/1-create-journal-persistence-ids-table.sql +++ /dev/null @@ -1,39 +0,0 @@ --- Creates table and the amount of partitions defined by jpi_partitions_number. Default is 10. -DO $$ -DECLARE - -- replace with appropriate values - schema CONSTANT TEXT := 'public'; - jpi_table_name CONSTANT TEXT := 'journal_persistence_ids'; - jpi_id_column CONSTANT TEXT := 'id'; - jpi_persistence_id_column CONSTANT TEXT := 'persistence_id'; - jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; - jpi_max_ordering_column CONSTANT TEXT := 'max_ordering'; - jpi_min_ordering_column CONSTANT TEXT := 'min_ordering'; - jpi_partitions_table_name_prefix CONSTANT TEXT := 'journal_persistence_ids_'; - jpi_partitions_number CONSTANT INTEGER := 10; - - -- variables - jpi_table TEXT; - sql TEXT; -BEGIN - jpi_table := schema || '.' 
|| jpi_table_name; - - sql := 'CREATE TABLE IF NOT EXISTS ' || jpi_table || - '(' || - jpi_id_column || ' BIGINT GENERATED ALWAYS AS IDENTITY, ' || - jpi_persistence_id_column || ' TEXT NOT NULL, ' || - jpi_max_sequence_number_column || ' BIGINT NOT NULL, ' || - jpi_max_ordering_column || ' BIGINT NOT NULL, ' || - jpi_min_ordering_column || ' BIGINT NOT NULL, ' || - 'PRIMARY KEY (' || jpi_persistence_id_column || ')' || - ') PARTITION BY HASH(' || jpi_persistence_id_column || ')'; - - EXECUTE sql; - - FOR i IN 0..(jpi_partitions_number - 1) LOOP - EXECUTE 'CREATE TABLE IF NOT EXISTS ' || jpi_partitions_table_name_prefix || i || - ' PARTITION OF ' || jpi_table || - ' FOR VALUES WITH (MODULUS ' || jpi_partitions_number || ', REMAINDER ' || i || ')'; - END LOOP; -END; -$$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql b/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql new file mode 100644 index 00000000..5532d53e --- /dev/null +++ b/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql @@ -0,0 +1,39 @@ +-- replace schema value if required +CREATE OR REPLACE FUNCTION public.update_journal_metadata() RETURNS TRIGGER AS +$$ +DECLARE + -- replace with appropriate values + schema CONSTANT TEXT := 'public'; + j_table_name CONSTANT TEXT := 'journal'; + j_persistence_id_column CONSTANT TEXT := 'persistence_id'; + j_sequence_number_column CONSTANT TEXT := 'sequence_number'; + j_ordering_column CONSTANT TEXT := 'ordering'; + jm_table_name CONSTANT TEXT := 'journal_metadata'; + jm_persistence_id_column CONSTANT TEXT := 'persistence_id'; + jm_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; + jm_max_ordering_column CONSTANT TEXT := 'max_ordering'; + jm_min_ordering_column CONSTANT TEXT := 'min_ordering'; + + -- variables + j_table TEXT; + jm_table TEXT; + cols TEXT; + vals TEXT; + upds TEXT; + sql TEXT; +BEGIN + j_table := schema || '.' || j_table_name; + jm_table := schema || '.' || jm_table_name; + cols := jm_persistence_id_column || ', ' || jm_max_sequence_number_column || ', ' || jm_max_ordering_column || ', ' || jm_min_ordering_column; + vals := '($1).' || j_persistence_id_column || ', ($1).' || j_sequence_number_column || ', ($1).' || j_ordering_column || ',($1).' || j_ordering_column; + upds := jm_max_sequence_number_column || ' = ($1).' || j_sequence_number_column || ', ' || + jm_max_ordering_column || ' = ($1).' || j_ordering_column || ', ' || + jm_min_ordering_column || ' = LEAST(' || jm_table || '.' || jm_min_ordering_column || ', ($1).' 
|| j_ordering_column || ')'; + + sql := 'INSERT INTO ' || jm_table || ' (' || cols || ') VALUES (' || vals || ') ' || + 'ON CONFLICT (' || jm_persistence_id_column || ') DO UPDATE SET ' || upds; + + EXECUTE sql USING NEW; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql b/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql deleted file mode 100644 index 61a11de1..00000000 --- a/scripts/migration-0.6.0/2-create-function-update-journal-persistence-ids.sql +++ /dev/null @@ -1,39 +0,0 @@ --- replace schema value if required -CREATE OR REPLACE FUNCTION public.update_journal_persistence_ids() RETURNS TRIGGER AS -$$ -DECLARE - -- replace with appropriate values - schema CONSTANT TEXT := 'public'; - j_table_name CONSTANT TEXT := 'journal'; - j_persistence_id_column CONSTANT TEXT := 'persistence_id'; - j_sequence_number_column CONSTANT TEXT := 'sequence_number'; - j_ordering_column CONSTANT TEXT := 'ordering'; - jpi_table_name CONSTANT TEXT := 'journal_persistence_ids'; - jpi_persistence_id_column CONSTANT TEXT := 'persistence_id'; - jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; - jpi_max_ordering_column CONSTANT TEXT := 'max_ordering'; - jpi_min_ordering_column CONSTANT TEXT := 'min_ordering'; - - -- variables - j_table TEXT; - jpi_table TEXT; - cols TEXT; - vals TEXT; - upds TEXT; - sql TEXT; -BEGIN - j_table := schema || '.' || j_table_name; - jpi_table := schema || '.' || jpi_table_name; - cols := jpi_persistence_id_column || ', ' || jpi_max_sequence_number_column || ', ' || jpi_max_ordering_column || ', ' || jpi_min_ordering_column; - vals := '($1).' || j_persistence_id_column || ', ($1).' || j_sequence_number_column || ', ($1).' || j_ordering_column || ',($1).' || j_ordering_column; - upds := jpi_max_sequence_number_column || ' = ($1).' || j_sequence_number_column || ', ' || - jpi_max_ordering_column || ' = ($1).' || j_ordering_column || ', ' || - jpi_min_ordering_column || ' = LEAST(' || jpi_table || '.' || jpi_min_ordering_column || ', ($1).' || j_ordering_column || ')'; - - sql := 'INSERT INTO ' || jpi_table || ' (' || cols || ') VALUES (' || vals || ') ' || - 'ON CONFLICT (' || jpi_persistence_id_column || ') DO UPDATE SET ' || upds; - - EXECUTE sql USING NEW; - RETURN NEW; -END; -$$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/3-create-trigger-update-journal-persistence-ids.sql b/scripts/migration-0.6.0/3-create-trigger-update-journal-metadata.sql similarity index 70% rename from scripts/migration-0.6.0/3-create-trigger-update-journal-persistence-ids.sql rename to scripts/migration-0.6.0/3-create-trigger-update-journal-metadata.sql index 719115c4..23bee52a 100644 --- a/scripts/migration-0.6.0/3-create-trigger-update-journal-persistence-ids.sql +++ b/scripts/migration-0.6.0/3-create-trigger-update-journal-metadata.sql @@ -10,9 +10,9 @@ DECLARE BEGIN j_table := schema || '.' 
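-- Note (an assumption about usage, not stated in the scripts themselves): the numbered migration scripts
-- are meant to be applied in order against the target database, for example with psql:
--   psql -d <your_db> -f 1-create-journal-metadata-table.sql
--   psql -d <your_db> -f 2-create-function-update-journal-metadata.sql
--   psql -d <your_db> -f 3-create-trigger-update-journal-metadata.sql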
|| j_table_name; - sql := 'CREATE TRIGGER trig_update_journal_persistence_id + sql := 'CREATE TRIGGER trig_update_journal_metadata AFTER INSERT ON ' || j_table || ' FOR EACH ROW - EXECUTE PROCEDURE ' || schema || '.update_journal_persistence_ids()'; + EXECUTE PROCEDURE ' || schema || '.update_journal_metadata()'; EXECUTE sql; END; diff --git a/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql b/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql index 3846e1ce..81337e55 100644 --- a/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql +++ b/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql @@ -3,12 +3,12 @@ CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RET $$ DECLARE -- replace with appropriate values - jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; + jm_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; -- variables sql TEXT; BEGIN - sql := 'IF NEW.' || jpi_max_sequence_number_column || ' <= OLD.' || jpi_max_sequence_number_column || ' THEN + sql := 'IF NEW.' || jm_max_sequence_number_column || ' <= OLD.' || jm_max_sequence_number_column || ' THEN RAISE EXCEPTION ''New max_sequence_number not higher than previous value''; END IF;'; diff --git a/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql b/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql index f20618d0..cad3f35d 100644 --- a/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql +++ b/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql @@ -2,16 +2,16 @@ DO $$ DECLARE -- replace with appropriate values schema CONSTANT TEXT := 'public'; - jpi_table_name CONSTANT TEXT := 'journal_persistence_ids'; + jm_table_name CONSTANT TEXT := 'journal_metadata'; -- variables - jpi_table TEXT; + jm_table TEXT; sql TEXT; BEGIN - jpi_table := schema || '.' || jpi_table_name; + jm_table := schema || '.' || jm_table_name; sql := 'CREATE TRIGGER trig_check_persistence_id_max_sequence_number - BEFORE UPDATE ON ' || jpi_table || ' FOR EACH ROW + BEFORE UPDATE ON ' || jm_table || ' FOR EACH ROW EXECUTE PROCEDURE ' || schema || '.check_persistence_id_max_sequence_number()'; EXECUTE sql; diff --git a/scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql b/scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql deleted file mode 100644 index 222c1e59..00000000 --- a/scripts/migration-0.6.0/6-populate-journal-persistence-ids-table.sql +++ /dev/null @@ -1,31 +0,0 @@ -DO $$ -DECLARE - -- replace with appropriate values - schema CONSTANT TEXT := 'public'; - j_table_name CONSTANT TEXT := 'journal'; - j_persistence_id_column CONSTANT TEXT := 'persistence_id'; - j_sequence_number_column CONSTANT TEXT := 'sequence_number'; - j_ordering_column CONSTANT TEXT := 'ordering'; - jpi_table_name CONSTANT TEXT := 'journal_persistence_ids'; - jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; - jpi_max_ordering_column CONSTANT TEXT := 'max_ordering'; - jpi_min_ordering_column CONSTANT TEXT := 'min_ordering'; - - -- variables - j_table TEXT; - jpi_table TEXT; - sql TEXT; -BEGIN - j_table := schema || '.' || j_table_name; - jpi_table := schema || '.' 
|| jpi_table_name; - sql := 'INSERT INTO ' || jpi_table || - ' SELECT ' || - j_persistence_id_column || ', ' || - 'max(' || j_sequence_number_column || '), ' || - 'max(' || j_ordering_column || '), ' || - 'min(' || j_ordering_column || ')' || - ' FROM ' || j_table || ' GROUP BY ' || j_persistence_id_column; - - EXECUTE sql; -END; -$$ LANGUAGE plpgsql; From 6579531ec895021269a541bfe4d121db6e6f2c30 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Tue, 4 Jul 2023 17:58:11 +0100 Subject: [PATCH 20/34] Use journal_metadata table on messagesQuery and highestSequenceNrForPersistenceId --- .../journal/dao/BaseByteArrayJournalDao.scala | 15 ++++++-- .../postgres/journal/dao/JournalQueries.scala | 38 +++++++++++++++---- .../journal/dao/PartitionedJournalDao.scala | 23 +++++++++++ .../query/dao/ByteArrayReadJournalDao.scala | 30 ++++++++++++++- .../query/dao/ReadJournalQueries.scala | 29 +++++++++++++- .../journal/dao/JournalQueriesTest.scala | 21 +++++++++- .../query/JournalSequenceActorTest.scala | 27 ++++++++++++- .../query/dao/ReadJournalQueriesTest.scala | 15 ++++++++ 8 files changed, 182 insertions(+), 16 deletions(-) diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala index 41a61845..e432d5b4 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala @@ -137,10 +137,17 @@ trait BaseByteArrayJournalDao extends JournalDaoWithUpdates with BaseJournalDaoW private def highestMarkedSequenceNr(persistenceId: String) = queries.highestMarkedSequenceNrForPersistenceId(persistenceId).result - override def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = - for { - maybeHighestSeqNo <- db.run(queries.highestSequenceNrForPersistenceId(persistenceId).result) - } yield maybeHighestSeqNo.getOrElse(0L) + override def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = { + db.run(queries.highestStoredSequenceNrForPersistenceId(persistenceId).result.headOption).flatMap { + case Some(maxSequenceNr) => + // journal_metadata has the max sequence nr stored + Future.successful(maxSequenceNr) + case None => + // journal_metadata has yet to store the max sequence number to this persistenceId + db.run(queries.highestSequenceNrForPersistenceId(persistenceId).result) + .map(_.getOrElse(0L)) // Default to 0L when nothing is found for this persistenceId + } + } override def messages( persistenceId: String, diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala index d6b859fe..d98feaaa 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala @@ -10,9 +10,7 @@ import io.circe.Json import slick.lifted.TableQuery import slick.sql.FixedSqlAction -class JournalQueries( - journalTable: TableQuery[JournalTable], - journalMetadataTable: TableQuery[JournalMetadataTable]) { +class JournalQueries(journalTable: TableQuery[JournalTable], journalMetadataTable: TableQuery[JournalMetadataTable]) { import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ @@ -52,16 +50,18 @@ class JournalQueries( private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]): 
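// Illustrative note (matches the SQL asserted in JournalQueriesTest further down in this patch): the new
// highestStoredSequenceNrForPersistenceId lookup compiles to roughly
//   select "max_sequence_number" from "journal_metadata" where "persistence_id" = ? limit 1
// whereas the pre-existing query right below still computes max("sequence_number") over the whole journal table.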
Rep[Option[Long]] = journalTable.filter(_.persistenceId === persistenceId).map(_.sequenceNumber).max -// journalMetadataTable -// .filter(_.persistenceId === persistenceId) -// .map(_.maxSequenceNumber) -// .max // TODO replace with more appropriate combinator? + + private def _highestStoredSequenceNrForPersistenceId(persistenceId: Rep[String]): Query[Rep[Long], Long, Seq] = { + journalMetadataTable.filter(_.persistenceId === persistenceId).map(_.maxSequenceNumber).take(1) + } private def _highestMarkedSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] = journalTable.filter(_.deleted === true).filter(_.persistenceId === persistenceId).map(_.sequenceNumber).max val highestSequenceNrForPersistenceId = Compiled(_highestSequenceNrForPersistenceId _) + val highestStoredSequenceNrForPersistenceId = Compiled(_highestStoredSequenceNrForPersistenceId _) + val highestMarkedSequenceNrForPersistenceId = Compiled(_highestMarkedSequenceNrForPersistenceId _) private def _selectByPersistenceIdAndMaxSequenceNumber(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) = @@ -74,6 +74,12 @@ class JournalQueries( val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct) + private def _minAndMaxOrderingStoredForPersistenceId( + persistenceId: Rep[String]): Query[(Rep[Long], Rep[Long]), (Long, Long), Seq] = + journalMetadataTable.filter(_.persistenceId === persistenceId).take(1).map(r => (r.minOrdering, r.maxOrdering)) + + val minAndMaxOrderingStoredForPersistenceId = Compiled(_minAndMaxOrderingStoredForPersistenceId _) + private def _messagesQuery( persistenceId: Rep[String], fromSequenceNr: Rep[Long], @@ -87,6 +93,24 @@ class JournalQueries( .sortBy(_.sequenceNumber.asc) .take(max) + private def _messagesOrderingBoundedQuery( + persistenceId: Rep[String], + fromSequenceNr: Rep[Long], + toSequenceNr: Rep[Long], + max: ConstColumn[Long], + minOrdering: Rep[Long], + maxOrdering: Rep[Long]): Query[JournalTable, JournalRow, Seq] = + journalTable + .filter(_.persistenceId === persistenceId) + .filter(_.deleted === false) + .filter(_.sequenceNumber >= fromSequenceNr) + .filter(_.sequenceNumber <= toSequenceNr) + .filter(_.ordering >= minOrdering) + .filter(_.ordering <= maxOrdering) + .sortBy(_.sequenceNumber.asc) + .take(max) + val messagesQuery = Compiled(_messagesQuery _) + val messagesOrderingBoundedQuery = Compiled(_messagesOrderingBoundedQuery _) } diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala index 5ff862f4..8c7faa61 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala @@ -1,15 +1,19 @@ package akka.persistence.postgres.journal.dao +import akka.NotUsed +import akka.persistence.PersistentRepr import akka.persistence.postgres.JournalRow import akka.persistence.postgres.config.JournalConfig import akka.persistence.postgres.db.DbErrors.{ withHandledIndexErrors, withHandledPartitionErrors } import akka.serialization.Serialization import akka.stream.Materializer +import akka.stream.scaladsl.Source import slick.jdbc.JdbcBackend.Database import java.util.concurrent.atomic.AtomicReference import scala.collection.immutable.{ Nil, Seq } import scala.concurrent.{ ExecutionContext, Future } +import scala.util.Try class PartitionedJournalDao(db: Database, journalConfig: JournalConfig, serialization: Serialization)( 
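// Illustrative note (matches the SQL asserted in JournalQueriesTest and ReadJournalQueriesTest further down):
// messagesOrderingBoundedQuery is the plain messages query plus "ordering" >= ? and "ordering" <= ? bounds,
// which gives the Postgres planner a chance to skip journal partitions that cannot contain rows for the
// persistence id, as the class below uses the min/max ordering stored in journal_metadata.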
implicit ec: ExecutionContext, @@ -86,4 +90,23 @@ class PartitionedJournalDao(db: Database, journalConfig: JournalConfig, serializ DBIO.successful(()) } } + + override def messages( + persistenceId: String, + fromSequenceNr: Long, + toSequenceNr: Long, + max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = { + // Query the metadata table to get the known min and max ordering a persistence_id has, + // so that the postgres query planner might immediately discard scanning unnecessary partitions + val messagesQuery = queries.minAndMaxOrderingStoredForPersistenceId(persistenceId).result.headOption.flatMap { + case Some((minOrdering, maxOrdering)) => + queries + .messagesOrderingBoundedQuery(persistenceId, fromSequenceNr, toSequenceNr, max, minOrdering, maxOrdering) + .result + case None => + queries.messagesQuery(persistenceId, fromSequenceNr, toSequenceNr, max).result + } + + Source.fromPublisher(db.stream(messagesQuery)).via(serializer.deserializeFlow) + } } diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala index 778ba038..24d80484 100644 --- a/core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala @@ -51,11 +51,10 @@ trait BaseByteArrayReadJournalDao extends ReadJournalDao with BaseJournalDaoWith persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, - max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = { + max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = Source .fromPublisher(db.stream(queries.messagesQuery(persistenceId, fromSequenceNr, toSequenceNr, max).result)) .via(serializer.deserializeFlow) - } override def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] = Source.fromPublisher(db.stream(queries.orderingByOrdering(offset, limit).result)) @@ -78,3 +77,30 @@ class ByteArrayReadJournalDao( new SimpleTagDao(db, readJournalConfig.tagsTableConfiguration), readJournalConfig.tagsConfig)) } + +class PartitionedReadJournalDao( + db: Database, + readJournalConfig: ReadJournalConfig, + serialization: Serialization, + tagIdResolver: TagIdResolver)(implicit ec: ExecutionContext, mat: Materializer) + extends ByteArrayReadJournalDao(db, readJournalConfig, serialization, tagIdResolver) { + + import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ + + override def messages( + persistenceId: String, + fromSequenceNr: Long, + toSequenceNr: Long, + max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = { + val messagesQuery = queries.minAndMaxOrderingStoredForPersistenceId(persistenceId).result.headOption.flatMap { + case Some((minOrdering, maxOrdering)) => + queries + .messagesOrderingBoundedQuery(persistenceId, fromSequenceNr, toSequenceNr, max, minOrdering, maxOrdering) + .result + case None => + queries.messagesQuery(persistenceId, fromSequenceNr, toSequenceNr, max).result + } + + Source.fromPublisher(db.stream(messagesQuery)).via(serializer.deserializeFlow) + } +} diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala index 23dd3c37..1c842575 100644 --- a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala @@ -13,6 +13,8 @@ class 
ReadJournalQueries(val readJournalConfig: ReadJournalConfig) { import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ private val journalTable: TableQuery[JournalTable] = FlatJournalTable(readJournalConfig.journalTableConfiguration) + private val journalMetadataTable: TableQuery[JournalMetadataTable] = + JournalMetadataTable.apply(readJournalConfig.journalMetadataTableConfiguration) private def _allPersistenceIdsDistinct(max: ConstColumn[Long]): Query[Rep[String], String, Seq] = baseTableQuery().map(_.persistenceId).distinct.take(max) @@ -23,11 +25,17 @@ class ReadJournalQueries(val readJournalConfig: ReadJournalConfig) { val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct _) + private def _minAndMaxOrderingStoredForPersistenceId( + persistenceId: Rep[String]): Query[(Rep[Long], Rep[Long]), (Long, Long), Seq] = + journalMetadataTable.filter(_.persistenceId === persistenceId).take(1).map(r => (r.minOrdering, r.maxOrdering)) + + val minAndMaxOrderingStoredForPersistenceId = Compiled(_minAndMaxOrderingStoredForPersistenceId _) + private def _messagesQuery( persistenceId: Rep[String], fromSequenceNr: Rep[Long], toSequenceNr: Rep[Long], - max: ConstColumn[Long]) = + max: ConstColumn[Long]): Query[JournalTable, JournalRow, Seq] = baseTableQuery() .filter(_.persistenceId === persistenceId) .filter(_.sequenceNumber >= fromSequenceNr) @@ -35,8 +43,27 @@ class ReadJournalQueries(val readJournalConfig: ReadJournalConfig) { .sortBy(_.sequenceNumber.asc) .take(max) + private def _messagesOrderingBoundedQuery( + persistenceId: Rep[String], + fromSequenceNr: Rep[Long], + toSequenceNr: Rep[Long], + max: ConstColumn[Long], + minOrdering: Rep[Long], + maxOrdering: Rep[Long]): Query[JournalTable, JournalRow, Seq] = + baseTableQuery() + .filter(_.persistenceId === persistenceId) + .filter(_.deleted === false) + .filter(_.sequenceNumber >= fromSequenceNr) + .filter(_.sequenceNumber <= toSequenceNr) + .filter(_.ordering >= minOrdering) + .filter(_.ordering <= maxOrdering) + .sortBy(_.sequenceNumber.asc) + .take(max) + val messagesQuery = Compiled(_messagesQuery _) + val messagesOrderingBoundedQuery = Compiled(_messagesOrderingBoundedQuery _) + protected def _eventsByTag( tag: Rep[List[Int]], offset: ConstColumn[Long], diff --git a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala index 29a87db8..682e89ea 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala @@ -18,7 +18,11 @@ class JournalQueriesTest extends BaseQueryTest { it should "create SQL query for highestSequenceNrForPersistenceId" in withJournalQueries { queries => queries.highestSequenceNrForPersistenceId( "aaa") shouldBeSQL """select max("sequence_number") from "journal" where "persistence_id" = ?""" - // queries.highestSequenceNrForPersistenceId("aaa") shouldBeSQL """select "max_sequence_number" from "journal_metadata" where "persistence_id" = ? limit 1""" + } + + it should "create SQL query for highestStoredSequenceNrForPersistenceId" in withJournalQueries { queries => + queries.highestStoredSequenceNrForPersistenceId( + "aaa") shouldBeSQL """select "max_sequence_number" from "journal_metadata" where "persistence_id" = ? 
limit 1""" } it should "create SQL query for selectByPersistenceIdAndMaxSequenceNumber" in withJournalQueries { queries => @@ -27,6 +31,11 @@ class JournalQueriesTest extends BaseQueryTest { 11L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where ("persistence_id" = ?) and ("sequence_number" <= ?) order by "sequence_number" desc""" } + it should "create SQL query for minAndMaxOrderingStoredForPersistenceId" in withJournalQueries { queries => + queries.minAndMaxOrderingStoredForPersistenceId( + "aaa") shouldBeSQL """select "min_ordering", "max_ordering" from "journal_metadata" where "persistence_id" = ? limit 1""" + } + it should "create SQL query for messagesQuery" in withJournalQueries { queries => queries.messagesQuery( "aaa", @@ -35,6 +44,16 @@ class JournalQueriesTest extends BaseQueryTest { 11L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where ((("persistence_id" = ?) and ("deleted" = false)) and ("sequence_number" >= ?)) and ("sequence_number" <= ?) order by "sequence_number" limit ?""" } + it should "create SQL query for messagesOrderingBoundedQuery" in withJournalQueries { queries => + queries.messagesOrderingBoundedQuery( + "aaa", + 11L, + 11L, + 11L, + 11L, + 11L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where ((((("persistence_id" = ?) and ("deleted" = false)) and ("sequence_number" >= ?)) and ("sequence_number" <= ?)) and ("ordering" >= ?)) and ("ordering" <= ?) order by "sequence_number" limit ?""" + } + it should "create SQL query for markJournalMessagesAsDeleted" in withJournalQueries { queries => queries.markJournalMessagesAsDeleted( "aaa", diff --git a/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala b/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala index b7173c75..96d4da5c 100644 --- a/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala @@ -11,7 +11,11 @@ import akka.pattern.ask import akka.persistence.postgres.config.JournalSequenceRetrievalConfig import akka.persistence.postgres.db.ExtendedPostgresProfile import akka.persistence.postgres.query.JournalSequenceActor.{ GetMaxOrderingId, MaxOrderingId } -import akka.persistence.postgres.query.dao.{ ByteArrayReadJournalDao, TestProbeReadJournalDao } +import akka.persistence.postgres.query.dao.{ + ByteArrayReadJournalDao, + PartitionedReadJournalDao, + TestProbeReadJournalDao +} import akka.persistence.postgres.tag.{ CachedTagIdResolver, SimpleTagDao } import akka.persistence.postgres.util.Schema.{ NestedPartitions, Partitioned, Plain, SchemaType } import akka.persistence.postgres.{ JournalRow, SharedActorSystemTestSpec } @@ -22,6 +26,7 @@ import akka.testkit.TestProbe import io.circe.{ Json, JsonObject } import org.scalatest.time.Span import org.slf4j.LoggerFactory +import slick.jdbc import slick.jdbc.{ JdbcBackend, JdbcCapabilities } import scala.concurrent.Future @@ -316,6 +321,26 @@ class PartitionedJournalSequenceActorTest extends JournalSequenceActorTest(Parti } } } + + override def withJournalSequenceActor(db: jdbc.JdbcBackend.Database, maxTries: Int)(f: ActorRef => Unit)( + implicit system: ActorSystem): Unit = { + import system.dispatcher + implicit val mat: Materializer = 
SystemMaterializer(system).materializer + val readJournalDao = + new PartitionedReadJournalDao( + db, + readJournalConfig, + SerializationExtension(system), + new CachedTagIdResolver( + new SimpleTagDao(db, readJournalConfig.tagsTableConfiguration), + readJournalConfig.tagsConfig)) + val actor = + system.actorOf( + JournalSequenceActor + .props(readJournalDao, readJournalConfig.journalSequenceRetrievalConfiguration.copy(maxTries = maxTries))) + try f(actor) + finally system.stop(actor) + } } class PlainJournalSequenceActorTest extends JournalSequenceActorTest(Plain) diff --git a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala index 54c27721..90a179cf 100644 --- a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala @@ -8,6 +8,11 @@ class ReadJournalQueriesTest extends BaseQueryTest { queries.allPersistenceIdsDistinct(23L) shouldBeSQL """select distinct "persistence_id" from "journal" limit ?""" } + it should "create SQL query for minAndMaxOrderingStoredForPersistenceId" in withReadJournalQueries { queries => + queries.minAndMaxOrderingStoredForPersistenceId( + "aaa") shouldBeSQL """select "min_ordering", "max_ordering" from "journal_metadata" where "persistence_id" = ? limit 1""" + } + it should "create SQL query for messagesQuery" in withReadJournalQueries { queries => queries.messagesQuery( "p1", @@ -16,6 +21,16 @@ class ReadJournalQueriesTest extends BaseQueryTest { 5L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where (("persistence_id" = ?) and ("sequence_number" >= ?)) and ("sequence_number" <= ?) order by "sequence_number" limit ?""" } + it should "create SQL query for messagesOrderingBoundedQuery" in withReadJournalQueries { queries => + queries.messagesOrderingBoundedQuery( + "aaa", + 1L, + 4L, + 5L, + 1L, + 10L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where ((((("persistence_id" = ?) and ("deleted" = false)) and ("sequence_number" >= ?)) and ("sequence_number" <= ?)) and ("ordering" >= ?)) and ("ordering" <= ?) 
order by "sequence_number" limit ?""" + } + it should "create SQL query for eventsByTag" in withReadJournalQueries { queries => queries.eventsByTag( List(11), From 7c0c540e79ee29fe80307d55e29e5f7bb1f7316d Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Wed, 5 Jul 2023 16:43:15 +0100 Subject: [PATCH 21/34] Make usage of journal metadata optional through configuration --- core/src/main/resources/reference.conf | 45 +++++++++++++- .../config/AkkaPersistenceConfig.scala | 8 ++- .../journal/dao/BaseByteArrayJournalDao.scala | 27 ++++++--- .../postgres/journal/dao/FlatJournalDao.scala | 4 +- .../journal/dao/JournalMetadataQueries.scala | 19 ++++++ .../postgres/journal/dao/JournalQueries.scala | 14 +---- .../dao/NestedPartitionsJournalDao.scala | 4 +- .../journal/dao/PartitionedJournalDao.scala | 32 +++++----- ...cala => BaseByteArrayReadJournalDao.scala} | 47 ++------------- .../query/dao/FlatReadJournalDao.scala | 24 ++++++++ .../query/dao/PartitionedReadJournalDao.scala | 58 +++++++++++++++++++ .../dao/ReadJournalMetadataQueries.scala | 15 +++++ .../query/dao/ReadJournalQueries.scala | 8 --- ...application-with-use-journal-metadata.conf | 2 + ...application-with-use-journal-metadata.conf | 2 + ...application-with-use-journal-metadata.conf | 4 ++ .../journal/PostgresJournalPerfSpec.scala | 15 ++++- .../journal/PostgresJournalSpec.scala | 37 +++++++----- .../dao/JournalMetadataQueriesTest.scala | 22 +++++++ .../journal/dao/JournalQueriesTest.scala | 15 +---- .../CurrentEventsByTagWithGapsTest.scala | 4 +- .../query/JournalSequenceActorTest.scala | 9 +-- .../dao/ReadJournalMetadataQueriesTest.scala | 17 ++++++ .../query/dao/ReadJournalQueriesTest.scala | 5 -- docs/custom-dao.md | 2 +- 25 files changed, 295 insertions(+), 144 deletions(-) create mode 100644 core/src/main/scala/akka/persistence/postgres/journal/dao/JournalMetadataQueries.scala rename core/src/main/scala/akka/persistence/postgres/query/dao/{ByteArrayReadJournalDao.scala => BaseByteArrayReadJournalDao.scala} (58%) create mode 100644 core/src/main/scala/akka/persistence/postgres/query/dao/FlatReadJournalDao.scala create mode 100644 core/src/main/scala/akka/persistence/postgres/query/dao/PartitionedReadJournalDao.scala create mode 100644 core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalMetadataQueries.scala create mode 100644 core/src/test/resources/nested-partitions-application-with-use-journal-metadata.conf create mode 100644 core/src/test/resources/partitioned-application-with-use-journal-metadata.conf create mode 100644 core/src/test/resources/plain-application-with-use-journal-metadata.conf create mode 100644 core/src/test/scala/akka/persistence/postgres/journal/dao/JournalMetadataQueriesTest.scala create mode 100644 core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalMetadataQueriesTest.scala diff --git a/core/src/main/resources/reference.conf b/core/src/main/resources/reference.conf index 191e68d0..1bb83259 100644 --- a/core/src/main/resources/reference.conf +++ b/core/src/main/resources/reference.conf @@ -126,7 +126,17 @@ postgres-journal { metadata = "metadata" } } - + # Used to hold journal information that can be used to speed up queries + journalMetadata { + tableName = "journal_metadata" + schemaName = "" + columnNames = { + persistenceId = "persistence_id" + maxSequenceNumber = "max_sequence_number" + maxOrdering = "max_ordering" + minOrdering = "min_ordering" + } + } tags { tableName = "tags" schameName = "" @@ -176,6 +186,14 @@ postgres-journal { # to the same value for these other 
journals. use-shared-db = null + # This setting can be used to enable the usage of the data being stored + # at the journal_metadata table, in order to speed up some queries that would + # solely use the journal table. + # In case the metadata table does not hold the required information (not available yet), + # the logic falls back to the journal-only queries. + # This setting is disabled by default. + use-journal-metadata = false + slick { db { @@ -358,7 +376,18 @@ postgres-read-journal { # to the same value for these other journals. use-shared-db = null - dao = "akka.persistence.postgres.query.dao.ByteArrayReadJournalDao" + # This setting can be used to enable the usage of the data being stored + # at the journal_metadata table, in order to speed up some queries that would + # solely use the journal table. + # In case the metadata table does not hold the required information (not available yet), + # the logic falls back to the journal-only queries. + # This setting is disabled by default. + use-journal-metadata = false + + + # Replace with "akka.persistence.postgres.query.dao.PartitionedReadJournalDao" in order to leverage dedicated queries to + # the partitioned journal. + dao = "akka.persistence.postgres.query.dao.FlatReadJournalDao" # Confguration for akka.persistence.postgres.tag.TagIdResolver tags { @@ -402,7 +431,17 @@ postgres-read-journal { message = "message" } } - + # Used to hold journal information that can be used to speed up queries + journalMetadata { + tableName = "journal_metadata" + schemaName = "" + columnNames = { + persistenceId = "persistence_id" + maxSequenceNumber = "max_sequence_number" + maxOrdering = "max_ordering" + minOrdering = "min_ordering" + } + } tags { tableName = "tags" schameName = "" diff --git a/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala b/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala index 6d471427..78fece43 100644 --- a/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala +++ b/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala @@ -12,6 +12,7 @@ import scala.concurrent.duration._ object ConfigKeys { val useSharedDb = "use-shared-db" + val useJournalMetadata = "use-journal-metadata" } class SlickConfiguration(config: Config) { @@ -148,8 +149,10 @@ class JournalConfig(config: Config) { val tagsConfig = new TagsConfig(config) val tagsTableConfiguration = new TagsTableConfiguration(config) val useSharedDb: Option[String] = config.asOptionalNonEmptyString(ConfigKeys.useSharedDb) + val useJournalMetadata: Boolean = config.asBoolean(ConfigKeys.useJournalMetadata, false) + override def toString: String = - s"JournalConfig($journalTableConfiguration,$journalMetadataTableConfiguration,$pluginConfig,$tagsConfig,$partitionsConfig,$useSharedDb)" + s"JournalConfig($journalTableConfiguration,$journalMetadataTableConfiguration,$pluginConfig,$tagsConfig,$partitionsConfig,$useSharedDb,$useJournalMetadata)" } class SnapshotConfig(config: Config) { @@ -186,7 +189,8 @@ class ReadJournalConfig(config: Config) { val maxBufferSize: Int = config.as[String]("max-buffer-size", "500").toInt val addShutdownHook: Boolean = config.asBoolean("add-shutdown-hook", true) val includeDeleted: Boolean = config.as[Boolean]("includeLogicallyDeleted", true) + val useJournalMetadata: Boolean = config.asBoolean(ConfigKeys.useJournalMetadata, false) override def toString: String = -
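// Illustrative sketch (not part of this patch): with the use-journal-metadata flag wired into these config
// classes, an application.conf could opt in roughly as follows, assuming the default plugin ids:
//   postgres-journal.use-journal-metadata = true
//   postgres-read-journal {
//     use-journal-metadata = true
//     dao = "akka.persistence.postgres.query.dao.PartitionedReadJournalDao"
//   }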
s"ReadJournalConfig($journalTableConfiguration,$journalMetadataTableConfiguration,$pluginConfig,$refreshInterval,$maxBufferSize,$addShutdownHook,$includeDeleted)" + s"ReadJournalConfig($journalTableConfiguration,$journalMetadataTableConfiguration,$pluginConfig,$refreshInterval,$maxBufferSize,$addShutdownHook,$includeDeleted,$useJournalMetadata)" } diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala index e432d5b4..27b76493 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/BaseByteArrayJournalDao.scala @@ -15,6 +15,7 @@ import akka.stream.scaladsl.{ Keep, Sink, Source } import akka.stream.{ Materializer, OverflowStrategy, QueueOfferResult } import akka.{ Done, NotUsed } import org.slf4j.{ Logger, LoggerFactory } +import slick.dbio.DBIOAction import slick.jdbc.JdbcBackend._ import scala.collection.immutable._ @@ -39,6 +40,9 @@ trait BaseByteArrayJournalDao extends JournalDaoWithUpdates with BaseJournalDaoW val logger: Logger = LoggerFactory.getLogger(this.getClass) + lazy val metadataQueries: JournalMetadataQueries = new JournalMetadataQueries( + JournalMetadataTable(journalConfig.journalMetadataTableConfiguration)) + // This logging may block since we don't control how the user will configure logback // We can't use a Akka logging neither because we don't have an ActorSystem in scope and // we should not introduce another dependency here. @@ -138,15 +142,20 @@ trait BaseByteArrayJournalDao extends JournalDaoWithUpdates with BaseJournalDaoW queries.highestMarkedSequenceNrForPersistenceId(persistenceId).result override def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = { - db.run(queries.highestStoredSequenceNrForPersistenceId(persistenceId).result.headOption).flatMap { - case Some(maxSequenceNr) => - // journal_metadata has the max sequence nr stored - Future.successful(maxSequenceNr) - case None => - // journal_metadata has yet to store the max sequence number to this persistenceId - db.run(queries.highestSequenceNrForPersistenceId(persistenceId).result) - .map(_.getOrElse(0L)) // Default to 0L when nothing is found for this persistenceId - } + val query = if (journalConfig.useJournalMetadata) { + metadataQueries.highestSequenceNrForPersistenceId(persistenceId).result.headOption.flatMap { + case Some(maxSequenceNr) => + // return the stored max sequence nr on journal metadata table + DBIOAction.successful(Some(maxSequenceNr)) + case None => + // journal metadata do not have information for this persistenceId -> fallback to standard behaviour + queries.highestSequenceNrForPersistenceId(persistenceId).result + } + } else + queries.highestSequenceNrForPersistenceId(persistenceId).result + + // Default to 0L when nothing is found for this persistenceId + db.run(query).map(_.getOrElse(0L)) } override def messages( diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/FlatJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/FlatJournalDao.scala index 502eda7b..12feb4c1 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/FlatJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/FlatJournalDao.scala @@ -13,9 +13,7 @@ class FlatJournalDao(val db: Database, val journalConfig: JournalConfig, seriali implicit val ec: ExecutionContext, 
val mat: Materializer) extends BaseByteArrayJournalDao { - val queries = new JournalQueries( - FlatJournalTable(journalConfig.journalTableConfiguration), - JournalMetadataTable(journalConfig.journalMetadataTableConfiguration)) + val queries = new JournalQueries(FlatJournalTable(journalConfig.journalTableConfiguration)) val tagDao = new SimpleTagDao(db, journalConfig.tagsTableConfiguration) val eventTagConverter = new CachedTagIdResolver(tagDao, journalConfig.tagsConfig) val serializer = new ByteArrayJournalSerializer(serialization, eventTagConverter) diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalMetadataQueries.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalMetadataQueries.scala new file mode 100644 index 00000000..9544c449 --- /dev/null +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalMetadataQueries.scala @@ -0,0 +1,19 @@ +package akka.persistence.postgres.journal.dao + +import slick.lifted.TableQuery + +class JournalMetadataQueries(journalMetadataTable: TableQuery[JournalMetadataTable]) { + import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ + + private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]): Query[Rep[Long], Long, Seq] = { + journalMetadataTable.filter(_.persistenceId === persistenceId).map(_.maxSequenceNumber).take(1) + } + + val highestSequenceNrForPersistenceId = Compiled(_highestSequenceNrForPersistenceId _) + + private def _minAndMaxOrderingForPersistenceId( + persistenceId: Rep[String]): Query[(Rep[Long], Rep[Long]), (Long, Long), Seq] = + journalMetadataTable.filter(_.persistenceId === persistenceId).take(1).map(r => (r.minOrdering, r.maxOrdering)) + + val minAndMaxOrderingForPersistenceId = Compiled(_minAndMaxOrderingForPersistenceId _) +} diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala index d98feaaa..a968eb70 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala @@ -10,7 +10,7 @@ import io.circe.Json import slick.lifted.TableQuery import slick.sql.FixedSqlAction -class JournalQueries(journalTable: TableQuery[JournalTable], journalMetadataTable: TableQuery[JournalMetadataTable]) { +class JournalQueries(journalTable: TableQuery[JournalTable]) { import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ @@ -51,17 +51,11 @@ class JournalQueries(journalTable: TableQuery[JournalTable], journalMetadataTabl private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] = journalTable.filter(_.persistenceId === persistenceId).map(_.sequenceNumber).max - private def _highestStoredSequenceNrForPersistenceId(persistenceId: Rep[String]): Query[Rep[Long], Long, Seq] = { - journalMetadataTable.filter(_.persistenceId === persistenceId).map(_.maxSequenceNumber).take(1) - } - private def _highestMarkedSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] = journalTable.filter(_.deleted === true).filter(_.persistenceId === persistenceId).map(_.sequenceNumber).max val highestSequenceNrForPersistenceId = Compiled(_highestSequenceNrForPersistenceId _) - val highestStoredSequenceNrForPersistenceId = Compiled(_highestStoredSequenceNrForPersistenceId _) - val highestMarkedSequenceNrForPersistenceId = Compiled(_highestMarkedSequenceNrForPersistenceId _) private def 
_selectByPersistenceIdAndMaxSequenceNumber(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) = @@ -74,12 +68,6 @@ class JournalQueries(journalTable: TableQuery[JournalTable], journalMetadataTabl val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct) - private def _minAndMaxOrderingStoredForPersistenceId( - persistenceId: Rep[String]): Query[(Rep[Long], Rep[Long]), (Long, Long), Seq] = - journalMetadataTable.filter(_.persistenceId === persistenceId).take(1).map(r => (r.minOrdering, r.maxOrdering)) - - val minAndMaxOrderingStoredForPersistenceId = Compiled(_minAndMaxOrderingStoredForPersistenceId _) - private def _messagesQuery( persistenceId: Rep[String], fromSequenceNr: Rep[Long], diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/NestedPartitionsJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/NestedPartitionsJournalDao.scala index bea9f471..30c34d66 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/NestedPartitionsJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/NestedPartitionsJournalDao.scala @@ -17,9 +17,7 @@ class NestedPartitionsJournalDao(db: Database, journalConfig: JournalConfig, ser implicit ec: ExecutionContext, mat: Materializer) extends FlatJournalDao(db, journalConfig, serialization) { - override val queries = new JournalQueries( - NestedPartitionsJournalTable(journalConfig.journalTableConfiguration), - JournalMetadataTable(journalConfig.journalMetadataTableConfiguration)) + override val queries = new JournalQueries(NestedPartitionsJournalTable(journalConfig.journalTableConfiguration)) private val journalTableCfg = journalConfig.journalTableConfiguration private val partitionSize = journalConfig.partitionsConfig.size private val partitionPrefix = journalConfig.partitionsConfig.prefix diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala index 8c7faa61..0016c24c 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/PartitionedJournalDao.scala @@ -19,9 +19,7 @@ class PartitionedJournalDao(db: Database, journalConfig: JournalConfig, serializ implicit ec: ExecutionContext, mat: Materializer) extends FlatJournalDao(db, journalConfig, serialization) { - override val queries = new JournalQueries( - PartitionedJournalTable(journalConfig.journalTableConfiguration), - JournalMetadataTable(journalConfig.journalMetadataTableConfiguration)) + override val queries = new JournalQueries(PartitionedJournalTable(journalConfig.journalTableConfiguration)) private val journalTableCfg = journalConfig.journalTableConfiguration private val partitionSize = journalConfig.partitionsConfig.size private val partitionPrefix = journalConfig.partitionsConfig.prefix @@ -96,17 +94,23 @@ class PartitionedJournalDao(db: Database, journalConfig: JournalConfig, serializ fromSequenceNr: Long, toSequenceNr: Long, max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = { - // Query the metadata table to get the known min and max ordering a persistence_id has, - // so that the postgres query planner might immediately discard scanning unnecessary partitions - val messagesQuery = queries.minAndMaxOrderingStoredForPersistenceId(persistenceId).result.headOption.flatMap { - case Some((minOrdering, maxOrdering)) => - queries - 
.messagesOrderingBoundedQuery(persistenceId, fromSequenceNr, toSequenceNr, max, minOrdering, maxOrdering) - .result - case None => - queries.messagesQuery(persistenceId, fromSequenceNr, toSequenceNr, max).result - } - Source.fromPublisher(db.stream(messagesQuery)).via(serializer.deserializeFlow) + // This behaviour override is only applied here, because it is only useful on the PartitionedJournal strategy. + val query = if (journalConfig.useJournalMetadata) { + metadataQueries.minAndMaxOrderingForPersistenceId(persistenceId).result.headOption.flatMap { + case Some((minOrdering, maxOrdering)) => + // if journal_metadata knows the min and max ordering of a persistenceId, + // use them to help the query planner to avoid scanning unnecessary partitions. + queries + .messagesOrderingBoundedQuery(persistenceId, fromSequenceNr, toSequenceNr, max, minOrdering, maxOrdering) + .result + case None => + // fallback to standard behaviour + queries.messagesQuery(persistenceId, fromSequenceNr, toSequenceNr, max).result + } + } else + queries.messagesQuery(persistenceId, fromSequenceNr, toSequenceNr, max).result + + Source.fromPublisher(db.stream(query)).via(serializer.deserializeFlow) } } diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/BaseByteArrayReadJournalDao.scala similarity index 58% rename from core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala rename to core/src/main/scala/akka/persistence/postgres/query/dao/BaseByteArrayReadJournalDao.scala index 24d80484..0ee55c17 100644 --- a/core/src/main/scala/akka/persistence/postgres/query/dao/ByteArrayReadJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/BaseByteArrayReadJournalDao.scala @@ -9,7 +9,11 @@ package query.dao import akka.NotUsed import akka.persistence.PersistentRepr import akka.persistence.postgres.config.ReadJournalConfig -import akka.persistence.postgres.journal.dao.{ BaseJournalDaoWithReadMessages, ByteArrayJournalSerializer } +import akka.persistence.postgres.journal.dao.{ + BaseJournalDaoWithReadMessages, + ByteArrayJournalSerializer, + JournalMetadataTable +} import akka.persistence.postgres.serialization.FlowPersistentReprSerializer import akka.persistence.postgres.tag.{ CachedTagIdResolver, SimpleTagDao, TagIdResolver } import akka.serialization.Serialization @@ -63,44 +67,3 @@ trait BaseByteArrayReadJournalDao extends ReadJournalDao with BaseJournalDaoWith db.run(queries.maxOrdering.result) } } - -class ByteArrayReadJournalDao( - val db: Database, - val readJournalConfig: ReadJournalConfig, - serialization: Serialization, - val tagIdResolver: TagIdResolver)(implicit val ec: ExecutionContext, val mat: Materializer) - extends BaseByteArrayReadJournalDao { - val queries = new ReadJournalQueries(readJournalConfig) - val serializer = new ByteArrayJournalSerializer( - serialization, - new CachedTagIdResolver( - new SimpleTagDao(db, readJournalConfig.tagsTableConfiguration), - readJournalConfig.tagsConfig)) -} - -class PartitionedReadJournalDao( - db: Database, - readJournalConfig: ReadJournalConfig, - serialization: Serialization, - tagIdResolver: TagIdResolver)(implicit ec: ExecutionContext, mat: Materializer) - extends ByteArrayReadJournalDao(db, readJournalConfig, serialization, tagIdResolver) { - - import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ - - override def messages( - persistenceId: String, - fromSequenceNr: Long, - toSequenceNr: Long, - 
max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = { - val messagesQuery = queries.minAndMaxOrderingStoredForPersistenceId(persistenceId).result.headOption.flatMap { - case Some((minOrdering, maxOrdering)) => - queries - .messagesOrderingBoundedQuery(persistenceId, fromSequenceNr, toSequenceNr, max, minOrdering, maxOrdering) - .result - case None => - queries.messagesQuery(persistenceId, fromSequenceNr, toSequenceNr, max).result - } - - Source.fromPublisher(db.stream(messagesQuery)).via(serializer.deserializeFlow) - } -} diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/FlatReadJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/FlatReadJournalDao.scala new file mode 100644 index 00000000..3ed462af --- /dev/null +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/FlatReadJournalDao.scala @@ -0,0 +1,24 @@ +package akka.persistence.postgres.query.dao + +import akka.persistence.postgres.config.ReadJournalConfig +import akka.persistence.postgres.journal.dao.ByteArrayJournalSerializer +import akka.persistence.postgres.tag.{ CachedTagIdResolver, SimpleTagDao, TagIdResolver } +import akka.serialization.Serialization +import akka.stream.Materializer +import slick.jdbc.JdbcBackend.Database + +import scala.concurrent.ExecutionContext + +class FlatReadJournalDao( + val db: Database, + val readJournalConfig: ReadJournalConfig, + serialization: Serialization, + val tagIdResolver: TagIdResolver)(implicit val ec: ExecutionContext, val mat: Materializer) + extends BaseByteArrayReadJournalDao { + val queries = new ReadJournalQueries(readJournalConfig) + val serializer = new ByteArrayJournalSerializer( + serialization, + new CachedTagIdResolver( + new SimpleTagDao(db, readJournalConfig.tagsTableConfiguration), + readJournalConfig.tagsConfig)) +} diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/PartitionedReadJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/PartitionedReadJournalDao.scala new file mode 100644 index 00000000..2cb90e98 --- /dev/null +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/PartitionedReadJournalDao.scala @@ -0,0 +1,58 @@ +package akka.persistence.postgres.query.dao + +import akka.NotUsed +import akka.persistence.PersistentRepr +import akka.persistence.postgres.config.ReadJournalConfig +import akka.persistence.postgres.journal.dao.{ ByteArrayJournalSerializer, JournalMetadataTable } +import akka.persistence.postgres.tag.{ CachedTagIdResolver, SimpleTagDao, TagIdResolver } +import akka.serialization.Serialization +import akka.stream.Materializer +import akka.stream.scaladsl.Source +import slick.jdbc.JdbcBackend.Database + +import scala.concurrent.ExecutionContext +import scala.util.Try + +class PartitionedReadJournalDao( + val db: Database, + val readJournalConfig: ReadJournalConfig, + serialization: Serialization, + val tagIdResolver: TagIdResolver)(implicit val ec: ExecutionContext, val mat: Materializer) + extends BaseByteArrayReadJournalDao { + + import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ + + val queries = new ReadJournalQueries(readJournalConfig) + private val metadataQueries: ReadJournalMetadataQueries = new ReadJournalMetadataQueries( + JournalMetadataTable(readJournalConfig.journalMetadataTableConfiguration)) + + val serializer = new ByteArrayJournalSerializer( + serialization, + new CachedTagIdResolver( + new SimpleTagDao(db, readJournalConfig.tagsTableConfiguration), + readJournalConfig.tagsConfig)) + + override def messages( + 
persistenceId: String, + fromSequenceNr: Long, + toSequenceNr: Long, + max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = { + // This behaviour override is only applied here, because it is only useful on the PartitionedJournal strategy. + val query = if (readJournalConfig.useJournalMetadata) { + metadataQueries.minAndMaxOrderingForPersistenceId(persistenceId).result.headOption.flatMap { + case Some((minOrdering, maxOrdering)) => + // if journal_metadata knows the min and max ordering of a persistenceId, + // use them to help the query planner to avoid scanning unnecessary partitions. + queries + .messagesOrderingBoundedQuery(persistenceId, fromSequenceNr, toSequenceNr, max, minOrdering, maxOrdering) + .result + case None => + // fallback to standard behaviour + queries.messagesQuery(persistenceId, fromSequenceNr, toSequenceNr, max).result + } + } else + queries.messagesQuery(persistenceId, fromSequenceNr, toSequenceNr, max).result + + Source.fromPublisher(db.stream(query)).via(serializer.deserializeFlow) + } +} diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalMetadataQueries.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalMetadataQueries.scala new file mode 100644 index 00000000..eceea435 --- /dev/null +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalMetadataQueries.scala @@ -0,0 +1,15 @@ +package akka.persistence.postgres.query.dao + +import akka.persistence.postgres.journal.dao.JournalMetadataTable +import slick.lifted.TableQuery + +class ReadJournalMetadataQueries(journalMetadataTable: TableQuery[JournalMetadataTable]) { + + import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ + + private def _minAndMaxOrderingForPersistenceId( + persistenceId: Rep[String]): Query[(Rep[Long], Rep[Long]), (Long, Long), Seq] = + journalMetadataTable.filter(_.persistenceId === persistenceId).take(1).map(r => (r.minOrdering, r.maxOrdering)) + + val minAndMaxOrderingForPersistenceId = Compiled(_minAndMaxOrderingForPersistenceId _) +} diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala index 1c842575..ffe045c3 100644 --- a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala @@ -13,8 +13,6 @@ class ReadJournalQueries(val readJournalConfig: ReadJournalConfig) { import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ private val journalTable: TableQuery[JournalTable] = FlatJournalTable(readJournalConfig.journalTableConfiguration) - private val journalMetadataTable: TableQuery[JournalMetadataTable] = - JournalMetadataTable.apply(readJournalConfig.journalMetadataTableConfiguration) private def _allPersistenceIdsDistinct(max: ConstColumn[Long]): Query[Rep[String], String, Seq] = baseTableQuery().map(_.persistenceId).distinct.take(max) @@ -25,12 +23,6 @@ class ReadJournalQueries(val readJournalConfig: ReadJournalConfig) { val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct _) - private def _minAndMaxOrderingStoredForPersistenceId( - persistenceId: Rep[String]): Query[(Rep[Long], Rep[Long]), (Long, Long), Seq] = - journalMetadataTable.filter(_.persistenceId === persistenceId).take(1).map(r => (r.minOrdering, r.maxOrdering)) - - val minAndMaxOrderingStoredForPersistenceId = Compiled(_minAndMaxOrderingStoredForPersistenceId _) - private def _messagesQuery( 
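// Illustrative sketch only (not part of this patch): the essence of the
// ordering-bounded read path introduced above. When journal_metadata already
// knows the smallest and largest `ordering` ever written for a persistence id,
// adding those bounds to the ordinary messages query lets the Postgres planner
// prune journal partitions that cannot contain matching rows, which is why the
// override is only applied for the partitioned journal strategy. Field and
// column names mirror JournalTable as used elsewhere in this repository; the
// exact signature is an assumption -- the real definitions live in
// JournalQueries / ReadJournalQueries.
import akka.persistence.postgres.db.ExtendedPostgresProfile.api._
import akka.persistence.postgres.journal.dao.JournalTable
import slick.lifted.TableQuery

class OrderingBoundedQuerySketch(journalTable: TableQuery[JournalTable]) {
  def messagesOrderingBounded(
      persistenceId: Rep[String],
      fromSequenceNr: Rep[Long],
      toSequenceNr: Rep[Long],
      max: ConstColumn[Long],
      minOrdering: Rep[Long],
      maxOrdering: Rep[Long]) =
    journalTable
      .filter(_.persistenceId === persistenceId)
      .filter(_.sequenceNumber >= fromSequenceNr)
      .filter(_.sequenceNumber <= toSequenceNr)
      // the only difference w.r.t. the plain messages query: bound the scan by
      // the min/max ordering recorded in journal_metadata
      .filter(r => r.ordering >= minOrdering && r.ordering <= maxOrdering)
      .sortBy(_.sequenceNumber.asc)
      .take(max)
}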
persistenceId: Rep[String], fromSequenceNr: Rep[Long], diff --git a/core/src/test/resources/nested-partitions-application-with-use-journal-metadata.conf b/core/src/test/resources/nested-partitions-application-with-use-journal-metadata.conf new file mode 100644 index 00000000..8ca28304 --- /dev/null +++ b/core/src/test/resources/nested-partitions-application-with-use-journal-metadata.conf @@ -0,0 +1,2 @@ +include "plain-application-with-use-journal-metadata.conf" +include "nested-partitions-journal.conf" diff --git a/core/src/test/resources/partitioned-application-with-use-journal-metadata.conf b/core/src/test/resources/partitioned-application-with-use-journal-metadata.conf new file mode 100644 index 00000000..ee77c852 --- /dev/null +++ b/core/src/test/resources/partitioned-application-with-use-journal-metadata.conf @@ -0,0 +1,2 @@ +include "plain-application-with-use-journal-metadata.conf" +include "partitioned-journal.conf" diff --git a/core/src/test/resources/plain-application-with-use-journal-metadata.conf b/core/src/test/resources/plain-application-with-use-journal-metadata.conf new file mode 100644 index 00000000..fd9b103e --- /dev/null +++ b/core/src/test/resources/plain-application-with-use-journal-metadata.conf @@ -0,0 +1,4 @@ +include "general.conf" +include "plain-application.conf" + +postgres-journal.use-journal-metadata = true diff --git a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalPerfSpec.scala b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalPerfSpec.scala index 4a053391..c7d8d2c7 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalPerfSpec.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalPerfSpec.scala @@ -8,15 +8,15 @@ package akka.persistence.postgres.journal import akka.actor.Props import akka.persistence.CapabilityFlag import akka.persistence.journal.JournalPerfSpec -import akka.persistence.journal.JournalPerfSpec.{BenchActor, Cmd, ResetCounter} +import akka.persistence.journal.JournalPerfSpec.{ BenchActor, Cmd, ResetCounter } import akka.persistence.postgres.config._ import akka.persistence.postgres.db.SlickExtension import akka.persistence.postgres.util.Schema._ -import akka.persistence.postgres.util.{ClasspathResources, DropCreate} +import akka.persistence.postgres.util.{ ClasspathResources, DropCreate } import akka.testkit.TestProbe import com.typesafe.config.ConfigFactory import org.scalatest.concurrent.ScalaFutures -import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach} +import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } import scala.concurrent.ExecutionContextExecutor import scala.concurrent.duration._ @@ -114,6 +114,9 @@ class NestedPartitionsJournalPerfSpecSharedDb class NestedPartitionsJournalPerfSpecPhysicalDelete extends PostgresJournalPerfSpec("nested-partitions-application-with-hard-delete.conf", NestedPartitions) +class NestedPartitionsJournalPerfSpecUseJournalMetadata + extends PostgresJournalPerfSpec("nested-partitions-application-with-use-journal-metadata.conf", NestedPartitions) + class PartitionedJournalPerfSpec extends PostgresJournalPerfSpec("partitioned-application.conf", Partitioned) class PartitionedJournalPerfSpecSharedDb @@ -122,9 +125,15 @@ class PartitionedJournalPerfSpecSharedDb class PartitionedJournalPerfSpecPhysicalDelete extends PostgresJournalPerfSpec("partitioned-application-with-hard-delete.conf", Partitioned) +class PartitionedJournalPerfSpecUseJournalMetadata + extends 
PostgresJournalPerfSpec("partitioned-application-with-use-journal-metadata.conf", Partitioned) + class PlainJournalPerfSpec extends PostgresJournalPerfSpec("plain-application.conf", Plain) class PlainJournalPerfSpecSharedDb extends PostgresJournalPerfSpec("plain-shared-db-application.conf", Plain) class PlainJournalPerfSpecPhysicalDelete extends PostgresJournalPerfSpec("plain-application-with-hard-delete.conf", Plain) + +class PlainJournalPerfSpecUseJournalMetadata + extends PostgresJournalPerfSpec("plain-application-with-use-journal-metadata.conf", Plain) diff --git a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala index fb584a0c..6c881c75 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala @@ -6,24 +6,24 @@ package akka.persistence.postgres.journal import akka.actor.Actor -import akka.persistence.JournalProtocol.{ReplayedMessage, WriteMessages, WriteMessagesFailed} +import akka.persistence.JournalProtocol.{ ReplayedMessage, WriteMessages, WriteMessagesFailed } import akka.persistence.journal.JournalSpec import akka.persistence.postgres.config._ import akka.persistence.postgres.db.SlickExtension import akka.persistence.postgres.query.ScalaPostgresReadJournalOperations import akka.persistence.postgres.util.Schema._ -import akka.persistence.postgres.util.{ClasspathResources, DropCreate} +import akka.persistence.postgres.util.{ ClasspathResources, DropCreate } import akka.persistence.query.Sequence -import akka.persistence.{AtomicWrite, CapabilityFlag, PersistentImpl, PersistentRepr} +import akka.persistence.{ AtomicWrite, CapabilityFlag, PersistentImpl, PersistentRepr } import akka.testkit.TestProbe -import com.typesafe.config.{Config, ConfigFactory} +import com.typesafe.config.{ Config, ConfigFactory } import org.scalatest.concurrent.PatienceConfiguration.Timeout import org.scalatest.concurrent.ScalaFutures -import org.scalatest.time.{Minute, Span} -import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach} +import org.scalatest.time.{ Minute, Span } +import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } import scala.concurrent.duration._ -import scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.{ ExecutionContext, Future } abstract class PostgresJournalSpec(config: String, schemaType: SchemaType) extends JournalSpec(ConfigFactory.load(config)) @@ -65,13 +65,13 @@ abstract class PostgresJournalSpec(config: String, schemaType: SchemaType) writeMessages(1, repeatedSnr + 1, perId, sender.ref, writerUuid) // then - val msg = AtomicWrite(PersistentRepr( - payload = s"a-$repeatedSnr", - sequenceNr = repeatedSnr, - persistenceId = pid, - sender = sender.ref, - writerUuid = writerUuid - )) + val msg = AtomicWrite( + PersistentRepr( + payload = s"a-$repeatedSnr", + sequenceNr = repeatedSnr, + persistenceId = pid, + sender = sender.ref, + writerUuid = writerUuid)) val probe = TestProbe() journal ! 
WriteMessages(Seq(msg), probe.ref, actorInstanceId) @@ -137,6 +137,9 @@ class NestedPartitionsJournalSpecSharedDb class NestedPartitionsJournalSpecPhysicalDelete extends PostgresJournalSpec("nested-partitions-application-with-hard-delete.conf", NestedPartitions) +class NestedPartitionsJournalSpecUseJournalMetadata + extends PostgresJournalSpec("nested-partitions-application-with-use-journal-metadata.conf", NestedPartitions) + class PartitionedJournalSpec extends PostgresJournalSpec("partitioned-application.conf", Partitioned) with PartitionedJournalSpecTestCases @@ -147,6 +150,12 @@ class PartitionedJournalSpecPhysicalDelete extends PostgresJournalSpec("partitioned-application-with-hard-delete.conf", Partitioned) with PartitionedJournalSpecTestCases +class PartitionedJournalSpecUseJournalMetadata + extends PostgresJournalSpec("partitioned-application-with-use-journal-metadata.conf", Partitioned) + with PartitionedJournalSpecTestCases + class PlainJournalSpec extends PostgresJournalSpec("plain-application.conf", Plain) class PlainJournalSpecSharedDb extends PostgresJournalSpec("plain-shared-db-application.conf", Plain) class PlainJournalSpecPhysicalDelete extends PostgresJournalSpec("plain-application-with-hard-delete.conf", Plain) +class PlainJournalSpecUseJournalMetadata + extends PostgresJournalSpec("plain-application-with-use-journal-metadata.conf", Plain) diff --git a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalMetadataQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalMetadataQueriesTest.scala new file mode 100644 index 00000000..5c0c9378 --- /dev/null +++ b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalMetadataQueriesTest.scala @@ -0,0 +1,22 @@ +package akka.persistence.postgres.journal.dao + +import akka.persistence.postgres.util.BaseQueryTest + +class JournalMetadataQueriesTest extends BaseQueryTest { + + it should "create SQL query for highestSequenceNrForPersistenceId" in withJournalMetadataQueries { queries => + queries.highestSequenceNrForPersistenceId( + "aaa") shouldBeSQL """select "max_sequence_number" from "journal_metadata" where "persistence_id" = ? limit 1""" + } + + it should "create SQL query for minAndMaxOrderingForPersistenceId" in withJournalMetadataQueries { queries => + queries.minAndMaxOrderingForPersistenceId( + "aaa") shouldBeSQL """select "min_ordering", "max_ordering" from "journal_metadata" where "persistence_id" = ? limit 1""" + } + + private def withJournalMetadataQueries(f: JournalMetadataQueries => Unit): Unit = { + withActorSystem { implicit system => + f(new JournalMetadataQueries(JournalMetadataTable(journalConfig.journalMetadataTableConfiguration))) + } + } +} diff --git a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala index 682e89ea..abd25ccc 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala @@ -20,22 +20,12 @@ class JournalQueriesTest extends BaseQueryTest { "aaa") shouldBeSQL """select max("sequence_number") from "journal" where "persistence_id" = ?""" } - it should "create SQL query for highestStoredSequenceNrForPersistenceId" in withJournalQueries { queries => - queries.highestStoredSequenceNrForPersistenceId( - "aaa") shouldBeSQL """select "max_sequence_number" from "journal_metadata" where "persistence_id" = ? 
limit 1""" - } - it should "create SQL query for selectByPersistenceIdAndMaxSequenceNumber" in withJournalQueries { queries => queries.selectByPersistenceIdAndMaxSequenceNumber( "aaa", 11L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where ("persistence_id" = ?) and ("sequence_number" <= ?) order by "sequence_number" desc""" } - it should "create SQL query for minAndMaxOrderingStoredForPersistenceId" in withJournalQueries { queries => - queries.minAndMaxOrderingStoredForPersistenceId( - "aaa") shouldBeSQL """select "min_ordering", "max_ordering" from "journal_metadata" where "persistence_id" = ? limit 1""" - } - it should "create SQL query for messagesQuery" in withJournalQueries { queries => queries.messagesQuery( "aaa", @@ -84,10 +74,7 @@ class JournalQueriesTest extends BaseQueryTest { private def withJournalQueries(f: JournalQueries => Unit): Unit = { withActorSystem { implicit system => - f( - new JournalQueries( - FlatJournalTable.apply(journalConfig.journalTableConfiguration), - JournalMetadataTable.apply(journalConfig.journalMetadataTableConfiguration))) + f(new JournalQueries(FlatJournalTable.apply(journalConfig.journalTableConfiguration))) } } } diff --git a/core/src/test/scala/akka/persistence/postgres/query/CurrentEventsByTagWithGapsTest.scala b/core/src/test/scala/akka/persistence/postgres/query/CurrentEventsByTagWithGapsTest.scala index 19f9869e..539c36b5 100644 --- a/core/src/test/scala/akka/persistence/postgres/query/CurrentEventsByTagWithGapsTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/query/CurrentEventsByTagWithGapsTest.scala @@ -46,9 +46,7 @@ class CurrentEventsByTagWithGapsTest }.futureValue val journalTable = schemaType.table(journalConfig.journalTableConfiguration) - val journalMetadataTable = - schemaType.metadataTable(journalConfig.journalMetadataTableConfiguration) - val journalQueries = new JournalQueries(journalTable, journalMetadataTable) + val journalQueries = new JournalQueries(journalTable) val journalOps = new JavaDslPostgresReadJournalOperations(system) val tag = "testTag" diff --git a/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala b/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala index 96d4da5c..201813d5 100644 --- a/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala @@ -11,11 +11,7 @@ import akka.pattern.ask import akka.persistence.postgres.config.JournalSequenceRetrievalConfig import akka.persistence.postgres.db.ExtendedPostgresProfile import akka.persistence.postgres.query.JournalSequenceActor.{ GetMaxOrderingId, MaxOrderingId } -import akka.persistence.postgres.query.dao.{ - ByteArrayReadJournalDao, - PartitionedReadJournalDao, - TestProbeReadJournalDao -} +import akka.persistence.postgres.query.dao.{ FlatReadJournalDao, PartitionedReadJournalDao, TestProbeReadJournalDao } import akka.persistence.postgres.tag.{ CachedTagIdResolver, SimpleTagDao } import akka.persistence.postgres.util.Schema.{ NestedPartitions, Partitioned, Plain, SchemaType } import akka.persistence.postgres.{ JournalRow, SharedActorSystemTestSpec } @@ -31,7 +27,6 @@ import slick.jdbc.{ JdbcBackend, JdbcCapabilities } import scala.concurrent.Future import scala.concurrent.duration._ -import scala.util.Random abstract class JournalSequenceActorTest(val schemaType: SchemaType) extends 
QueryTestSpec(schemaType.configName) { private val log = LoggerFactory.getLogger(classOf[JournalSequenceActorTest]) @@ -181,7 +176,7 @@ abstract class JournalSequenceActorTest(val schemaType: SchemaType) extends Quer import system.dispatcher implicit val mat: Materializer = SystemMaterializer(system).materializer val readJournalDao = - new ByteArrayReadJournalDao( + new FlatReadJournalDao( db, readJournalConfig, SerializationExtension(system), diff --git a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalMetadataQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalMetadataQueriesTest.scala new file mode 100644 index 00000000..bef12342 --- /dev/null +++ b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalMetadataQueriesTest.scala @@ -0,0 +1,17 @@ +package akka.persistence.postgres.query.dao + +import akka.persistence.postgres.journal.dao.JournalMetadataTable +import akka.persistence.postgres.util.BaseQueryTest + +class ReadJournalMetadataQueriesTest extends BaseQueryTest { + it should "create SQL query for minAndMaxOrderingForPersistenceId" in withReadJournalMetadataQueries { queries => + queries.minAndMaxOrderingForPersistenceId( + "aaa") shouldBeSQL """select "min_ordering", "max_ordering" from "journal_metadata" where "persistence_id" = ? limit 1""" + } + + private def withReadJournalMetadataQueries(f: ReadJournalMetadataQueries => Unit): Unit = { + withActorSystem { implicit system => + f(new ReadJournalMetadataQueries(JournalMetadataTable(readJournalConfig.journalMetadataTableConfiguration))) + } + } +} diff --git a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala index 90a179cf..086623c6 100644 --- a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala @@ -8,11 +8,6 @@ class ReadJournalQueriesTest extends BaseQueryTest { queries.allPersistenceIdsDistinct(23L) shouldBeSQL """select distinct "persistence_id" from "journal" limit ?""" } - it should "create SQL query for minAndMaxOrderingStoredForPersistenceId" in withReadJournalQueries { queries => - queries.minAndMaxOrderingStoredForPersistenceId( - "aaa") shouldBeSQL """select "min_ordering", "max_ordering" from "journal_metadata" where "persistence_id" = ? 
limit 1""" - } - it should "create SQL query for messagesQuery" in withReadJournalQueries { queries => queries.messagesQuery( "p1", diff --git a/docs/custom-dao.md b/docs/custom-dao.md index 3a4d8c72..2f980a40 100644 --- a/docs/custom-dao.md +++ b/docs/custom-dao.md @@ -23,7 +23,7 @@ postgres-snapshot-store { } postgres-read-journal { - dao = "akka.persistence.postgres.query.dao.ByteArrayReadJournalDao" + dao = "akka.persistence.postgres.query.dao.FlatReadJournalDao" } ``` From 15902651953e615e847cfb8c3eeb7cf9f449b164 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Thu, 6 Jul 2023 14:32:30 +0100 Subject: [PATCH 22/34] Fix typo --- core/src/main/resources/reference.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/resources/reference.conf b/core/src/main/resources/reference.conf index 1bb83259..1db61189 100644 --- a/core/src/main/resources/reference.conf +++ b/core/src/main/resources/reference.conf @@ -385,7 +385,7 @@ postgres-read-journal { use-journal-metadata = false - # Replace with "akka.persistence.postgres.query.dao.PartitionedJournalDao" in order to leverage dedicated queries to + # Replace with "akka.persistence.postgres.query.dao.PartitionedReadJournalDao" in order to leverage dedicated queries to # partitioned journal. dao = "akka.persistence.postgres.query.dao.FlatReadJournalDao" From 26db18624c716887ecaecc3eb937b54fa23772a4 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Thu, 6 Jul 2023 15:20:03 +0100 Subject: [PATCH 23/34] Remove unused journal queries --- .../postgres/journal/dao/JournalQueries.scala | 13 ------------- .../postgres/journal/dao/JournalQueriesTest.scala | 10 ---------- 2 files changed, 23 deletions(-) diff --git a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala index a968eb70..9465020d 100644 --- a/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/journal/dao/JournalQueries.scala @@ -19,9 +19,6 @@ class JournalQueries(journalTable: TableQuery[JournalTable]) { def writeJournalRows(xs: Seq[JournalRow]): FixedSqlAction[Option[Int], NoStream, slick.dbio.Effect.Write] = compiledJournalTable ++= xs.sortBy(_.sequenceNumber) - private def selectAllJournalForPersistenceId(persistenceId: Rep[String]) = - journalTable.filter(_.persistenceId === persistenceId).sortBy(_.sequenceNumber.desc) - def delete(persistenceId: String, toSequenceNr: Long): FixedSqlAction[Int, NoStream, slick.dbio.Effect.Write] = { journalTable.filter(_.persistenceId === persistenceId).filter(_.sequenceNumber <= toSequenceNr).delete } @@ -58,16 +55,6 @@ class JournalQueries(journalTable: TableQuery[JournalTable]) { val highestMarkedSequenceNrForPersistenceId = Compiled(_highestMarkedSequenceNrForPersistenceId _) - private def _selectByPersistenceIdAndMaxSequenceNumber(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) = - selectAllJournalForPersistenceId(persistenceId).filter(_.sequenceNumber <= maxSequenceNr) - - val selectByPersistenceIdAndMaxSequenceNumber = Compiled(_selectByPersistenceIdAndMaxSequenceNumber _) - - private def _allPersistenceIdsDistinct: Query[Rep[String], String, Seq] = - journalTable.map(_.persistenceId).distinct - - val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct) - private def _messagesQuery( persistenceId: Rep[String], fromSequenceNr: Rep[Long], diff --git 
a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala index abd25ccc..0bd9d6a9 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala @@ -6,10 +6,6 @@ import io.circe.{ Json, JsonObject } class JournalQueriesTest extends BaseQueryTest { - it should "produce SQL query for distinct persistenceID" in withJournalQueries { queries => - queries.allPersistenceIdsDistinct shouldBeSQL """select distinct "persistence_id" from "journal"""" - } - it should "create SQL query for highestMarkedSequenceNrForPersistenceId" in withJournalQueries { queries => queries.highestMarkedSequenceNrForPersistenceId( "aaa") shouldBeSQL """select max("sequence_number") from "journal" where ("deleted" = true) and ("persistence_id" = ?)""" @@ -20,12 +16,6 @@ class JournalQueriesTest extends BaseQueryTest { "aaa") shouldBeSQL """select max("sequence_number") from "journal" where "persistence_id" = ?""" } - it should "create SQL query for selectByPersistenceIdAndMaxSequenceNumber" in withJournalQueries { queries => - queries.selectByPersistenceIdAndMaxSequenceNumber( - "aaa", - 11L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where ("persistence_id" = ?) and ("sequence_number" <= ?) order by "sequence_number" desc""" - } - it should "create SQL query for messagesQuery" in withJournalQueries { queries => queries.messagesQuery( "aaa", From e883da463158ee80a876c388ecd46eda8ed993f8 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Thu, 6 Jul 2023 15:33:45 +0100 Subject: [PATCH 24/34] Proper distinction between FlatReadJournal and PartitionedReadJournal on ReadJournalQueries --- .../postgres/query/dao/FlatReadJournalDao.scala | 6 ++++-- .../postgres/query/dao/PartitionedReadJournalDao.scala | 10 ++++++++-- .../postgres/query/dao/ReadJournalQueries.scala | 10 ++++------ .../postgres/query/dao/ReadJournalQueriesTest.scala | 6 +++++- 4 files changed, 21 insertions(+), 11 deletions(-) diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/FlatReadJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/FlatReadJournalDao.scala index 3ed462af..515cf423 100644 --- a/core/src/main/scala/akka/persistence/postgres/query/dao/FlatReadJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/FlatReadJournalDao.scala @@ -1,7 +1,7 @@ package akka.persistence.postgres.query.dao import akka.persistence.postgres.config.ReadJournalConfig -import akka.persistence.postgres.journal.dao.ByteArrayJournalSerializer +import akka.persistence.postgres.journal.dao.{ ByteArrayJournalSerializer, FlatJournalTable } import akka.persistence.postgres.tag.{ CachedTagIdResolver, SimpleTagDao, TagIdResolver } import akka.serialization.Serialization import akka.stream.Materializer @@ -15,7 +15,9 @@ class FlatReadJournalDao( serialization: Serialization, val tagIdResolver: TagIdResolver)(implicit val ec: ExecutionContext, val mat: Materializer) extends BaseByteArrayReadJournalDao { - val queries = new ReadJournalQueries(readJournalConfig) + val queries = new ReadJournalQueries( + FlatJournalTable(readJournalConfig.journalTableConfiguration), + readJournalConfig.includeDeleted) val serializer = new ByteArrayJournalSerializer( serialization, new CachedTagIdResolver( diff --git 
a/core/src/main/scala/akka/persistence/postgres/query/dao/PartitionedReadJournalDao.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/PartitionedReadJournalDao.scala index 2cb90e98..82da7647 100644 --- a/core/src/main/scala/akka/persistence/postgres/query/dao/PartitionedReadJournalDao.scala +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/PartitionedReadJournalDao.scala @@ -3,7 +3,11 @@ package akka.persistence.postgres.query.dao import akka.NotUsed import akka.persistence.PersistentRepr import akka.persistence.postgres.config.ReadJournalConfig -import akka.persistence.postgres.journal.dao.{ ByteArrayJournalSerializer, JournalMetadataTable } +import akka.persistence.postgres.journal.dao.{ + ByteArrayJournalSerializer, + JournalMetadataTable, + PartitionedJournalTable +} import akka.persistence.postgres.tag.{ CachedTagIdResolver, SimpleTagDao, TagIdResolver } import akka.serialization.Serialization import akka.stream.Materializer @@ -22,7 +26,9 @@ class PartitionedReadJournalDao( import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ - val queries = new ReadJournalQueries(readJournalConfig) + val queries = new ReadJournalQueries( + PartitionedJournalTable(readJournalConfig.journalTableConfiguration), + readJournalConfig.includeDeleted) private val metadataQueries: ReadJournalMetadataQueries = new ReadJournalMetadataQueries( JournalMetadataTable(readJournalConfig.journalMetadataTableConfiguration)) diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala index ffe045c3..958b449f 100644 --- a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala @@ -6,19 +6,17 @@ package akka.persistence.postgres package query.dao -import akka.persistence.postgres.config.ReadJournalConfig -import akka.persistence.postgres.journal.dao.{ FlatJournalTable, JournalMetadataTable, JournalTable } +import akka.persistence.postgres.journal.dao.JournalTable +import slick.lifted.TableQuery -class ReadJournalQueries(val readJournalConfig: ReadJournalConfig) { +class ReadJournalQueries(journalTable: TableQuery[JournalTable], includeDeleted: Boolean) { import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ - private val journalTable: TableQuery[JournalTable] = FlatJournalTable(readJournalConfig.journalTableConfiguration) - private def _allPersistenceIdsDistinct(max: ConstColumn[Long]): Query[Rep[String], String, Seq] = baseTableQuery().map(_.persistenceId).distinct.take(max) private def baseTableQuery() = - if (readJournalConfig.includeDeleted) journalTable + if (includeDeleted) journalTable else journalTable.filter(_.deleted === false) val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct _) diff --git a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala index 086623c6..ec98dcaf 100644 --- a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala @@ -1,5 +1,6 @@ package akka.persistence.postgres.query.dao +import akka.persistence.postgres.journal.dao.FlatJournalTable import akka.persistence.postgres.util.BaseQueryTest class ReadJournalQueriesTest extends BaseQueryTest { @@ -45,7 +46,10 @@ class 
ReadJournalQueriesTest extends BaseQueryTest { private def withReadJournalQueries(f: ReadJournalQueries => Unit): Unit = { withActorSystem { implicit system => - f(new ReadJournalQueries(readJournalConfig)) + f( + new ReadJournalQueries( + FlatJournalTable(readJournalConfig.journalTableConfiguration), + readJournalConfig.includeDeleted)) } } } From 5c8970fa2bed6483b5985815f23e9d03233d8b93 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Thu, 6 Jul 2023 16:24:28 +0100 Subject: [PATCH 25/34] Update documentation --- README.md | 56 ++--------------------------------------------- docs/migration.md | 50 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 54 deletions(-) diff --git a/README.md b/README.md index 436c5f4c..c0eb6e15 100644 --- a/README.md +++ b/README.md @@ -113,62 +113,10 @@ Example partition names: `j_myActor_0`, `j_myActor_1`, `j_worker_0` etc. Keep in mind that the default maximum length for a table name in Postgres is 63 bytes, so you should avoid any non-ascii characters in your `persistenceId`s and keep the `prefix` reasonably short. > :warning: Once any of the partitioning setting under `postgres-journal.tables.journal.partitions` branch is settled, you should never change it. Otherwise you might end up with PostgresExceptions caused by table name or range conflicts. -## Migration - -### Migration from akka-persistence-jdbc 4.0.0 -It is possible to migrate existing journals from Akka Persistence JDBC 4.0.0. -Since we decided to extract metadata from the serialized payload and store it in a separate column it is not possible to migrate exiting journal and snapshot store using plain SQL scripts. - -#### How migration works -Each journal event and snapshot has to be read, deserialized, metadata and tags must be extracted and then everything stored in the new table. - -We provide you with an optional artifact, `akka-persistence-postgres-migration` that brings to your project the necessary classes to automate the above process. - -**Important**: Our util classes neither drop nor update any old data. Original tables will be still there but renamed with an `old_` prefix. It's up to you when to drop them. - -#### How to use plugin provided migrations -##### Add akka-persistence-migration to your project -Add the following to your `build.sbt` -``` -libraryDependencies += "com.swissborg" %% "akka-persistence-postgres-migration" % "0.5.0" -``` -For a maven project add: -```xml - - com.swisborg - akka-persistence-postgres-migration_2.13 - 0.5.0 - -``` -to your `pom.xml`. - -##### Create and run migrations: -```scala -import akka.persistence.postgres.migration.journal.Jdbc4JournalMigration -import akka.persistence.postgres.migration.snapshot.Jdbc4SnapshotStoreMigration - -for { -_ <- new Jdbc4JournalMigration(config).run() -_ <- new Jdbc4SnapshotStoreMigration(config).run() -} yield () -``` -**Very important note**: The migration has to be finished before your application starts any persistent actors! - -It's your choice whether you want to trigger migration manually or (recommended) leverage a database version control system of your choice (e.g. Flyway). - -#### Examples -An example Flyway-based migration can be found in the demo app: https://github.com/mkubala/demo-akka-persistence-postgres/blob/master/src/main/scala/com/github/mkubala/FlywayMigrationExample.scala -### Migration from akka-persistence-postgres 0.4.0 to 0.5.0 -New indices need to be created on each partition, to avoid locking production databases for too long, it should be done in 2 steps: -1. 
manually create indices CONCURRENTLY, -2. deploy new release with migration scripts. - -#### Manually create indices CONCURRENTLY -Execute DDL statements produced by the [sample migration script](scripts/migration-0.5.0/partitioned/1-add-indices-manually.sql), adapt top level variables to match your journal configuration before executing. +## Migration -#### Deploy new release with migration scripts -See [sample flyway migration script](scripts/migration-0.5.0/partitioned/2-add-indices-flyway.sql) and adapt top level variables to match your journal configuration. +Please see the documentation regarding migrations [here](https://swissborg.github.io/akka-persistence-postgres/migration). ## Contributing We are also always looking for contributions and new ideas, so if you’d like to join the project, check out the [open issues](https://github.com/SwissBorg/akka-persistence-postgres/issues), or post your own suggestions! diff --git a/docs/migration.md b/docs/migration.md index 137d5d3f..b6eeabad 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -61,3 +61,53 @@ Execute DDL statements produced by the [sample migration script](https://github. ### Deploy new release with migration scripts See [sample flyway migration script](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.5.0/partitioned/2-add-indices-flyway.sql) and adapt top level variables to match your journal configuration. + +## Migration from akka-persistence-postgres 0.5.0 to 0.6.0 + +The new `journal_metadata` table needs to be added, alongside the triggers and functions associated with it. +Here is the list of sample flyway migration scripts you can use: +1. [create journal_metadata table](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/1-create-journal-metadata-table.sql) +2. [create function to update journal_metadata](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql) +3. [create trigger to update journal_metadata](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/3-create-trigger-update-journal-metadata.sql) +4. [create function to check consistency of max sequence_nr per persistence_id](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql) +5. [create trigger to check consistency of max sequence_nr per persistence_id](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql) + +⚠️ Ensure to adapt the top level variables of the scripts to appropriate values that match your journal configuration/setup. + +This new table is used to improve the performance of specific queries. However, its usage is not enabled by default, so the previous (v0.5.0) behaviour is kept. +In order to make use of it you need to specify it through the configuration of your journal: + +```hocon +{ + postgres-journal { + ... + + use-journal-metadata = true # Default is false + } + + # Same applies to the read journal + postgres-read-journal { + ... + + use-journal-metadata = true # Default is false + } +} +``` + +Another important change that was introduced was that there is now a `FlatReadJournalDao` and a `PartitionedReadJournalDao`. +The first is the direct replacement of the previous `ByteArrayReadJournalDao` and it is the one set by default. 
+However, with the addition of the `journal_metadata`, if you are using the partitioned journal please change it to `PartitionedReadJournalDao`, +as some of the queries in use will benefit from it. + +```hocon +{ + postgres-read-journal { + ... + + dao = "akka.persistence.postgres.query.dao.PartitionedReadJournalDao" + use-journal-metadata = true # Default is false + } +} +``` + +⚠️ Also, since a new table is being added it might be required for you to adapt your `postgres-journal.tables` configuration. \ No newline at end of file From 0b0e6945160af9bc4e5c9b99ed4666801b500275 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Thu, 20 Jul 2023 17:48:13 +0100 Subject: [PATCH 26/34] Missing updates of dao classpaths in tests --- .../config/AkkaPersistenceConfig.scala | 6 +++--- .../AkkaPersistenceConfigTest.scala | 18 +++++++++--------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala b/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala index 78fece43..73e3c079 100644 --- a/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala +++ b/core/src/main/scala/akka/persistence/postgres/config/AkkaPersistenceConfig.scala @@ -107,7 +107,7 @@ class TagsTableConfiguration(config: Config) { } class JournalPluginConfig(config: Config) { - val dao: String = config.asString("dao", "akka.persistence.postgres.dao.bytea.journal.FlatJournalDao") + val dao: String = config.asString("dao", "akka.persistence.postgres.journal.dao.FlatJournalDao") override def toString: String = s"JournalPluginConfig($dao)" } @@ -122,12 +122,12 @@ class BaseByteArrayJournalDaoConfig(config: Config) { } class ReadJournalPluginConfig(config: Config) { - val dao: String = config.as[String]("dao", "akka.persistence.postgres.dao.bytea.readjournal.ByteArrayReadJournalDao") + val dao: String = config.as[String]("dao", "akka.persistence.postgres.query.dao.FlatReadJournalDao") override def toString: String = s"ReadJournalPluginConfig($dao)" } class SnapshotPluginConfig(config: Config) { - val dao: String = config.as[String]("dao", "akka.persistence.postgres.dao.bytea.snapshot.ByteArraySnapshotDao") + val dao: String = config.as[String]("dao", "akka.persistence.postgres.snapshot.dao.ByteArraySnapshotDao") override def toString: String = s"SnapshotPluginConfig($dao)" } diff --git a/core/src/test/scala/akka/persistence/postgres/configuration/AkkaPersistenceConfigTest.scala b/core/src/test/scala/akka/persistence/postgres/configuration/AkkaPersistenceConfigTest.scala index 808d2c62..269bf6c9 100644 --- a/core/src/test/scala/akka/persistence/postgres/configuration/AkkaPersistenceConfigTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/configuration/AkkaPersistenceConfigTest.scala @@ -46,7 +46,7 @@ class AkkaPersistenceConfigTest extends AnyFlatSpec with Matchers with OptionVal | } | } | - | dao = "akka.persistence.postgres.dao.bytea.journal.FlatJournalDao" + | dao = "akka.persistence.postgres.journal.dao.FlatJournalDao" | | logicalDelete = true | @@ -111,7 +111,7 @@ class AkkaPersistenceConfigTest extends AnyFlatSpec with Matchers with OptionVal | } | } | - | dao = "akka.persistence.postgres.dao.bytea.snapshot.ByteArraySnapshotDao" + | dao = "akka.persistence.postgres.snapshot.dao.ByteArraySnapshotDao" | | slick { | profile = "slick.jdbc.MySQLProfile$" @@ -163,7 +163,7 @@ class AkkaPersistenceConfigTest extends AnyFlatSpec with Matchers with OptionVal | # are delivered downstreams. 
| max-buffer-size = "10" | - | dao = "akka.persistence.postgres.dao.bytea.readjournal.ByteArrayReadJournalDao" + | dao = "akka.persistence.postgres.query.dao.FlatReadJournalDao" | | tags { | cacheTtl = 12 hours @@ -238,7 +238,7 @@ class AkkaPersistenceConfigTest extends AnyFlatSpec with Matchers with OptionVal slickConfiguration.jndiName shouldBe None slickConfiguration.jndiDbName shouldBe None - cfg.pluginConfig.dao shouldBe "akka.persistence.postgres.dao.bytea.journal.FlatJournalDao" + cfg.pluginConfig.dao shouldBe "akka.persistence.postgres.journal.dao.FlatJournalDao" cfg.journalTableConfiguration.tableName shouldBe "journal" cfg.journalTableConfiguration.schemaName shouldBe None @@ -267,7 +267,7 @@ class AkkaPersistenceConfigTest extends AnyFlatSpec with Matchers with OptionVal slickConfiguration.jndiName shouldBe None slickConfiguration.jndiDbName shouldBe None - cfg.pluginConfig.dao shouldBe "akka.persistence.postgres.dao.bytea.snapshot.ByteArraySnapshotDao" + cfg.pluginConfig.dao shouldBe "akka.persistence.postgres.snapshot.dao.ByteArraySnapshotDao" cfg.snapshotTableConfiguration.tableName shouldBe "snapshot" cfg.snapshotTableConfiguration.schemaName shouldBe None @@ -284,7 +284,7 @@ class AkkaPersistenceConfigTest extends AnyFlatSpec with Matchers with OptionVal slickConfiguration.jndiName shouldBe None slickConfiguration.jndiDbName shouldBe None - cfg.pluginConfig.dao shouldBe "akka.persistence.postgres.dao.bytea.readjournal.ByteArrayReadJournalDao" + cfg.pluginConfig.dao shouldBe "akka.persistence.postgres.query.dao.FlatReadJournalDao" cfg.refreshInterval shouldBe 1.second cfg.maxBufferSize shouldBe 500 @@ -313,7 +313,7 @@ class AkkaPersistenceConfigTest extends AnyFlatSpec with Matchers with OptionVal slickConfiguration.jndiName shouldBe None slickConfiguration.jndiDbName shouldBe None - cfg.pluginConfig.dao shouldBe "akka.persistence.postgres.dao.bytea.journal.FlatJournalDao" + cfg.pluginConfig.dao shouldBe "akka.persistence.postgres.journal.dao.FlatJournalDao" cfg.journalTableConfiguration.tableName shouldBe "journal" cfg.journalTableConfiguration.schemaName shouldBe None @@ -343,7 +343,7 @@ class AkkaPersistenceConfigTest extends AnyFlatSpec with Matchers with OptionVal slickConfiguration.jndiName shouldBe None slickConfiguration.jndiDbName shouldBe None - cfg.pluginConfig.dao shouldBe "akka.persistence.postgres.dao.bytea.snapshot.ByteArraySnapshotDao" + cfg.pluginConfig.dao shouldBe "akka.persistence.postgres.snapshot.dao.ByteArraySnapshotDao" cfg.snapshotTableConfiguration.tableName shouldBe "snapshot" cfg.snapshotTableConfiguration.schemaName shouldBe None @@ -360,7 +360,7 @@ class AkkaPersistenceConfigTest extends AnyFlatSpec with Matchers with OptionVal slickConfiguration.jndiName shouldBe None slickConfiguration.jndiDbName shouldBe None - cfg.pluginConfig.dao shouldBe "akka.persistence.postgres.dao.bytea.readjournal.ByteArrayReadJournalDao" + cfg.pluginConfig.dao shouldBe "akka.persistence.postgres.query.dao.FlatReadJournalDao" cfg.refreshInterval shouldBe 300.millis cfg.maxBufferSize shouldBe 10 From b9034635cd5ebe504e2b1f4351852c40c8761482 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Wed, 2 Aug 2023 13:29:10 +0100 Subject: [PATCH 27/34] Simplify migrations necessary for 0.6.0 --- .../postgres/nested-partitions-schema.sql | 40 ++++--------------- .../schema/postgres/partitioned-schema.sql | 40 ++++--------------- .../schema/postgres/plain-schema.sql | 38 +++--------------- .../journal/PostgresJournalSpec.scala | 40 +++++++++---------- 
.../query/JournalSequenceActorTest.scala | 6 +-- docs/migration.md | 2 - .../migration/journal/JournalSchema.scala | 28 ++----------- .../postgres/migration/MigrationTest.scala | 3 -- .../1-create-journal-metadata-table.sql | 2 +- ...reate-function-update-journal-metadata.sql | 4 +- ...tion-check-persistence-id-max-sequence.sql | 18 --------- ...gger-check-persistence-id-max-sequence.sql | 19 --------- 12 files changed, 48 insertions(+), 192 deletions(-) delete mode 100644 scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql delete mode 100644 scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql diff --git a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql index e7eb4ecf..509376a8 100644 --- a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql +++ b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql @@ -65,18 +65,16 @@ CREATE TABLE IF NOT EXISTS public.snapshot PRIMARY KEY (persistence_id, sequence_number) ); -DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON public.journal_metadata; -DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); DROP TRIGGER IF EXISTS trig_update_journal_metadata ON public.journal; DROP FUNCTION IF EXISTS public.update_journal_metadata(); DROP TABLE IF EXISTS public.journal_metadata; CREATE TABLE public.journal_metadata( - id BIGINT GENERATED ALWAYS AS IDENTITY, - persistence_id TEXT NOT NULL, + id BIGINT GENERATED ALWAYS AS IDENTITY, max_sequence_number BIGINT NOT NULL, - min_ordering BIGINT NOT NULL, - max_ordering BIGINT NOT NULL, + min_ordering BIGINT NOT NULL, + max_ordering BIGINT NOT NULL, + persistence_id TEXT NOT NULL, PRIMARY KEY (persistence_id) ) PARTITION BY HASH(persistence_id); @@ -91,35 +89,11 @@ BEGIN VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE SET - max_sequence_number = NEW.sequence_number, - max_ordering = NEW.ordering, + max_sequence_number = GREATEST(public.journal_metadata.max_sequence_number, NEW.sequence_number), + max_ordering = GREATEST(public.journal_metadata.max_ordering, NEW.ordering), min_ordering = LEAST(public.journal_metadata.min_ordering, NEW.ordering); RETURN NEW; END; $$ -LANGUAGE plpgsql; - -CREATE TRIGGER trig_update_journal_metadata - AFTER INSERT ON public.journal - FOR EACH ROW - EXECUTE PROCEDURE public.update_journal_metadata(); - -CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS -$$ -DECLARE -BEGIN - IF NEW.max_sequence_number <= OLD.max_sequence_number THEN - RAISE EXCEPTION 'New max_sequence_number not higher than previous value'; - END IF; - - RETURN NEW; -END; -$$ -LANGUAGE plpgsql; - - -CREATE TRIGGER trig_check_persistence_id_max_sequence_number - BEFORE UPDATE ON public.journal_metadata - FOR EACH ROW - EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); +LANGUAGE plpgsql; \ No newline at end of file diff --git a/core/src/test/resources/schema/postgres/partitioned-schema.sql b/core/src/test/resources/schema/postgres/partitioned-schema.sql index e1877326..b4368ae4 100644 --- a/core/src/test/resources/schema/postgres/partitioned-schema.sql +++ b/core/src/test/resources/schema/postgres/partitioned-schema.sql @@ -66,18 +66,16 @@ CREATE TABLE IF NOT EXISTS public.snapshot PRIMARY KEY (persistence_id, sequence_number) ); -DROP TRIGGER IF EXISTS 
trig_check_persistence_id_max_sequence_number ON public.journal_metadata; -DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); DROP TRIGGER IF EXISTS trig_update_journal_metadata ON public.journal; DROP FUNCTION IF EXISTS public.update_journal_metadata(); DROP TABLE IF EXISTS public.journal_metadata; CREATE TABLE public.journal_metadata( - id BIGINT GENERATED ALWAYS AS IDENTITY, - persistence_id TEXT NOT NULL, + id BIGINT GENERATED ALWAYS AS IDENTITY, max_sequence_number BIGINT NOT NULL, - min_ordering BIGINT NOT NULL, - max_ordering BIGINT NOT NULL, + min_ordering BIGINT NOT NULL, + max_ordering BIGINT NOT NULL, + persistence_id TEXT NOT NULL, PRIMARY KEY (persistence_id) ) PARTITION BY HASH(persistence_id); @@ -92,35 +90,11 @@ BEGIN VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE SET - max_sequence_number = NEW.sequence_number, - max_ordering = NEW.ordering, + max_sequence_number = GREATEST(public.journal_metadata.max_sequence_number, NEW.sequence_number), + max_ordering = GREATEST(public.journal_metadata.max_ordering, NEW.ordering), min_ordering = LEAST(public.journal_metadata.min_ordering, NEW.ordering); RETURN NEW; END; $$ -LANGUAGE plpgsql; - -CREATE TRIGGER trig_update_journal_metadata - AFTER INSERT ON public.journal - FOR EACH ROW - EXECUTE PROCEDURE public.update_journal_metadata(); - -CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS -$$ -DECLARE -BEGIN - IF NEW.max_sequence_number <= OLD.max_sequence_number THEN - RAISE EXCEPTION 'New max_sequence_number not higher than previous value'; - END IF; - - RETURN NEW; -END; -$$ -LANGUAGE plpgsql; - - -CREATE TRIGGER trig_check_persistence_id_max_sequence_number - BEFORE UPDATE ON public.journal_metadata - FOR EACH ROW - EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); +LANGUAGE plpgsql; \ No newline at end of file diff --git a/core/src/test/resources/schema/postgres/plain-schema.sql b/core/src/test/resources/schema/postgres/plain-schema.sql index 7a45c7f1..53e79560 100644 --- a/core/src/test/resources/schema/postgres/plain-schema.sql +++ b/core/src/test/resources/schema/postgres/plain-schema.sql @@ -39,18 +39,16 @@ CREATE TABLE IF NOT EXISTS public.snapshot PRIMARY KEY (persistence_id, sequence_number) ); -DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON public.journal_metadata; -DROP FUNCTION IF EXISTS public.check_persistence_id_max_sequence_number(); DROP TRIGGER IF EXISTS trig_update_journal_metadata ON public.journal; DROP FUNCTION IF EXISTS public.update_journal_metadata(); DROP TABLE IF EXISTS public.journal_metadata; CREATE TABLE public.journal_metadata( - id BIGINT GENERATED ALWAYS AS IDENTITY, - persistence_id TEXT NOT NULL, + id BIGINT GENERATED ALWAYS AS IDENTITY, max_sequence_number BIGINT NOT NULL, - min_ordering BIGINT NOT NULL, - max_ordering BIGINT NOT NULL, + min_ordering BIGINT NOT NULL, + max_ordering BIGINT NOT NULL, + persistence_id TEXT NOT NULL, PRIMARY KEY (persistence_id) ) PARTITION BY HASH(persistence_id); @@ -65,35 +63,11 @@ BEGIN VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) ON CONFLICT (persistence_id) DO UPDATE SET - max_sequence_number = NEW.sequence_number, - max_ordering = NEW.ordering, + max_sequence_number = GREATEST(public.journal_metadata.max_sequence_number, NEW.sequence_number), + max_ordering = GREATEST(public.journal_metadata.max_ordering, NEW.ordering), min_ordering = 
LEAST(public.journal_metadata.min_ordering, NEW.ordering); RETURN NEW; END; $$ LANGUAGE plpgsql; - -CREATE TRIGGER trig_update_journal_metadata - AFTER INSERT ON public.journal - FOR EACH ROW - EXECUTE PROCEDURE public.update_journal_metadata(); - -CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS -$$ -DECLARE -BEGIN - IF NEW.max_sequence_number <= OLD.max_sequence_number THEN - RAISE EXCEPTION 'New max_sequence_number not higher than previous value'; - END IF; - - RETURN NEW; -END; -$$ -LANGUAGE plpgsql; - - -CREATE TRIGGER trig_check_persistence_id_max_sequence_number - BEFORE UPDATE ON public.journal_metadata - FOR EACH ROW - EXECUTE PROCEDURE public.check_persistence_id_max_sequence_number(); diff --git a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala index 6c881c75..888e0103 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala @@ -84,8 +84,9 @@ trait PartitionedJournalSpecTestCases { this: PostgresJournalSpec => "A journal" must { - "store events concurrently for different persistence ids without creating duplicates or gaps among journal ordering (offset)" in { + "store events concurrently without any gaps or duplicates among ordering (offset) values" in { // given + val perId = "perId-1" val numOfSenders = 5 val batchSize = 1000 val senders = List.fill(numOfSenders)(TestProbe()).zipWithIndex @@ -95,35 +96,30 @@ trait PartitionedJournalSpecTestCases { .sequence { senders.map { case (sender, idx) => Future { - writeMessages((idx * batchSize) + 1, (idx + 1) * batchSize, s"perId-${idx + 1}", sender.ref, writerUuid) + writeMessages((idx * batchSize) + 1, (idx + 1) * batchSize, perId, sender.ref, writerUuid) } } } .futureValue(Timeout(Span(1, Minute))) + // then val journalOps = new ScalaPostgresReadJournalOperations(system) - var orderings: IndexedSeq[Long] = IndexedSeq.empty - - senders.foreach { case (_, idx) => - journalOps.withCurrentEventsByPersistenceId()(s"perId-${idx + 1}") { tp => - tp.request(Long.MaxValue) - val replayedMessages = (1 to batchSize).map { _ => - tp.expectNext() - } - tp.expectComplete() - orderings = orderings ++ replayedMessages.map(_.offset).collect { case Sequence(value) => - value - } + journalOps.withCurrentEventsByPersistenceId()(perId) { tp => + tp.request(Long.MaxValue) + val replayedMessages = (1 to batchSize * numOfSenders).map { _ => + tp.expectNext() } - } - - // then - orderings.size should equal(batchSize * numOfSenders) - val minOrd = orderings.min - val maxOrd = orderings.max - val expectedOrderings = (minOrd to maxOrd).toList + tp.expectComplete() + val orderings = replayedMessages.map(_.offset).collect { case Sequence(value) => + value + } + orderings.size should equal(batchSize * numOfSenders) + val minOrd = orderings.min + val maxOrd = orderings.max + val expectedOrderings = (minOrd to maxOrd).toList - (orderings.sorted should contain).theSameElementsInOrderAs(expectedOrderings) + (orderings.sorted should contain).theSameElementsInOrderAs(expectedOrderings) + } } } diff --git a/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala b/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala index 201813d5..201d8437 100644 --- a/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala 
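// Hedged side note (not part of this patch): a tiny pure-Scala model of the
// simplified journal_metadata upsert used in the schema files and migration
// scripts above. Taking GREATEST/LEAST of the stored and incoming values makes
// the update monotone and order-independent, which is why the separate
// check_persistence_id_max_sequence_number trigger could be dropped. This is a
// simplification for illustration only; it ignores SQL, concurrency and
// transactions.
object JournalMetadataUpsertModel extends App {
  final case class Meta(maxSequenceNumber: Long, minOrdering: Long, maxOrdering: Long)

  // Mirrors: INSERT ... ON CONFLICT (persistence_id) DO UPDATE SET
  //   max_sequence_number = GREATEST(old, new),
  //   max_ordering        = GREATEST(old, new),
  //   min_ordering        = LEAST(old, new)
  def upsert(existing: Option[Meta], sequenceNumber: Long, ordering: Long): Meta =
    existing match {
      case None => Meta(sequenceNumber, ordering, ordering)
      case Some(m) =>
        Meta(
          maxSequenceNumber = math.max(m.maxSequenceNumber, sequenceNumber),
          minOrdering = math.min(m.minOrdering, ordering),
          maxOrdering = math.max(m.maxOrdering, ordering))
    }

  // (sequenceNumber, ordering) pairs arriving out of order
  val events = List((1L, 10L), (3L, 12L), (2L, 11L))
  val inOrder = events.foldLeft(Option.empty[Meta]) { case (acc, (s, o)) => Some(upsert(acc, s, o)) }
  val reversed = events.reverse.foldLeft(Option.empty[Meta]) { case (acc, (s, o)) => Some(upsert(acc, s, o)) }
  assert(inOrder == reversed) // same Meta(3, 10, 12) either way
  println(inOrder)
}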
+++ b/core/src/test/scala/akka/persistence/postgres/query/JournalSequenceActorTest.scala @@ -76,7 +76,7 @@ abstract class JournalSequenceActorTest(val schemaType: SchemaType) extends Quer JournalRow(id, deleted = false, "id", id, Array(0.toByte), Nil, emptyJson) } .grouped(10000) - .mapAsync(1) { rows => + .mapAsync(4) { rows => db.run(journalTable.forceInsertAll(rows)) } .runWith(Sink.ignore) @@ -112,7 +112,7 @@ abstract class JournalSequenceActorTest(val schemaType: SchemaType) extends Quer JournalRow(id, deleted = false, "id", id, Array(0.toByte), Nil, emptyJson) } .grouped(10000) - .mapAsync(1) { rows => + .mapAsync(4) { rows => db.run(journalTable.forceInsertAll(rows)) } .runWith(Sink.ignore) @@ -145,7 +145,7 @@ abstract class JournalSequenceActorTest(val schemaType: SchemaType) extends Quer JournalRow(id, deleted = false, "id", id, Array(0.toByte), Nil, emptyJson) } .grouped(10000) - .mapAsync(1) { rows => + .mapAsync(4) { rows => db.run(journalTable.forceInsertAll(rows)) } .runWith(Sink.ignore) diff --git a/docs/migration.md b/docs/migration.md index b6eeabad..df30e7f3 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -69,8 +69,6 @@ Here is the list of sample flyway migration scripts you can use: 1. [create journal_metadata table](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/1-create-journal-metadata-table.sql) 2. [create function to update journal_metadata](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql) 3. [create trigger to update journal_metadata](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/3-create-trigger-update-journal-metadata.sql) -4. [create function to check consistency of max sequence_nr per persistence_id](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql) -5. [create trigger to check consistency of max sequence_nr per persistence_id](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql) ⚠️ Ensure to adapt the top level variables of the scripts to appropriate values that match your journal configuration/setup. 
diff --git a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala index 2c0a3475..ce06f709 100644 --- a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala +++ b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala @@ -32,11 +32,11 @@ private[journal] trait JournalSchema { import journalMetadataTableCfg.columnNames._ for { _ <- sqlu"""CREATE TABLE #$fullTableName ( - #$id BIGINT GENERATED ALWAYS AS IDENTITY, - #$persistenceId TEXT NOT NULL, + #$id BIGINT GENERATED ALWAYS AS IDENTITY, #$maxSequenceNumber BIGINT NOT NULL, #$maxOrdering BIGINT NOT NULL, #$minOrdering BIGINT NOT NULL, + #$persistenceId TEXT NOT NULL, PRIMARY KEY (#$persistenceId) ) PARTITION BY HASH(#$persistenceId)""" _ <- @@ -113,8 +113,8 @@ private[journal] trait JournalSchema { VALUES (NEW.#$jPersistenceId, NEW.#$sequenceNumber, NEW.#$ordering, NEW.#$ordering) ON CONFLICT (#$persistenceId) DO UPDATE SET - #$maxSequenceNumber = NEW.#$sequenceNumber, - #$maxOrdering = NEW.#$ordering, + #$maxSequenceNumber = GREATEST(#$fullTableName.#$maxSequenceNumber, NEW.#$sequenceNumber), + #$maxOrdering = GREATEST(#$fullTableName.#$maxOrdering, NEW.#$ordering), #$minOrdering = LEAST(#$fullTableName.#$minOrdering, NEW.#$ordering); RETURN NEW; @@ -135,26 +135,6 @@ private[journal] trait JournalSchema { FOR EACH ROW EXECUTE PROCEDURE #$schema.update_journal_metadata(); """ - - _ <- sqlu""" - CREATE OR REPLACE FUNCTION #$schema.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS $$$$ - DECLARE - BEGIN - IF NEW.#$maxSequenceNumber <= OLD.#$maxSequenceNumber THEN - RAISE EXCEPTION 'New max_sequence_number not higher than previous value'; - END IF; - - RETURN NEW; - END; - $$$$ LANGUAGE plpgsql; - """ - - _ <- sqlu""" - CREATE TRIGGER trig_check_persistence_id_max_sequence_number - BEFORE UPDATE ON #$fullTableName - FOR EACH ROW - EXECUTE PROCEDURE #$schema.check_persistence_id_max_sequence_number(); - """ } yield () } } diff --git a/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala b/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala index 77ef1da8..b2e5e5bf 100644 --- a/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala +++ b/migration/src/test/scala/akka/persistence/postgres/migration/MigrationTest.scala @@ -190,9 +190,6 @@ trait PrepareDatabase extends BeforeAndAfterEach with BeforeAndAfterAll with Sca _ <- sqlu"""DROP TABLE IF EXISTS migration.#$journalTableName""" _ <- sqlu"""DROP TRIGGER IF EXISTS trig_update_journal_metadata ON migration.#$journalTableName""" _ <- sqlu"""DROP FUNCTION IF EXISTS migration.update_journal_metadata()""" - _ <- - sqlu"""DROP TRIGGER IF EXISTS trig_check_persistence_id_max_sequence_number ON migration.#$journalMetadataTableName""" - _ <- sqlu"""DROP FUNCTION IF EXISTS migration.check_persistence_id_max_sequence_number()""" _ <- sqlu"""DROP TABLE IF EXISTS migration.#$journalMetadataTableName""" _ <- sqlu"""CREATE TABLE IF NOT EXISTS migration.#$journalTableName ( diff --git a/scripts/migration-0.6.0/1-create-journal-metadata-table.sql b/scripts/migration-0.6.0/1-create-journal-metadata-table.sql index 660ec641..84777220 100644 --- a/scripts/migration-0.6.0/1-create-journal-metadata-table.sql +++ b/scripts/migration-0.6.0/1-create-journal-metadata-table.sql @@ -23,10 +23,10 @@ BEGIN sql := 'CREATE TABLE IF NOT 
EXISTS ' || jm_table || '(' || jm_id_column || ' BIGINT GENERATED ALWAYS AS IDENTITY, ' || - jm_persistence_id_column || ' TEXT NOT NULL, ' || jm_max_sequence_number_column || ' BIGINT NOT NULL, ' || jm_max_ordering_column || ' BIGINT NOT NULL, ' || jm_min_ordering_column || ' BIGINT NOT NULL, ' || + jm_persistence_id_column || ' TEXT NOT NULL, ' || 'PRIMARY KEY (' || jm_persistence_id_column || ')' || ') PARTITION BY HASH(' || jm_persistence_id_column || ')'; diff --git a/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql b/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql index 5532d53e..92ba6bcd 100644 --- a/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql +++ b/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql @@ -26,8 +26,8 @@ BEGIN jm_table := schema || '.' || jm_table_name; cols := jm_persistence_id_column || ', ' || jm_max_sequence_number_column || ', ' || jm_max_ordering_column || ', ' || jm_min_ordering_column; vals := '($1).' || j_persistence_id_column || ', ($1).' || j_sequence_number_column || ', ($1).' || j_ordering_column || ',($1).' || j_ordering_column; - upds := jm_max_sequence_number_column || ' = ($1).' || j_sequence_number_column || ', ' || - jm_max_ordering_column || ' = ($1).' || j_ordering_column || ', ' || + upds := jm_max_sequence_number_column || ' = GREATEST(' || jm_table || '.' || jm_max_sequence_number_column || ', ($1).' || j_sequence_number_column || '), ' || + jm_max_ordering_column || ' = GREATEST(' || jm_table || '.' || jm_max_ordering_column || ', ($1).' || j_ordering_column || '), ' || jm_min_ordering_column || ' = LEAST(' || jm_table || '.' || jm_min_ordering_column || ', ($1).' || j_ordering_column || ')'; sql := 'INSERT INTO ' || jm_table || ' (' || cols || ') VALUES (' || vals || ') ' || diff --git a/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql b/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql deleted file mode 100644 index 81337e55..00000000 --- a/scripts/migration-0.6.0/4-create-function-check-persistence-id-max-sequence.sql +++ /dev/null @@ -1,18 +0,0 @@ --- replace schema value if required -CREATE OR REPLACE FUNCTION public.check_persistence_id_max_sequence_number() RETURNS TRIGGER AS -$$ -DECLARE - -- replace with appropriate values - jm_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; - - -- variables - sql TEXT; -BEGIN - sql := 'IF NEW.' || jm_max_sequence_number_column || ' <= OLD.' || jm_max_sequence_number_column || ' THEN - RAISE EXCEPTION ''New max_sequence_number not higher than previous value''; - END IF;'; - - EXECUTE sql USING NEW; - RETURN NEW; -END; -$$ LANGUAGE plpgsql; diff --git a/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql b/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql deleted file mode 100644 index cad3f35d..00000000 --- a/scripts/migration-0.6.0/5-create-trigger-check-persistence-id-max-sequence.sql +++ /dev/null @@ -1,19 +0,0 @@ -DO $$ -DECLARE - -- replace with appropriate values - schema CONSTANT TEXT := 'public'; - jm_table_name CONSTANT TEXT := 'journal_metadata'; - - -- variables - jm_table TEXT; - sql TEXT; -BEGIN - jm_table := schema || '.' 
|| jm_table_name; - - sql := 'CREATE TRIGGER trig_check_persistence_id_max_sequence_number - BEFORE UPDATE ON ' || jm_table || ' FOR EACH ROW - EXECUTE PROCEDURE ' || schema || '.check_persistence_id_max_sequence_number()'; - - EXECUTE sql; -END ; -$$ LANGUAGE plpgsql; From 7d96c33954e20fb1bd0b4b56bd9a19743594bed0 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Fri, 4 Aug 2023 15:46:03 +0100 Subject: [PATCH 28/34] Solve issue of min_ordering on upsert --- .../postgres/nested-partitions-schema.sql | 20 +++- .../schema/postgres/partitioned-schema.sql | 20 +++- .../schema/postgres/plain-schema.sql | 18 +++- .../journal/PostgresJournalSpec.scala | 96 ++++++++++++++++++- docs/migration.md | 4 + .../migration/journal/JournalSchema.scala | 13 ++- ...reate-function-update-journal-metadata.sql | 6 +- .../4-populate-journal-metadata.sql | 39 ++++++++ 8 files changed, 196 insertions(+), 20 deletions(-) create mode 100644 scripts/migration-0.6.0/4-populate-journal-metadata.sql diff --git a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql index 509376a8..4f2b4dc3 100644 --- a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql +++ b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql @@ -86,14 +86,26 @@ $$ DECLARE BEGIN INSERT INTO public.journal_metadata (persistence_id, max_sequence_number, max_ordering, min_ordering) - VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) + VALUES ( + NEW.persistence_id, + NEW.sequence_number, + NEW.ordering, + CASE + WHEN NEW.sequence_number = 1 THEN NEW.ordering + ELSE 0 + END + ) ON CONFLICT (persistence_id) DO UPDATE SET max_sequence_number = GREATEST(public.journal_metadata.max_sequence_number, NEW.sequence_number), - max_ordering = GREATEST(public.journal_metadata.max_ordering, NEW.ordering), - min_ordering = LEAST(public.journal_metadata.min_ordering, NEW.ordering); + max_ordering = GREATEST(public.journal_metadata.max_ordering, NEW.ordering); RETURN NEW; END; $$ -LANGUAGE plpgsql; \ No newline at end of file +LANGUAGE plpgsql; + +CREATE TRIGGER trig_update_journal_metadata + AFTER INSERT ON public.journal + FOR EACH ROW + EXECUTE PROCEDURE public.update_journal_metadata(); \ No newline at end of file diff --git a/core/src/test/resources/schema/postgres/partitioned-schema.sql b/core/src/test/resources/schema/postgres/partitioned-schema.sql index b4368ae4..e0ce5741 100644 --- a/core/src/test/resources/schema/postgres/partitioned-schema.sql +++ b/core/src/test/resources/schema/postgres/partitioned-schema.sql @@ -87,14 +87,26 @@ $$ DECLARE BEGIN INSERT INTO public.journal_metadata (persistence_id, max_sequence_number, max_ordering, min_ordering) - VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) + VALUES ( + NEW.persistence_id, + NEW.sequence_number, + NEW.ordering, + CASE + WHEN NEW.sequence_number = 1 THEN NEW.ordering + ELSE 0 + END + ) ON CONFLICT (persistence_id) DO UPDATE SET max_sequence_number = GREATEST(public.journal_metadata.max_sequence_number, NEW.sequence_number), - max_ordering = GREATEST(public.journal_metadata.max_ordering, NEW.ordering), - min_ordering = LEAST(public.journal_metadata.min_ordering, NEW.ordering); + max_ordering = GREATEST(public.journal_metadata.max_ordering, NEW.ordering); RETURN NEW; END; $$ -LANGUAGE plpgsql; \ No newline at end of file +LANGUAGE plpgsql; + +CREATE TRIGGER trig_update_journal_metadata + AFTER INSERT ON public.journal + FOR EACH ROW + 
EXECUTE PROCEDURE public.update_journal_metadata(); \ No newline at end of file diff --git a/core/src/test/resources/schema/postgres/plain-schema.sql b/core/src/test/resources/schema/postgres/plain-schema.sql index 53e79560..68812d46 100644 --- a/core/src/test/resources/schema/postgres/plain-schema.sql +++ b/core/src/test/resources/schema/postgres/plain-schema.sql @@ -60,14 +60,26 @@ $$ DECLARE BEGIN INSERT INTO public.journal_metadata (persistence_id, max_sequence_number, max_ordering, min_ordering) - VALUES (NEW.persistence_id, NEW.sequence_number, NEW.ordering, NEW.ordering) + VALUES ( + NEW.persistence_id, + NEW.sequence_number, + NEW.ordering, + CASE + WHEN NEW.sequence_number = 1 THEN NEW.ordering + ELSE 0 + END + ) ON CONFLICT (persistence_id) DO UPDATE SET max_sequence_number = GREATEST(public.journal_metadata.max_sequence_number, NEW.sequence_number), - max_ordering = GREATEST(public.journal_metadata.max_ordering, NEW.ordering), - min_ordering = LEAST(public.journal_metadata.min_ordering, NEW.ordering); + max_ordering = GREATEST(public.journal_metadata.max_ordering, NEW.ordering); RETURN NEW; END; $$ LANGUAGE plpgsql; + +CREATE TRIGGER trig_update_journal_metadata + AFTER INSERT ON public.journal + FOR EACH ROW + EXECUTE PROCEDURE public.update_journal_metadata(); \ No newline at end of file diff --git a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala index 888e0103..9caeff57 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala @@ -5,11 +5,12 @@ package akka.persistence.postgres.journal -import akka.actor.Actor -import akka.persistence.JournalProtocol.{ ReplayedMessage, WriteMessages, WriteMessagesFailed } +import akka.actor.{ Actor, ActorRef } +import akka.persistence.JournalProtocol.{ ReplayedMessage, WriteMessages, WriteMessagesFailed, WriteMessagesSuccessful } import akka.persistence.journal.JournalSpec import akka.persistence.postgres.config._ import akka.persistence.postgres.db.SlickExtension +import akka.persistence.postgres.journal.dao.JournalMetadataTable import akka.persistence.postgres.query.ScalaPostgresReadJournalOperations import akka.persistence.postgres.util.Schema._ import akka.persistence.postgres.util.{ ClasspathResources, DropCreate } @@ -54,6 +55,19 @@ abstract class PostgresJournalSpec(config: String, schemaType: SchemaType) super.afterAll() } + private def writeSingleMessage(seqNr: Int, pid: String, sender: ActorRef, writerUuid: String) = { + val msg = AtomicWrite( + PersistentRepr( + payload = s"a-$seqNr", + sequenceNr = seqNr, + persistenceId = pid, + sender = sender, + writerUuid = writerUuid)) + val probe = TestProbe() + journal ! 
WriteMessages(List(msg), probe.ref, actorInstanceId) + probe.expectMsg(WriteMessagesSuccessful) + } + "A journal" must { "not allow to store events with sequence number lower than what is already stored for the same persistence id" in { // given @@ -69,7 +83,7 @@ abstract class PostgresJournalSpec(config: String, schemaType: SchemaType) PersistentRepr( payload = s"a-$repeatedSnr", sequenceNr = repeatedSnr, - persistenceId = pid, + persistenceId = perId, sender = sender.ref, writerUuid = writerUuid)) @@ -78,6 +92,82 @@ abstract class PostgresJournalSpec(config: String, schemaType: SchemaType) probe.expectMsgType[WriteMessagesFailed] } } + + "An insert on the journal" must { + import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ + + val metadataTable = JournalMetadataTable(journalConfig.journalMetadataTableConfiguration) + + "automatically insert journal metadata" in { + // given + val perId = "perId-meta-1" + val sender = TestProbe() + + // when + writeSingleMessage(1, perId, sender.ref, writerUuid) + + // then + val metadataExists = db.run(metadataTable.filter(_.persistenceId === perId).exists.result).futureValue + metadataExists shouldBe true + } + + "upsert only max_sequence_number and max_ordering if metadata already exists" in { + // given + val perId = "perId-meta-2" + val sender = TestProbe() + writeSingleMessage(1, perId, sender.ref, writerUuid) + val prevMaxSeqNr = + db.run(metadataTable.filter(_.persistenceId === perId).map(_.maxSequenceNumber).result.head).futureValue + val prevMaxOrdering = + db.run(metadataTable.filter(_.persistenceId === perId).map(_.maxOrdering).result.head).futureValue + val prevMinOrdering = + db.run(metadataTable.filter(_.persistenceId === perId).map(_.minOrdering).result.head).futureValue + + // when + writeSingleMessage(2, perId, sender.ref, writerUuid) + + // then + val newMaxSeqNr = + db.run(metadataTable.filter(_.persistenceId === perId).map(_.maxSequenceNumber).result.head).futureValue + val newMaxOrdering = + db.run(metadataTable.filter(_.persistenceId === perId).map(_.maxOrdering).result.head).futureValue + val newMinOrdering = + db.run(metadataTable.filter(_.persistenceId === perId).map(_.minOrdering).result.head).futureValue + + newMaxSeqNr shouldBe prevMaxSeqNr + 1 + newMaxOrdering shouldBe prevMaxOrdering + 1 + newMinOrdering shouldBe prevMinOrdering + } + + "set min_ordering to 0 when no metadata entry exists but the event being inserted is not the first one for the persistenceId (sequence_number > 1)" in { + // given + val perId = "perId-meta-3" + val sender = TestProbe() + writeSingleMessage(1, perId, sender.ref, writerUuid) + val prevMaxSeqNr = + db.run(metadataTable.filter(_.persistenceId === perId).map(_.maxSequenceNumber).result.head).futureValue + val prevMaxOrdering = + db.run(metadataTable.filter(_.persistenceId === perId).map(_.maxOrdering).result.head).futureValue + + // when + // simulate case where metadata does not exist, but persistenceId already has events + db.run(metadataTable.filter(_.persistenceId === perId).delete).futureValue + // write new event of same persistenceId + writeSingleMessage(2, perId, sender.ref, writerUuid) + + // then + val newMaxSeqNr = + db.run(metadataTable.filter(_.persistenceId === perId).map(_.maxSequenceNumber).result.head).futureValue + val newMaxOrdering = + db.run(metadataTable.filter(_.persistenceId === perId).map(_.maxOrdering).result.head).futureValue + val newMinOrdering = + db.run(metadataTable.filter(_.persistenceId === perId).map(_.minOrdering).result.head).futureValue + + 
newMaxSeqNr shouldBe prevMaxSeqNr + 1 + newMaxOrdering shouldBe prevMaxOrdering + 1 + newMinOrdering shouldBe 0 + } + } } trait PartitionedJournalSpecTestCases { diff --git a/docs/migration.md b/docs/migration.md index df30e7f3..55c7d397 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -64,11 +64,15 @@ See [sample flyway migration script](https://github.com/SwissBorg/akka-persisten ## Migration from akka-persistence-postgres 0.5.0 to 0.6.0 +TODO IMPROVE THIS SECTION WITH MORE DETAILS. + The new `journal_metadata` table needs to be added, alongside the triggers and functions associated with it. + Here is the list of sample flyway migration scripts you can use: 1. [create journal_metadata table](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/1-create-journal-metadata-table.sql) 2. [create function to update journal_metadata](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql) 3. [create trigger to update journal_metadata](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/3-create-trigger-update-journal-metadata.sql) +4. [populate journal_metadata with past data](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/4-populate-journal-metadata.sql) ⚠️ Ensure to adapt the top level variables of the scripts to appropriate values that match your journal configuration/setup. diff --git a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala index ce06f709..aa8f617d 100644 --- a/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala +++ b/migration/src/main/scala/akka/persistence/postgres/migration/journal/JournalSchema.scala @@ -110,12 +110,19 @@ private[journal] trait JournalSchema { DECLARE BEGIN INSERT INTO #$fullTableName (#$persistenceId, #$maxSequenceNumber, #$maxOrdering, #$minOrdering) - VALUES (NEW.#$jPersistenceId, NEW.#$sequenceNumber, NEW.#$ordering, NEW.#$ordering) + VALUES ( + NEW.#$jPersistenceId, + NEW.#$sequenceNumber, + NEW.#$ordering, + CASE + WHEN NEW.#$sequenceNumber = 1 THEN NEW.#$ordering + ELSE -1 + END + ) ON CONFLICT (#$persistenceId) DO UPDATE SET #$maxSequenceNumber = GREATEST(#$fullTableName.#$maxSequenceNumber, NEW.#$sequenceNumber), - #$maxOrdering = GREATEST(#$fullTableName.#$maxOrdering, NEW.#$ordering), - #$minOrdering = LEAST(#$fullTableName.#$minOrdering, NEW.#$ordering); + #$maxOrdering = GREATEST(#$fullTableName.#$maxOrdering, NEW.#$ordering); RETURN NEW; END; diff --git a/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql b/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql index 92ba6bcd..1ff0377c 100644 --- a/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql +++ b/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql @@ -25,10 +25,10 @@ BEGIN j_table := schema || '.' || j_table_name; jm_table := schema || '.' || jm_table_name; cols := jm_persistence_id_column || ', ' || jm_max_sequence_number_column || ', ' || jm_max_ordering_column || ', ' || jm_min_ordering_column; - vals := '($1).' || j_persistence_id_column || ', ($1).' || j_sequence_number_column || ', ($1).' || j_ordering_column || ',($1).' || j_ordering_column; + vals := '($1).' || j_persistence_id_column || ', ($1).' 
|| j_sequence_number_column || ', ($1).' || j_ordering_column + ', CASE WHEN ($1).' || j_sequence_number_column || ' = 1 THEN ($1).' || j_ordering_column || ' ELSE 0 END'; upds := jm_max_sequence_number_column || ' = GREATEST(' || jm_table || '.' || jm_max_sequence_number_column || ', ($1).' || j_sequence_number_column || '), ' || - jm_max_ordering_column || ' = GREATEST(' || jm_table || '.' || jm_max_ordering_column || ', ($1).' || j_ordering_column || '), ' || - jm_min_ordering_column || ' = LEAST(' || jm_table || '.' || jm_min_ordering_column || ', ($1).' || j_ordering_column || ')'; + jm_max_ordering_column || ' = GREATEST(' || jm_table || '.' || jm_max_ordering_column || ', ($1).' || j_ordering_column || ')'; sql := 'INSERT INTO ' || jm_table || ' (' || cols || ') VALUES (' || vals || ') ' || 'ON CONFLICT (' || jm_persistence_id_column || ') DO UPDATE SET ' || upds; diff --git a/scripts/migration-0.6.0/4-populate-journal-metadata.sql b/scripts/migration-0.6.0/4-populate-journal-metadata.sql new file mode 100644 index 00000000..35e6d61f --- /dev/null +++ b/scripts/migration-0.6.0/4-populate-journal-metadata.sql @@ -0,0 +1,39 @@ +/* +ATTENTION: This is a simplistic migration, which is not prepared to handle a large number of rows. +If that is your situation, please consider running some kind of batched ad-hoc program that will read the journal, +compute the necessary values and then insert them into the journal metadata table. + +When you upgrade to the 0.6.x series, the crucial part is adding the metadata insert trigger, which will take care of all new events, +meaning that it is totally safe to handle the backfilling of data in an ad-hoc manner. +*/ +DO $$ +DECLARE + -- replace with appropriate values + schema CONSTANT TEXT := 'public'; + j_table_name CONSTANT TEXT := 'journal'; + j_persistence_id_column CONSTANT TEXT := 'persistence_id'; + j_sequence_number_column CONSTANT TEXT := 'sequence_number'; + j_ordering_column CONSTANT TEXT := 'ordering'; + jpi_table_name CONSTANT TEXT := 'journal_metadata'; + jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; + jpi_max_ordering_column CONSTANT TEXT := 'max_ordering'; + jpi_min_ordering_column CONSTANT TEXT := 'min_ordering'; + + -- variables + j_table TEXT; + jpi_table TEXT; + sql TEXT; +BEGIN + j_table := schema || '.' || j_table_name; + jpi_table := schema || '.' || jpi_table_name; + sql := 'INSERT INTO ' || jpi_table || ' (' || j_persistence_id_column || ', ' || jpi_max_sequence_number_column || ', ' || jpi_max_ordering_column || ', ' || jpi_min_ordering_column || ')' || + ' SELECT ' || + j_persistence_id_column || ', ' || + 'max(' || j_sequence_number_column || '), ' || + 'max(' || j_ordering_column || '), ' || + 'min(' || j_ordering_column || ')' || + ' FROM ' || j_table || ' GROUP BY ' || j_persistence_id_column; + + EXECUTE sql; +END; +$$ LANGUAGE plpgsql; \ No newline at end of file From 43a37722efb76a663f13e709dc450458cc98b4e3 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Mon, 7 Aug 2023 10:18:33 +0100 Subject: [PATCH 29/34] Improve migration documentation --- docs/migration.md | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/docs/migration.md b/docs/migration.md index 55c7d397..98790765 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -64,11 +64,21 @@ See [sample flyway migration script](https://github.com/SwissBorg/akka-persisten ## Migration from akka-persistence-postgres 0.5.0 to 0.6.0 -TODO IMPROVE THIS SECTION WITH MORE DETAILS.
+Version 0.6.0 aims to improve the performance of the query that has the most DB I/O when using this plugin: +```sql +select max("sequence_number") from "journal" where "persistence_id" = ? +``` + +We introduced a new `journal_metadata` table that holds key data per persistence id and is used to speed up the above query and others (such as the one used to replay events). To achieve this, we trade off a bit of performance at event write time; the extra cost comes from a DB trigger that is executed every time an insert on the journal happens. +For now, this table holds the following information per persistence id: +- max sequence number among all the associated events; +- min and max ordering values delimiting where the events are located within the journal; + +We believe the trade-off is worth it, since the impact on write performance is much smaller than the gain in read performance on the queries that account for most of the DB I/O. +Below is the list of sample flyway migration scripts you can use to add this new table and associated triggers. +⚠️ The last one of them is a simplistic data migration that populates the new table. If your data set is large, consider a lazier ad-hoc alternative that reads the journal in batches and inserts the missing data (a batched sketch is shown further below). The trigger you will be adding is idempotent, so it is safe to re-process some events while the ad-hoc job catches up with the most recent ones. -Here is the list of sample flyway migration scripts you can use: 1. [create journal_metadata table](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/1-create-journal-metadata-table.sql) 2. [create function to update journal_metadata](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql) 3. [create trigger to update journal_metadata](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/3-create-trigger-update-journal-metadata.sql) +4. [populate journal_metadata with past data](https://github.com/SwissBorg/akka-persistence-postgres/blob/master/scripts/migration-0.6.0/4-populate-journal-metadata.sql) ⚠️ Ensure to adapt the top level variables of the scripts to appropriate values that match your journal configuration/setup. -This new table is used to improve the performance of specific queries. However, its usage is not enabled by default, so the previous (v0.5.0) behaviour is kept. +Keep in mind that the usage of the new table by the queries is not enabled by default, so the previous (v0.5.0) behaviour is kept.
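For journals too large for the simplistic populate script, the lazier backfill mentioned above could look roughly like the sketch below. This is not one of the provided migration scripts; it assumes the default `journal` and `journal_metadata` table and column names and a hypothetical batch size, so adapt it to your setup. Because a single `DO` block runs in one transaction, in practice you would more likely drive the same batching loop from an external job so that each batch commits on its own:

```sql
-- Hypothetical batched backfill of journal_metadata (a sketch, not one of the shipped scripts).
-- Assumes default names: journal(persistence_id, sequence_number, ordering)
-- and journal_metadata(persistence_id, max_sequence_number, max_ordering, min_ordering).
DO $$
DECLARE
  batch_size CONSTANT INT := 1000;  -- assumed batch size; tune for your data set
  last_pid   TEXT := '';
  batch_pids TEXT[];
BEGIN
  LOOP
    -- pick the next slice of persistence ids in lexicographic order
    SELECT array_agg(persistence_id ORDER BY persistence_id) INTO batch_pids
    FROM (
      SELECT DISTINCT persistence_id
      FROM public.journal
      WHERE persistence_id > last_pid
      ORDER BY persistence_id
      LIMIT batch_size
    ) AS slice;

    EXIT WHEN batch_pids IS NULL;  -- no more persistence ids left

    -- aggregate only this slice; ids already maintained by the insert trigger are left untouched
    INSERT INTO public.journal_metadata (persistence_id, max_sequence_number, max_ordering, min_ordering)
    SELECT persistence_id, max(sequence_number), max(ordering), min(ordering)
    FROM public.journal
    WHERE persistence_id = ANY (batch_pids)
    GROUP BY persistence_id
    ON CONFLICT (persistence_id) DO NOTHING;

    last_pid := batch_pids[array_upper(batch_pids, 1)];
  END LOOP;
END;
$$ LANGUAGE plpgsql;
```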
In order to make use of it you need to specify it through the configuration of your journal: ```hocon From a70d62796ddc2591729acec29d2e70c962023a54 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Mon, 7 Aug 2023 13:31:11 +0100 Subject: [PATCH 30/34] Use -1 instead of 0 as min_ordering default value --- .../resources/schema/postgres/nested-partitions-schema.sql | 2 +- .../src/test/resources/schema/postgres/partitioned-schema.sql | 2 +- core/src/test/resources/schema/postgres/plain-schema.sql | 2 +- .../persistence/postgres/journal/PostgresJournalSpec.scala | 4 ++-- .../2-create-function-update-journal-metadata.sql | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql index 4f2b4dc3..affb6c4d 100644 --- a/core/src/test/resources/schema/postgres/nested-partitions-schema.sql +++ b/core/src/test/resources/schema/postgres/nested-partitions-schema.sql @@ -92,7 +92,7 @@ BEGIN NEW.ordering, CASE WHEN NEW.sequence_number = 1 THEN NEW.ordering - ELSE 0 + ELSE -1 END ) ON CONFLICT (persistence_id) DO UPDATE diff --git a/core/src/test/resources/schema/postgres/partitioned-schema.sql b/core/src/test/resources/schema/postgres/partitioned-schema.sql index e0ce5741..621dc00b 100644 --- a/core/src/test/resources/schema/postgres/partitioned-schema.sql +++ b/core/src/test/resources/schema/postgres/partitioned-schema.sql @@ -93,7 +93,7 @@ BEGIN NEW.ordering, CASE WHEN NEW.sequence_number = 1 THEN NEW.ordering - ELSE 0 + ELSE -1 END ) ON CONFLICT (persistence_id) DO UPDATE diff --git a/core/src/test/resources/schema/postgres/plain-schema.sql b/core/src/test/resources/schema/postgres/plain-schema.sql index 68812d46..cd1105a6 100644 --- a/core/src/test/resources/schema/postgres/plain-schema.sql +++ b/core/src/test/resources/schema/postgres/plain-schema.sql @@ -66,7 +66,7 @@ BEGIN NEW.ordering, CASE WHEN NEW.sequence_number = 1 THEN NEW.ordering - ELSE 0 + ELSE -1 END ) ON CONFLICT (persistence_id) DO UPDATE diff --git a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala index 9caeff57..fb958767 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala @@ -139,7 +139,7 @@ abstract class PostgresJournalSpec(config: String, schemaType: SchemaType) newMinOrdering shouldBe prevMinOrdering } - "set min_ordering to 0 when no metadata entry exists but the event being inserted is not the first one for the persistenceId (sequence_number > 1)" in { + "set min_ordering to -1 when no metadata entry exists but the event being inserted is not the first one for the persistenceId (sequence_number > 1)" in { // given val perId = "perId-meta-3" val sender = TestProbe() @@ -165,7 +165,7 @@ abstract class PostgresJournalSpec(config: String, schemaType: SchemaType) newMaxSeqNr shouldBe prevMaxSeqNr + 1 newMaxOrdering shouldBe prevMaxOrdering + 1 - newMinOrdering shouldBe 0 + newMinOrdering shouldBe -1 } } } diff --git a/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql b/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql index 1ff0377c..159b2e4f 100644 --- a/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql +++ b/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql @@ -26,7 +26,7 
@@ BEGIN jm_table := schema || '.' || jm_table_name; cols := jm_persistence_id_column || ', ' || jm_max_sequence_number_column || ', ' || jm_max_ordering_column || ', ' || jm_min_ordering_column; vals := '($1).' || j_persistence_id_column || ', ($1).' || j_sequence_number_column || ', ($1).' || j_ordering_column || - ', CASE WHEN ($1).' || j_sequence_number_column || ' = 1 THEN ($1).' || j_ordering_column || ' ELSE 0 END'; + ', CASE WHEN ($1).' || j_sequence_number_column || ' = 1 THEN ($1).' || j_ordering_column || ' ELSE -1 END'; upds := jm_max_sequence_number_column || ' = GREATEST(' || jm_table || '.' || jm_max_sequence_number_column || ', ($1).' || j_sequence_number_column || '), ' || jm_max_ordering_column || ' = GREATEST(' || jm_table || '.' || jm_max_ordering_column || ', ($1).' || j_ordering_column || ')'; From 9631cea47e60424ec1e6e1da7eb31fbbab516077 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Tue, 8 Aug 2023 19:32:44 +0100 Subject: [PATCH 31/34] Add test cases suggested on review --- .../journal/PostgresJournalSpec.scala | 20 +++++++++++++++---- ...reate-function-update-journal-metadata.sql | 4 +++- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala index fb958767..a707380e 100644 --- a/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala +++ b/core/src/test/scala/akka/persistence/postgres/journal/PostgresJournalSpec.scala @@ -97,18 +97,29 @@ abstract class PostgresJournalSpec(config: String, schemaType: SchemaType) import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ val metadataTable = JournalMetadataTable(journalConfig.journalMetadataTableConfiguration) + val UNSET_MIN_ORDERING = -1 "automatically insert journal metadata" in { // given val perId = "perId-meta-1" val sender = TestProbe() + val prevMetadataExists = db.run(metadataTable.filter(_.persistenceId === perId).exists.result).futureValue // when writeSingleMessage(1, perId, sender.ref, writerUuid) // then - val metadataExists = db.run(metadataTable.filter(_.persistenceId === perId).exists.result).futureValue - metadataExists shouldBe true + val newMetadataExists = db.run(metadataTable.filter(_.persistenceId === perId).exists.result).futureValue + val maxOrdering = + db.run(metadataTable.filter(_.persistenceId === perId).map(_.maxOrdering).result.head).futureValue + val minOrdering = + db.run(metadataTable.filter(_.persistenceId === perId).map(_.minOrdering).result.head).futureValue + + prevMetadataExists shouldBe false + newMetadataExists shouldBe true + // when its the first event the insert should take the ordering value and set that on the min and max_ordering columns + maxOrdering shouldBe minOrdering + minOrdering > 0 shouldBe true } "upsert only max_sequence_number and max_ordering if metadata already exists" in { @@ -137,9 +148,10 @@ abstract class PostgresJournalSpec(config: String, schemaType: SchemaType) newMaxSeqNr shouldBe prevMaxSeqNr + 1 newMaxOrdering shouldBe prevMaxOrdering + 1 newMinOrdering shouldBe prevMinOrdering + newMaxOrdering > 0 shouldBe true } - "set min_ordering to -1 when no metadata entry exists but the event being inserted is not the first one for the persistenceId (sequence_number > 1)" in { + "set min_ordering to UNSET_MIN_ORDERING when no metadata entry exists but the event being inserted is not the first one for the persistenceId (sequence_number > 1)" in { // given val 
perId = "perId-meta-3" val sender = TestProbe() @@ -165,7 +177,7 @@ abstract class PostgresJournalSpec(config: String, schemaType: SchemaType) newMaxSeqNr shouldBe prevMaxSeqNr + 1 newMaxOrdering shouldBe prevMaxOrdering + 1 - newMinOrdering shouldBe -1 + newMinOrdering shouldBe UNSET_MIN_ORDERING } } } diff --git a/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql b/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql index 159b2e4f..754219f0 100644 --- a/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql +++ b/scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql @@ -13,6 +13,8 @@ DECLARE jm_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; jm_max_ordering_column CONSTANT TEXT := 'max_ordering'; jm_min_ordering_column CONSTANT TEXT := 'min_ordering'; + first_sequence_number_value CONSTANT INTEGER := 1; + unset_min_ordering_value CONSTANT INTEGER := -1; -- variables j_table TEXT; @@ -26,7 +28,7 @@ BEGIN jm_table := schema || '.' || jm_table_name; cols := jm_persistence_id_column || ', ' || jm_max_sequence_number_column || ', ' || jm_max_ordering_column || ', ' || jm_min_ordering_column; vals := '($1).' || j_persistence_id_column || ', ($1).' || j_sequence_number_column || ', ($1).' || j_ordering_column || - ', CASE WHEN ($1).' || j_sequence_number_column || ' = 1 THEN ($1).' || j_ordering_column || ' ELSE -1 END'; + ', CASE WHEN ($1).' || j_sequence_number_column || ' = ' || first_sequence_number_value || ' THEN ($1).' || j_ordering_column || ' ELSE ' || unset_min_ordering_value || ' END'; upds := jm_max_sequence_number_column || ' = GREATEST(' || jm_table || '.' || jm_max_sequence_number_column || ', ($1).' || j_sequence_number_column || '), ' || jm_max_ordering_column || ' = GREATEST(' || jm_table || '.' || jm_max_ordering_column || ', ($1).' 
|| j_ordering_column || ')'; From 8debe8b9512314b8b204c75f551023d6f13dfabc Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Tue, 29 Aug 2023 15:47:25 +0100 Subject: [PATCH 32/34] Remove redundant filter clause --- .../akka/persistence/postgres/query/dao/ReadJournalQueries.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala index 958b449f..76962b1b 100644 --- a/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala +++ b/core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala @@ -42,7 +42,6 @@ class ReadJournalQueries(journalTable: TableQuery[JournalTable], includeDeleted: maxOrdering: Rep[Long]): Query[JournalTable, JournalRow, Seq] = baseTableQuery() .filter(_.persistenceId === persistenceId) - .filter(_.deleted === false) .filter(_.sequenceNumber >= fromSequenceNr) .filter(_.sequenceNumber <= toSequenceNr) .filter(_.ordering >= minOrdering) From 9bd44bf78f292a95a9532e20a42713d5baa94762 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Tue, 29 Aug 2023 16:14:22 +0100 Subject: [PATCH 33/34] Fix test --- .../persistence/postgres/query/dao/ReadJournalQueriesTest.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala index ec98dcaf..b5f58687 100644 --- a/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala +++ b/core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala @@ -24,7 +24,7 @@ class ReadJournalQueriesTest extends BaseQueryTest { 4L, 5L, 1L, - 10L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where ((((("persistence_id" = ?) and ("deleted" = false)) and ("sequence_number" >= ?)) and ("sequence_number" <= ?)) and ("ordering" >= ?)) and ("ordering" <= ?) order by "sequence_number" limit ?""" + 10L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where (((("persistence_id" = ?) and ("sequence_number" >= ?)) and ("sequence_number" <= ?)) and ("ordering" >= ?)) and ("ordering" <= ?) order by "sequence_number" limit ?""" } it should "create SQL query for eventsByTag" in withReadJournalQueries { queries => From 513e3ca121c7fbc2264568975be33bc884d12989 Mon Sep 17 00:00:00 2001 From: Tiago Mota Date: Tue, 29 Aug 2023 16:14:37 +0100 Subject: [PATCH 34/34] Prepare 0.6.0-RC1 release --- README.md | 4 ++-- docs/index.md | 4 ++-- docs/migration.md | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index c0eb6e15..5612bd52 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ You can read more about DAOs and schema variants in [the official documentation] To use `akka-persistence-postgres` in your SBT project, add the following to your `build.sbt`: ```scala -libraryDependencies += "com.swissborg" %% "akka-persistence-postgres" % "0.5.0" +libraryDependencies += "com.swissborg" %% "akka-persistence-postgres" % "0.6.0-RC1" ``` For a maven project add: @@ -29,7 +29,7 @@ For a maven project add: com.swissborg akka-persistence-postgres_2.13 - 0.5.0 + 0.6.0-RC1 ``` to your `pom.xml`. 
diff --git a/docs/index.md b/docs/index.md index 6e8c3ebb..9f9d17a6 100644 --- a/docs/index.md +++ b/docs/index.md @@ -18,7 +18,7 @@ The main goal is to keep index size and memory consumption on a moderate level w To use `akka-persistence-postgres` in your SBT project, add the following to your `build.sbt`: ```scala -libraryDependencies += "com.swisborg" %% "akka-persistence-postgres" % "0.5.0" +libraryDependencies += "com.swisborg" %% "akka-persistence-postgres" % "0.6.0-RC1" ``` For a maven project add: @@ -26,7 +26,7 @@ For a maven project add: com.swisborg akka-persistence-postgres_2.13 - 0.5.0 + 0.6.0-RC1 ``` to your `pom.xml`. diff --git a/docs/migration.md b/docs/migration.md index 98790765..2886efe6 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -22,14 +22,14 @@ We provide you with an optional artifact, `akka-persistence-postgres-migration` #### Add akka-persistence-migration to your project Add the following to your `build.sbt` ``` -libraryDependencies += "com.swissborg" %% "akka-persistence-postgres-migration" % "0.5.0" +libraryDependencies += "com.swissborg" %% "akka-persistence-postgres-migration" % "0.6.0-RC1" ``` For a maven project add: ```xml com.swisborg akka-persistence-postgres-migration_2.13 - 0.5.0 + 0.6.0-RC1 ``` to your `pom.xml`.