Repository: dnvriend/akka-persistence-jdbc Branch: main Commit: e2f37890c1dc Files: 271 Total size: 800.0 KB Directory structure: gitextract_t3d9jgyh/ ├── .fossa.yml ├── .github/ │ └── workflows/ │ ├── checks.yml │ ├── fossa.yml │ ├── link-validator.yml │ ├── release.yml │ ├── test.yml │ └── weekly.yml ├── .gitignore ├── .sbtopts ├── .scala-steward.conf ├── .scalafmt.conf ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── RELEASING.md ├── build.sbt ├── core/ │ ├── lib/ │ │ └── ojdbc6.jar │ └── src/ │ ├── main/ │ │ ├── mima-filters/ │ │ │ ├── 3.5.3.backwards.excludes/ │ │ │ │ ├── issue-322-messagesWithBatch.excludes │ │ │ │ └── issue-91-ordering-offset.excludes │ │ │ ├── 4.x.x.backwards.excludes/ │ │ │ │ └── pr-401-highest-seq-nr.excludes │ │ │ ├── 5.0.1.backwards.excludes/ │ │ │ │ └── pr-570-akka-serialization.excludes │ │ │ ├── 5.0.2.backwards.excludes/ │ │ │ │ └── issue-585-performance-regression.excludes │ │ │ ├── 5.1.0.backwards.excludes/ │ │ │ │ └── issue-557-logical-delete.excludes │ │ │ ├── 5.4.0.backwards.excludes/ │ │ │ │ ├── issue-710-tag-fk.excludes │ │ │ │ └── issue-775-slick-3.50.excludes │ │ │ ├── 5.5.0.backwards.excludes/ │ │ │ │ └── issue-891-durable-store.excludes │ │ │ └── 5.5.2.backwards.excludes/ │ │ │ └── pr-928-cleanup-tool.excludes │ │ ├── resources/ │ │ │ ├── reference.conf │ │ │ └── schema/ │ │ │ ├── h2/ │ │ │ │ ├── h2-create-schema-legacy.sql │ │ │ │ ├── h2-create-schema.sql │ │ │ │ ├── h2-drop-schema-legacy.sql │ │ │ │ └── h2-drop-schema.sql │ │ │ ├── mysql/ │ │ │ │ ├── mysql-create-schema-legacy.sql │ │ │ │ ├── mysql-create-schema.sql │ │ │ │ ├── mysql-drop-schema-legacy.sql │ │ │ │ ├── mysql-drop-schema.sql │ │ │ │ └── mysql-event-tag-migration.sql │ │ │ ├── oracle/ │ │ │ │ ├── oracle-create-schema-legacy.sql │ │ │ │ ├── oracle-create-schema.sql │ │ │ │ ├── oracle-drop-schema-legacy.sql │ │ │ │ ├── oracle-drop-schema.sql │ │ │ │ └── oracle-event-tag-migration.sql │ │ │ ├── postgres/ │ │ │ │ ├── postgres-create-schema-legacy.sql │ │ │ │ ├── postgres-create-schema.sql │ │ │ │ ├── postgres-drop-schema-legacy.sql │ │ │ │ ├── postgres-drop-schema.sql │ │ │ │ └── postgres-event-tag-migration.sql │ │ │ └── sqlserver/ │ │ │ ├── sqlserver-create-schema-legacy.sql │ │ │ ├── sqlserver-create-schema-varchar.sql │ │ │ ├── sqlserver-create-schema.sql │ │ │ ├── sqlserver-drop-schema-legacy.sql │ │ │ ├── sqlserver-drop-schema.sql │ │ │ └── sqlserver-event-tag-migration.sql │ │ └── scala/ │ │ └── akka/ │ │ └── persistence/ │ │ └── jdbc/ │ │ ├── AkkaSerialization.scala │ │ ├── JournalRow.scala │ │ ├── cleanup/ │ │ │ ├── javadsl/ │ │ │ │ └── EventSourcedCleanup.scala │ │ │ └── scaladsl/ │ │ │ └── EventSourcedCleanup.scala │ │ ├── config/ │ │ │ └── AkkaPersistenceConfig.scala │ │ ├── db/ │ │ │ ├── SlickDatabase.scala │ │ │ └── SlickExtension.scala │ │ ├── journal/ │ │ │ ├── JdbcAsyncWriteJournal.scala │ │ │ └── dao/ │ │ │ ├── BaseDao.scala │ │ │ ├── BaseJournalDaoWithReadMessages.scala │ │ │ ├── DefaultJournalDao.scala │ │ │ ├── FlowControl.scala │ │ │ ├── H2Compat.scala │ │ │ ├── JournalDao.scala │ │ │ ├── JournalDaoInstantiation.scala │ │ │ ├── JournalDaoWithReadMessages.scala │ │ │ ├── JournalDaoWithUpdates.scala │ │ │ ├── JournalQueries.scala │ │ │ ├── JournalTables.scala │ │ │ └── legacy/ │ │ │ ├── ByteArrayJournalDao.scala │ │ │ ├── ByteArrayJournalSerializer.scala │ │ │ ├── JournalQueries.scala │ │ │ ├── JournalTables.scala │ │ │ └── package.scala │ │ ├── query/ │ │ │ ├── JdbcReadJournalProvider.scala │ │ │ ├── JournalSequenceActor.scala │ │ │ ├── dao/ │ │ │ │ ├── 
DefaultReadJournalDao.scala │ │ │ │ ├── ReadJournalDao.scala │ │ │ │ ├── ReadJournalQueries.scala │ │ │ │ └── legacy/ │ │ │ │ ├── ByteArrayReadJournalDao.scala │ │ │ │ └── ReadJournalQueries.scala │ │ │ ├── javadsl/ │ │ │ │ └── JdbcReadJournal.scala │ │ │ ├── package.scala │ │ │ └── scaladsl/ │ │ │ └── JdbcReadJournal.scala │ │ ├── serialization/ │ │ │ ├── PersistentReprSerializer.scala │ │ │ └── SnapshotSerializer.scala │ │ ├── snapshot/ │ │ │ ├── JdbcSnapshotStore.scala │ │ │ └── dao/ │ │ │ ├── DefaultSnapshotDao.scala │ │ │ ├── SnapshotDao.scala │ │ │ ├── SnapshotDaoInstantiation.scala │ │ │ ├── SnapshotQueries.scala │ │ │ ├── SnapshotTables.scala │ │ │ └── legacy/ │ │ │ ├── ByteArraySnapshotDao.scala │ │ │ ├── ByteArraySnapshotSerializer.scala │ │ │ ├── SnapshotQueries.scala │ │ │ └── SnapshotTables.scala │ │ ├── state/ │ │ │ ├── DurableStateQueries.scala │ │ │ ├── DurableStateTables.scala │ │ │ ├── JdbcDurableStateStoreProvider.scala │ │ │ ├── OffsetOps.scala │ │ │ ├── SequenceNextValUpdater.scala │ │ │ ├── javadsl/ │ │ │ │ └── JdbcDurableStateStore.scala │ │ │ └── scaladsl/ │ │ │ ├── DurableStateSequenceActor.scala │ │ │ └── JdbcDurableStateStore.scala │ │ ├── testkit/ │ │ │ ├── internal/ │ │ │ │ ├── SchemaType.scala │ │ │ │ └── SchemaUtilsImpl.scala │ │ │ ├── javadsl/ │ │ │ │ └── SchemaUtils.scala │ │ │ └── scaladsl/ │ │ │ └── SchemaUtils.scala │ │ └── util/ │ │ ├── BlockingOps.scala │ │ ├── ByteArrayOps.scala │ │ ├── ConfigOps.scala │ │ ├── InputStreamOps.scala │ │ ├── PluginVersionChecker.scala │ │ ├── StringOps.scala │ │ └── TrySeq.scala │ └── test/ │ ├── LICENSE │ ├── java/ │ │ └── akka/ │ │ └── persistence/ │ │ └── jdbc/ │ │ ├── JavadslSnippets.java │ │ └── state/ │ │ └── JavadslSnippets.java │ ├── resources/ │ │ ├── general.conf │ │ ├── h2-application.conf │ │ ├── h2-default-mode-application.conf │ │ ├── h2-shared-db-application.conf │ │ ├── h2-two-read-journals-application.conf │ │ ├── jndi-application.conf │ │ ├── jndi-shared-db-application.conf │ │ ├── logback-test.xml │ │ ├── mysql-application.conf │ │ ├── mysql-shared-db-application.conf │ │ ├── oracle-application.conf │ │ ├── oracle-schema-overrides.conf │ │ ├── oracle-shared-db-application.conf │ │ ├── postgres-application.conf │ │ ├── postgres-shared-db-application.conf │ │ ├── sqlserver-application.conf │ │ └── sqlserver-shared-db-application.conf │ └── scala/ │ └── akka/ │ └── persistence/ │ └── jdbc/ │ ├── ScaladslSnippets.scala │ ├── SharedActorSystemTestSpec.scala │ ├── SimpleSpec.scala │ ├── SingleActorSystemPerTestSpec.scala │ ├── TablesTestSpec.scala │ ├── cleanup/ │ │ └── scaladsl/ │ │ └── EventSourcedCleanupTest.scala │ ├── configuration/ │ │ ├── AkkaPersistenceConfigTest.scala │ │ ├── ConfigOpsTest.scala │ │ └── JNDIConfigTest.scala │ ├── journal/ │ │ ├── JdbcJournalPerfSpec.scala │ │ ├── JdbcJournalSpec.scala │ │ └── dao/ │ │ ├── ByteArrayJournalSerializerTest.scala │ │ ├── JournalTablesTest.scala │ │ ├── TagsSerializationTest.scala │ │ └── TrySeqTest.scala │ ├── query/ │ │ ├── AllPersistenceIdsTest.scala │ │ ├── CurrentEventsByPersistenceIdTest.scala │ │ ├── CurrentEventsByTagTest.scala │ │ ├── CurrentPersistenceIdsTest.scala │ │ ├── EventAdapterTest.scala │ │ ├── EventsByPersistenceIdTest.scala │ │ ├── EventsByTagMigrationTest.scala │ │ ├── EventsByTagTest.scala │ │ ├── EventsByUnfrequentTagTest.scala │ │ ├── HardDeleteQueryTest.scala │ │ ├── JournalDaoStreamMessagesMemoryTest.scala │ │ ├── JournalSequenceActorTest.scala │ │ ├── MultipleReadJournalTest.scala │ │ ├── QueryTestSpec.scala │ │ ├── 
TaggingEventAdapter.scala │ │ └── dao/ │ │ ├── ReadJournalTablesTest.scala │ │ └── TestProbeReadJournalDao.scala │ ├── serialization/ │ │ └── StoreOnlySerializableMessagesTest.scala │ ├── snapshot/ │ │ ├── JdbcSnapshotStoreSpec.scala │ │ └── dao/ │ │ └── legacy/ │ │ └── SnapshotTablesTest.scala │ ├── state/ │ │ ├── Payloads.scala │ │ ├── ScaladslSnippets.scala │ │ └── scaladsl/ │ │ ├── DataGenerationHelper.scala │ │ ├── DurableStateSequenceActorTest.scala │ │ ├── DurableStateStorePluginSpec.scala │ │ ├── JdbcDurableStateSpec.scala │ │ ├── StateSpecBase.scala │ │ └── TestProbeDurableStateStoreQuery.scala │ └── util/ │ ├── ClasspathResources.scala │ └── DropCreate.scala ├── doc/ │ └── deadlock.md ├── docs/ │ ├── LICENSE │ ├── release-train-issue-template.md │ └── src/ │ └── main/ │ └── paradox/ │ ├── _template/ │ │ └── projectSpecificFooter.st │ ├── assets/ │ │ └── js/ │ │ └── warnOldVersion.js │ ├── configuration.md │ ├── custom-dao.md │ ├── durable-state-store.md │ ├── index.md │ ├── migration.md │ ├── overview.md │ ├── query.md │ └── snapshots.md ├── integration/ │ ├── LICENSE │ └── src/ │ └── test/ │ └── scala/ │ └── akka/ │ └── persistence/ │ └── jdbc/ │ └── integration/ │ ├── AllPersistenceIdsTest.scala │ ├── CurrentEventsByPersistenceIdTest.scala │ ├── CurrentEventsByTagTest.scala │ ├── CurrentPersistenceIdsTest.scala │ ├── EventAdapterTest.scala │ ├── EventSourcedCleanupTest.scala │ ├── EventsByPersistenceIdTest.scala │ ├── EventsByTagMigrationTest.scala │ ├── EventsByTagTest.scala │ ├── HardDeleteQueryTest.scala │ ├── JdbcJournalPerfSpec.scala │ ├── JdbcJournalSpec.scala │ ├── JdbcSnapshotStoreSpec.scala │ ├── JournalDaoStreamMessagesMemoryTest.scala │ ├── JournalSequenceActorTest.scala │ ├── PostgresDurableStateStorePluginSpec.scala │ ├── PostgresScalaJdbcDurableStateChangesByTagTest.scala │ └── StoreOnlySerializableMessagesTest.scala ├── migrator/ │ └── src/ │ ├── main/ │ │ └── scala/ │ │ └── akka/ │ │ └── persistence/ │ │ └── jdbc/ │ │ └── migrator/ │ │ ├── JournalMigrator.scala │ │ └── SnapshotMigrator.scala │ └── test/ │ ├── LICENSE │ ├── resources/ │ │ ├── general.conf │ │ ├── h2-application.conf │ │ ├── mysql-application.conf │ │ ├── oracle-application.conf │ │ ├── postgres-application.conf │ │ ├── schema/ │ │ │ ├── h2/ │ │ │ │ ├── h2-create-schema-legacy.sql │ │ │ │ ├── h2-create-schema.sql │ │ │ │ ├── h2-drop-schema-legacy.sql │ │ │ │ └── h2-drop-schema.sql │ │ │ ├── mysql/ │ │ │ │ ├── mysql-create-schema-legacy.sql │ │ │ │ ├── mysql-create-schema.sql │ │ │ │ ├── mysql-drop-schema-legacy.sql │ │ │ │ └── mysql-drop-schema.sql │ │ │ ├── oracle/ │ │ │ │ ├── oracle-create-schema-legacy.sql │ │ │ │ ├── oracle-create-schema.sql │ │ │ │ ├── oracle-drop-schema-legacy.sql │ │ │ │ └── oracle-drop-schema.sql │ │ │ ├── postgres/ │ │ │ │ ├── postgres-create-schema-legacy.sql │ │ │ │ ├── postgres-create-schema.sql │ │ │ │ ├── postgres-drop-schema-legacy.sql │ │ │ │ └── postgres-drop-schema.sql │ │ │ └── sqlserver/ │ │ │ ├── sqlserver-create-schema-legacy.sql │ │ │ ├── sqlserver-create-schema.sql │ │ │ ├── sqlserver-drop-schema-legacy.sql │ │ │ └── sqlserver-drop-schema.sql │ │ └── sqlserver-application.conf │ └── scala/ │ └── akka/ │ └── persistence/ │ └── jdbc/ │ └── migrator/ │ ├── JournalMigratorTest.scala │ ├── MigratorSpec.scala │ └── SnapshotMigratorTest.scala ├── migrator-integration/ │ ├── LICENSE │ └── src/ │ └── test/ │ └── scala/ │ └── akka/ │ └── persistence/ │ └── jdbc/ │ └── migrator/ │ └── integration/ │ ├── JournalMigratorTest.scala │ └── SnapshotMigratorTest.scala ├── 
project/ │ ├── AutomaticModuleName.scala │ ├── Dependencies.scala │ ├── IntegrationTests.scala │ ├── ProjectAutoPlugin.scala │ ├── Publish.scala │ ├── build.properties │ ├── plugins.sbt │ └── project-info.conf └── scripts/ ├── cat-log.sh ├── create-release-issue.sh ├── docker-compose.yml ├── launch-all.sh ├── launch-mysql.sh ├── launch-oracle.sh ├── launch-postgres.sh ├── launch-sqlserver.sh ├── link-validator.conf ├── mysql-cli.sh ├── oracle-cli.sh ├── psql-cli.sh └── sqlserver-cli.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: .fossa.yml ================================================ version: 3 # https://github.com/fossas/fossa-cli/blob/master/docs/references/files/fossa-yml.md paths: exclude: - ./integration - ./migrator-integration ================================================ FILE: .github/workflows/checks.yml ================================================ name: Basic checks on: pull_request: push: branches: - main tags-ignore: [ v.* ] permissions: contents: read jobs: check-code-style: name: Check Code Style runs-on: Akka-Default steps: - name: Checkout # https://github.com/actions/checkout/releases uses: actions/checkout@v6 with: # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves fetch-depth: 0 - name: Checkout GitHub merge if: github.event.pull_request run: |- git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch git checkout scratch - name: Cache Coursier cache # https://github.com/coursier/cache-action/releases uses: coursier/cache-action@v8.1.0 - name: Set up JDK 11 # https://github.com/coursier/setup-action/releases uses: coursier/setup-action@v3.0.0 with: jvm: temurin:1.11.0 - name: Run akka/github-actions-scripts uses: akka/github-actions-scripts/setup_global_resolver@main - name: Code style check and binary-compatibility check # Run locally with: sbt 'verifyCodeStyle ; mimaReportBinaryIssues' run: sbt "; verifyCodeStyle; mimaReportBinaryIssues" check-code-compilation: name: Check Code Compilation runs-on: Akka-Default steps: - name: Checkout # https://github.com/actions/checkout/releases uses: actions/checkout@v6 with: fetch-depth: 0 - name: Checkout GitHub merge if: github.event.pull_request run: |- git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch git checkout scratch - name: Cache Coursier cache # https://github.com/coursier/cache-action/releases uses: coursier/cache-action@v8.1.0 - name: Set up JDK 11 # https://github.com/coursier/setup-action/releases uses: coursier/setup-action@v3.0.0 with: jvm: temurin:1.11.0 - name: Run akka/github-actions-scripts uses: akka/github-actions-scripts/setup_global_resolver@main - name: Compile all code with fatal warnings for Java 11 and Scala 2.13 # Run locally with: sbt 'clean ; +Test/compile ; +It/compile' run: sbt "; Test/compile" - name: Compile all code with Scala 3.3 run: sbt "++3.3; Test/compile" check-docs: name: Check Docs runs-on: Akka-Default steps: - name: Checkout # https://github.com/actions/checkout/releases uses: actions/checkout@v6 with: fetch-depth: 0 - name: Checkout GitHub merge if: github.event.pull_request run: |- git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch git checkout scratch - name: Cache Coursier cache # https://github.com/coursier/cache-action/releases uses: coursier/cache-action@v8.1.0 - name: Set up JDK 11 # 
https://github.com/coursier/setup-action/releases uses: coursier/setup-action@v3.0.0 with: jvm: temurin:1.11.0 - name: Run akka/github-actions-scripts uses: akka/github-actions-scripts/setup_global_resolver@main - name: Create all API docs for artifacts/website and all reference docs run: sbt docs/paradox ================================================ FILE: .github/workflows/fossa.yml ================================================ name: Dependency License Scanning on: workflow_dispatch: schedule: - cron: '0 0 * * 0' # At 00:00 on Sunday permissions: contents: read jobs: fossa: name: Fossa runs-on: Akka-Default if: github.repository == 'akka/akka-persistence-jdbc' steps: - name: Checkout # https://github.com/actions/checkout/releases uses: actions/checkout@v6 with: # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves fetch-depth: 0 - name: Cache Coursier cache # https://github.com/coursier/cache-action/releases uses: coursier/cache-action@v8.1.0 - name: Set up JDK 11 # https://github.com/coursier/setup-action/releases uses: coursier/setup-action@v3.0.0 with: jvm: temurin:1.11.0 - name: Run akka/github-actions-scripts uses: akka/github-actions-scripts/setup_global_resolver@main - name: FOSSA policy check run: |- curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install-latest.sh | bash fossa analyze && fossa test env: FOSSA_API_KEY: "${{secrets.FOSSA_API_KEY}}" ================================================ FILE: .github/workflows/link-validator.yml ================================================ name: Link Validator on: workflow_dispatch: pull_request: schedule: - cron: '40 6 1 * *' permissions: contents: read jobs: validate-links: runs-on: Akka-Default if: github.repository == 'akka/akka-persistence-jdbc' steps: - name: Checkout # https://github.com/actions/checkout/releases uses: actions/checkout@v6 with: # See https://github.com/actions/checkout/issues/299#issuecomment-677674415 ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 100 - name: Fetch tags run: git fetch --depth=100 origin +refs/tags/*:refs/tags/* - name: Cache Coursier cache # https://github.com/coursier/cache-action/releases uses: coursier/cache-action@v8.1.0 - name: Set up JDK 25 # https://github.com/coursier/setup-action/releases uses: coursier/setup-action@v3.0.0 with: jvm: temurin:1.25 apps: cs - name: Run akka/github-actions-scripts uses: akka/github-actions-scripts/setup_global_resolver@main - name: sbt site run: sbt docs/makeSite - name: Run Link Validator run: cs launch net.runne::site-link-validator:0.2.3 -- scripts/link-validator.conf ================================================ FILE: .github/workflows/release.yml ================================================ name: Release on: push: branches: - main tags: ["v*"] permissions: contents: read jobs: release: # runs on main repo only if: github.event.repository.fork == false name: Release # the release environment provides access to secrets required in the release process # https://github.com/akka/akka-persistence-jdbc/settings/environments/164872635/edit environment: release runs-on: Akka-Default steps: - name: Checkout # https://github.com/actions/checkout/releases uses: actions/checkout@v6 with: # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves fetch-depth: 0 - name: Checkout GitHub merge if: github.event.pull_request run: |- git fetch origin pull/${{ github.event.pull_request.number 
}}/merge:scratch git checkout scratch - name: Cache Coursier cache # https://github.com/coursier/cache-action/releases uses: coursier/cache-action@v8.1.0 - name: Set up JDK 11 # https://github.com/coursier/setup-action/releases uses: coursier/setup-action@v3.0.0 with: jvm: temurin:1.11.0.17 - name: Run akka/github-actions-scripts uses: akka/github-actions-scripts/setup_global_resolver@main - name: Publish artifacts for all Scala versions env: PGP_SECRET: ${{ secrets.PGP_SECRET }} PGP_PASSPHRASE: ${{ secrets.PGP_PASSPHRASE }} PUBLISH_USER: ${{ secrets.PUBLISH_USER }} PUBLISH_PASSWORD: ${{ secrets.PUBLISH_PASSWORD }} run: sbt +publishSigned documentation: name: Documentation runs-on: Akka-Default if: github.event.repository.fork == false steps: - name: Checkout # https://github.com/actions/checkout/releases uses: actions/checkout@v6 with: # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves fetch-depth: 0 - name: Set up JDK 25 # https://github.com/coursier/setup-action/releases uses: coursier/setup-action@v3.0.0 with: jvm: temurin:1.25 - name: Run akka/github-actions-scripts uses: akka/github-actions-scripts/setup_global_resolver@main - name: Publish run: |- eval "$(ssh-agent -s)" echo $AKKA_RSYNC_GUSTAV | base64 -d > .github/id_rsa chmod 600 .github/id_rsa ssh-add .github/id_rsa sbt publishRsync env: AKKA_RSYNC_GUSTAV: ${{ secrets.AKKA_RSYNC_GUSTAV }} ================================================ FILE: .github/workflows/test.yml ================================================ name: Integration Tests on: pull_request: push: branches: - main tags-ignore: [ v.* ] permissions: contents: read jobs: integration-test: runs-on: Akka-Default strategy: fail-fast: false matrix: db: - name: "H2" test: "test" - name: "MySQL" test: '"integration/testOnly akka.persistence.jdbc.integration.MySQL*"' script: 'launch-mysql.sh' hasOldDao: true - name: "Oracle" test: '"integration/testOnly akka.persistence.jdbc.integration.Oracle*"' script: 'launch-oracle.sh' hasOldDao: true - name: "Postgres" test: '"integration/testOnly akka.persistence.jdbc.integration.Postgres*"' script: 'launch-postgres.sh' hasOldDao: true - name: "SqlServer" test: '"integration/testOnly akka.persistence.jdbc.integration.SqlServer*"' script: 'launch-sqlserver.sh' hasOldDao: true name: Integration Test ${{ matrix.db.name }} steps: - name: Checkout # https://github.com/actions/checkout/releases uses: actions/checkout@v6 with: fetch-depth: 0 - name: Checkout GitHub merge if: github.event.pull_request run: |- git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch git checkout scratch - name: Cache Coursier cache # https://github.com/coursier/cache-action/releases uses: coursier/cache-action@v8.1.0 - name: Set up JDK 11 # https://github.com/coursier/setup-action/releases uses: coursier/setup-action@v3.0.0 with: jvm: temurin:1.11.0 - name: Run akka/github-actions-scripts uses: akka/github-actions-scripts/setup_global_resolver@main - name: Start DB in docker container if: ${{ matrix.db.script }} run: |- ./scripts/${{ matrix.db.script }} - name: Run Integration tests for ${{ matrix.db.name }} run: sbt ${{ matrix.db.test }} ${{ matrix.old-dao.extraOpts }} - name: Run Integration tests for ${{ matrix.db.name }} (old dao) if: ${{ matrix.db.hasOldDao }} run: sbt ${{ matrix.db.test }} ${{ matrix.old-dao.extraOpts }} -Djdbc-journal.dao=akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao 
-Djdbc-snapshot-store.dao=akka.persistence.jdbc.snapshot.dao.legacy.ByteArraySnapshotDao -Djdbc-read-journal.dao=akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao - name: Print logs on failure if: ${{ failure() }} run: find . -name "*.log" -exec ./scripts/cat-log.sh {} \; ================================================ FILE: .github/workflows/weekly.yml ================================================ name: Weekly Integration Tests on: schedule: - cron: "0 0 * * 1" workflow_dispatch: permissions: contents: read jobs: integration-test: name: Weekly Integration Test ${{ matrix.db.name }}, ${{ matrix.db.jdk }} runs-on: Akka-Default strategy: fail-fast: false matrix: db: - name: "H2" test: "test" jdk: 'temurin:1.21' - name: "H2" test: "test" jdk: 'temurin:1.25' - name: "MySQL" test: '"integration/testOnly akka.persistence.jdbc.integration.MySQL*"' script: 'launch-mysql.sh' jdk: 'temurin:1.21' - name: "Oracle" test: '"integration/testOnly akka.persistence.jdbc.integration.Oracle*"' script: 'launch-oracle.sh' jdk: 'temurin:1.21' - name: "Postgres" test: '"integration/testOnly akka.persistence.jdbc.integration.Postgres*"' script: 'launch-postgres.sh' jdk: 'temurin:1.21' - name: "SqlServer" test: '"integration/testOnly akka.persistence.jdbc.integration.SqlServer*"' script: 'launch-sqlserver.sh' jdk: 'temurin:1.21' steps: - name: Checkout # https://github.com/actions/checkout/releases uses: actions/checkout@v6 with: fetch-depth: 0 - name: Checkout GitHub merge if: github.event.pull_request run: |- git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch git checkout scratch - name: Cache Coursier cache # https://github.com/coursier/cache-action/releases uses: coursier/cache-action@v8.1.0 - name: Set up ${{ matrix.db.jdk }} # https://github.com/coursier/setup-action/releases uses: coursier/setup-action@v3.0.0 with: jvm: ${{ matrix.db.jdk }} - name: Run akka/github-actions-scripts uses: akka/github-actions-scripts/setup_global_resolver@main - name: Start DB in docker container if: ${{ matrix.db.script }} run: |- ./scripts/${{ matrix.db.script }} - name: Run Integration tests for ${{ matrix.db.name }} run: sbt ${{ matrix.db.test }} - name: Print logs on failure if: ${{ failure() }} run: find . 
-name "*.log" -exec ./scripts/cat-log.sh {} \; ================================================ FILE: .gitignore ================================================ /RUNNING_PID logs target .idea *.iml *.iws .settings .classpath .project .worksheet .bsp *.code-workspace .bloop .metals metals.sbt .DS_Store ================================================ FILE: .sbtopts ================================================ -J-Xms512M -J-Xmx4096M -J-XX:MaxGCPauseMillis=200 ================================================ FILE: .scala-steward.conf ================================================ pullRequests.frequency = "@monthly" updates.ignore = [ { groupId = "org.scalameta", artifactId = "scalafmt-core" } { groupId = "org.scalameta", artifactId = "sbt-scalafmt" } // explicit updates { groupId = "com.typesafe.akka" } ] commits.message = "bump: ${artifactName} ${nextVersion} (was ${currentVersion})" updatePullRequests = never ================================================ FILE: .scalafmt.conf ================================================ version = 3.0.8 style = defaultWithAlign docstrings.style = Asterisk docstrings.wrap = no indentOperator.preset = spray maxColumn = 120 rewrite.rules = [RedundantParens, SortImports, AvoidInfix] unindentTopLevelOperators = true align.tokens = [{code = "=>", owner = "Case"}] align.openParenDefnSite = false align.openParenCallSite = false optIn.breakChainOnFirstMethodDot = false optIn.configStyleArguments = false danglingParentheses.defnSite = false danglingParentheses.callSite = false spaces.inImportCurlyBraces = true rewrite.neverInfix.excludeFilters = [ and min max until to by eq ne "should.*" "contain.*" "must.*" in ignore be taggedAs thrownBy synchronized have when size only noneOf oneElementOf noElementsOf atLeastOneElementOf atMostOneElementOf allElementsOf inOrderElementsOf theSameElementsAs ] rewriteTokens = { "⇒": "=>" "→": "->" "←": "<-" } newlines.afterCurlyLambda = preserve newlines.implicitParamListModifierPrefer = before ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing to Akka Persistence JDBC ## General Workflow This is the process for committing code into master. 1. Make sure you have signed the Lightbend CLA, if not, [sign it online](https://www.lightbend.com/contribute/cla/akka/current). 2. Before starting to work on a feature or a fix, make sure that there is a ticket for your work in the [issue tracker](https://github.com/akka/akka-persistence-jdbc/issues). If not, create it first. 3. Perform your work according to the [pull request requirements](#pull-request-requirements). 4. When the feature or fix is completed you should open a [Pull Request](https://help.github.com/articles/using-pull-requests) on [GitHub](https://github.com/akka/akka-persistence-jdbc/pulls). 5. The Pull Request should be reviewed by other maintainers (as many as feasible/practical). Note that the maintainers can consist of outside contributors, both within and outside Lightbend. Outside contributors are encouraged to participate in the review process, it is not a closed process. 6. After the review you should fix the issues (review comments, CI failures) by pushing a new commit for new review, iterating until the reviewers give their thumbs up and CI tests pass. 7. If the branch merge conflicts with its target, rebase your branch onto the target branch. 
In case of questions about the contribution process or for discussion of specific issues, please visit the [akka forum](https://discuss.akka.io/c/akka/). ## Pull Request Requirements For a Pull Request to be considered at all, it has to meet these requirements: 1. The Pull Request branch should be given a unique, descriptive name that explains its intent. 2. Code in the branch should live up to the current code standard: - Not violate [DRY](http://programmer.97things.oreilly.com/wiki/index.php/Don%27t_Repeat_Yourself). - The [Boy Scout Rule](http://programmer.97things.oreilly.com/wiki/index.php/The_Boy_Scout_Rule) needs to have been applied. 3. Regardless of whether the code introduces new features or fixes bugs or regressions, it must have comprehensive tests. 4. The code must be well documented (see the [Documentation](#documentation) section below). 5. The commit messages must properly describe the changes, see [further below](#creating-commits-and-writing-commit-messages). 6. Do not use ``@author`` tags since they do not encourage [Collective Code Ownership](http://www.extremeprogramming.org/rules/collective.html). Contributors get the credit they deserve in the release notes. If these requirements are not met, then the code should **not** be merged into master, or even reviewed - regardless of how good or important it is. No exceptions. ## Documentation Documentation should be written in two forms: 1. API documentation in the form of scaladoc/javadoc comments on the Scala and Java user API. 2. Guide documentation in the [docs](docs/) subproject using the [Paradox](https://github.com/lightbend/paradox) documentation tool. This documentation should give a short introduction to how a given connector should be used. ## External Dependencies All the external runtime dependencies for the project, including transitive dependencies, must have an open source license that is equal to, or compatible with, [Apache 2](https://www.apache.org/licenses/LICENSE-2.0). This must be ensured by manually verifying the license for all the dependencies of the project: 1. Whenever a committer to the project changes a version of a dependency (including Scala) in the build file. 2. Whenever a committer to the project adds a new dependency. 3. Whenever a new release is cut (public or private for a customer). Every external dependency listed in the build file must have a trailing comment with the license name of the dependency. Which licenses are compatible with Apache 2 is defined in [this doc](https://www.apache.org/legal/3party.html#category-a), where you can see that the licenses listed under ``Category A`` are automatically compatible with Apache 2, while the ones listed under ``Category B`` need additional action: > Each license in this category requires some degree of [reciprocity](https://www.apache.org/legal/3party.html#define-reciprocal); therefore, additional action must be taken in order to minimize the chance that a user of an Apache product will create a derivative work of a reciprocally-licensed portion of an Apache product without being aware of the applicable requirements. ## Creating Commits And Writing Commit Messages Follow these guidelines when creating public commits and writing commit messages. 1. If your work spans multiple local commits (for example, if you do safe point commits while working in a feature branch, or work in a branch for a long time doing merges/rebases, etc.)
then please do not commit it all but rewrite the history by squashing the commits into a single big commit for which you write a good commit message (as discussed in the following sections). For more info, read this article: [Git Workflow](https://sandofsky.com/blog/git-workflow.html). Every commit should be able to be used in isolation, cherry-picked, etc. 2. The first line should be a descriptive sentence summarizing what the commit is doing, including the ticket number. It should be possible to fully understand what the commit does, but not necessarily how it does it, by just reading this single line. We follow the “imperative present tense” style for commit messages ([more info here](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html)). It is **not ok** to only list the ticket number, type "minor fix" or similar. If the commit is a small fix, then you are done. If not, go to 3. 3. The single-line description should be followed by a blank line and an enumerated list with the details of the commit. 4. Add keywords for your commit (depending on the degree of automation we reach, the list may change over time): * ``Review by @gituser`` - if you want to notify someone on the team. The others can, and are encouraged to, participate. Example: Add eventsByTag query #123 * Details 1 * Details 2 * Details 3 ## How To Enforce These Guidelines? 1. [Scalafmt](https://scalameta.org/scalafmt/) enforces some of the code style rules. 2. [sbt-header plugin](https://github.com/sbt/sbt-header) manages consistent copyright headers in every source file. ================================================ FILE: LICENSE ================================================ Business Source License 1.1 Parameters Licensor: Lightbend, Inc. Licensed Work: Akka Persistence JDBC 5.5.4 This license applies to all sub directories and files UNLESS another license file is present in a sub directory, then that other license applies to all files in its directory and sub directories. The Licensed Work is (c) 2025 Lightbend Inc. Additional Use Grant: If you develop an application using a version of Play Framework that utilizes binary versions of akka-streams and its dependencies, you may use such binary versions of akka-streams and its dependencies in the development of your application only as they are incorporated into Play Framework and solely to implement the functionality provided by Play Framework; provided that, they are only used in the following way: Connecting to a Play Framework websocket and/or Play Framework request/response bodies for server and play-ws client. Change Date: 2028-10-30 Change License: Apache License, Version 2.0 For information about alternative licensing arrangements for the Software, please visit: https://akka.io ----------------------------------------------------------------------------- Business Source License 1.1 License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. “Business Source License” is a trademark of MariaDB Corporation Ab. Terms The Licensor hereby grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work. The Licensor may make an Additional Use Grant, above, permitting limited production use.
Effective on the Change Date, or the fourth anniversary of the first publicly available distribution of a specific version of the Licensed Work under this License, whichever comes first, the Licensor hereby grants you rights under the terms of the Change License, and the rights granted in the paragraph above terminate. If your use of the Licensed Work does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or authorized resellers, or you must refrain from using the Licensed Work. All copies of the original and modified Licensed Work, and derivative works of the Licensed Work, are subject to this License. This License applies separately for each version of the Licensed Work and the Change Date may vary for each version of the Licensed Work released by Licensor. You must conspicuously display this License on each original or modified copy of the Licensed Work. If you receive the Licensed Work in original or modified form from a third party, the terms and conditions set forth in this License apply to your use of that work. Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License for the current and all other versions of the Licensed Work. This License does not grant you any right in any trademark or logo of Licensor or its affiliates (provided that you may use a trademark or logo of Licensor as expressly required by this License). TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. MariaDB hereby grants you permission to use this License’s text to license your works, and to refer to it using the trademark “Business Source License”, as long as you comply with the Covenants of Licensor below. Covenants of Licensor In consideration of the right to use this License’s text and the “Business Source License” name and trademark, Licensor covenants to MariaDB, and to all other recipients of the licensed work to be provided by Licensor: 1. To specify as the Change License the GPL Version 2.0 or any later version, or a license that is compatible with GPL Version 2.0 or a later version, where “compatible” means that software provided under the Change License can be included in a program with software provided under GPL Version 2.0 or a later version. Licensor may specify additional Change Licenses without limitation. 2. To either: (a) specify an additional grant of rights to use that does not impose any additional restriction on the right granted in this License, as the Additional Use Grant; or (b) insert the text “None”. 3. To specify a Change Date. 4. Not to modify this License in any other way. ================================================ FILE: README.md ================================================ Akka ==== *Akka is a powerful platform that simplifies building and operating highly responsive, resilient, and scalable services.* The platform consists of * the [**Akka SDK**](https://doc.akka.io/) for straightforward, rapid development with AI assist and automatic clustering. Services built with the Akka SDK are automatically clustered and can be deployed on any infrastructure. 
* and [**Akka Automated Operations**](https://doc.akka.io/operations/akka-platform.html), a managed solution that handles everything for Akka SDK services from auto-elasticity to multi-region high availability running safely within your VPC. The **Akka SDK** and **Akka Automated Operations** are built upon the foundational [**Akka libraries**](https://doc.akka.io/libraries/akka-dependencies/current/), providing the building blocks for distributed systems. JDBC plugin for Akka Persistence ================================ akka-persistence-jdbc writes journal and snapshot entries to a configured JDBC store. It implements the full akka-persistence-query API and is therefore very useful for implementing DDD-style application models with Akka when creating reactive applications. Please note that the H2 database is not recommended for use as a production database; support for H2 is primarily intended for testing purposes. The Akka Persistence JDBC plugin was originally created by @dnvriend. Reference Documentation ----------------------- The reference documentation for all Akka libraries is available via [doc.akka.io/libraries/](https://doc.akka.io/libraries/), with details for the Akka JDBC plugin for [Scala](https://doc.akka.io/libraries/akka-persistence-jdbc/current/?language=scala) and [Java](https://doc.akka.io/libraries/akka-persistence-jdbc/current/?language=java). The current versions of all Akka libraries are listed on the [Akka Dependencies](https://doc.akka.io/libraries/akka-dependencies/current/) page. Releases of the Akka JDBC plugin in this repository are listed on the [GitHub releases](https://github.com/akka/akka-persistence-jdbc/releases) page. ## Build Token To build locally, you need to fetch a token at https://account.akka.io/token and place it in the `~/.sbt/1.0/akka-commercial.sbt` file like this: ``` ThisBuild / resolvers += "lightbend-akka".at("your token resolver here") ``` ## Contributing Contributions are *very* welcome! The Akka team appreciates community contributions by both those new to Akka and those more experienced. If you find an issue that you'd like to see fixed, the quickest way to make that happen is to implement the fix and submit a pull request. Refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file for more details about the workflow and general hints on how to prepare your pull request. You can also ask for clarifications or guidance in GitHub issues directly, or in the [akka forum](https://discuss.akka.io/c/akka/). ## License Akka is licensed under the Business Source License 1.1; please see the [Akka License FAQ](https://www.lightbend.com/akka/license-faq). Tests and documentation are under a separate license; see the LICENSE file in each documentation and test root directory for details. ================================================ FILE: RELEASING.md ================================================ ## Releasing Use this command to create a release issue from the [Release Train Issue Template](docs/release-train-issue-template.md) and follow the steps. ```bash ~/akka-persistence-jdbc> scripts/create-release-issue.sh `version-to-be-released` ``` ### Releasing only updated docs It is possible to release revised documentation for an already existing release. 1. Create a new branch from a release tag. If the revised documentation is for the `v0.3` release, then the name of the new branch should be `docs/v0.3`. 1. Add and commit a `version.sbt` file that pins the version to the one that is being revised.
Also set `isSnapshot` to `false` for the stable documentation links. For example: ```scala ThisBuild / version := "4.0.0" ThisBuild / isSnapshot := false ``` 1. Make all of the required changes to the documentation. 1. Build documentation locally with `CI` settings: ```sh env CI=true sbt docs/previewSite ``` 1. If the generated documentation looks good, send it to Gustav: ```sh env CI=true sbt docs/publishRsync ``` 1. Do not forget to push the new branch back to GitHub. 1. Commit the changes to Gustav's local git repo ### Releasing a Snapshot Snapshots are released automatically when commits are pushed to master. ================================================ FILE: build.sbt ================================================ import com.lightbend.paradox.apidoc.ApidocPlugin.autoImport.apidocRootPackage import com.geirsson.CiReleasePlugin lazy val `akka-persistence-jdbc` = project .in(file(".")) .enablePlugins(ScalaUnidocPlugin) .disablePlugins(MimaPlugin, SitePlugin, CiReleasePlugin) .aggregate(core, docs, migrator) .settings(name := "akka-persistence-jdbc-root", publish / skip := true) lazy val core = project .in(file("core")) .enablePlugins(MimaPlugin) .disablePlugins(SitePlugin, CiReleasePlugin) .settings( name := "akka-persistence-jdbc", AutomaticModuleName.settings("akka.persistence.jdbc"), libraryDependencies ++= Dependencies.Libraries, // Workaround for https://github.com/slick/slick/issues/2933 libraryDependencies ++= (if (scalaVersion.value.startsWith("2.13")) Seq("org.scala-lang" % "scala-reflect" % scalaVersion.value) else Nil), mimaReportSignatureProblems := true, mimaPreviousArtifacts := { if (scalaVersion.value.startsWith("3")) { Set.empty } else { Set( organization.value %% name.value % previousStableVersion.value.getOrElse( throw new Error("Unable to determine previous version for MiMa"))) } }) lazy val integration = project .in(file("integration")) .settings(IntegrationTests.settings) .settings(name := "akka-persistence-jdbc-integration", libraryDependencies ++= Dependencies.Libraries) .disablePlugins(MimaPlugin, SitePlugin, CiReleasePlugin) .dependsOn(core % "compile->compile;test->test") lazy val migrator = project .in(file("migrator")) .disablePlugins(SitePlugin, MimaPlugin, CiReleasePlugin) .settings( name := "akka-persistence-jdbc-migrator", AutomaticModuleName.settings("akka.persistence.jdbc.migrator"), libraryDependencies ++= Dependencies.Migration ++ Dependencies.Libraries, // TODO remove this when ready to publish it publish / skip := true) .dependsOn(core % "compile->compile;test->test") lazy val `migrator-integration` = project .in(file("migrator-integration")) .settings(IntegrationTests.settings) .settings(name := "akka-persistence-jdbc-migrator-integration", libraryDependencies ++= Dependencies.Libraries) .disablePlugins(MimaPlugin, SitePlugin, CiReleasePlugin) .dependsOn(migrator) lazy val docs = project .enablePlugins(ProjectAutoPlugin, AkkaParadoxPlugin, ParadoxSitePlugin, PreprocessPlugin, PublishRsyncPlugin) .disablePlugins(MimaPlugin, CiReleasePlugin) .settings( name := "Akka Persistence plugin for JDBC", publish / skip := true, makeSite := makeSite.dependsOn(LocalRootProject / ScalaUnidoc / doc).value, previewPath := (Paradox / siteSubdirName).value, Preprocess / siteSubdirName := s"api/akka-persistence-jdbc/${if (isSnapshot.value) "snapshot" else version.value}", Preprocess / sourceDirectory := (LocalRootProject / ScalaUnidoc / unidoc / target).value, Paradox / siteSubdirName := s"libraries/akka-persistence-jdbc/${if (isSnapshot.value) "snapshot" else 
version.value}", Compile / paradoxProperties ++= Map( "project.url" -> "https://doc.akka.io/libraries/akka-persistence-jdbc/current/", "github.base_url" -> "https://github.com/akka/akka-persistence-jdbc/", "canonical.base_url" -> "https://doc.akka.io/libraries/akka-persistence-jdbc/current", "akka.version" -> Dependencies.AkkaVersion, "slick.version" -> Dependencies.SlickVersion, "extref.github.base_url" -> s"https://github.com/akka/akka-persistence-jdbc/blob/${if (isSnapshot.value) "master" else "v" + version.value}/%s", // Slick "extref.slick.base_url" -> s"https://scala-slick.org/doc/${Dependencies.SlickVersion}/%s", // Akka "extref.akka.base_url" -> s"https://doc.akka.io/libraries/akka-core/${Dependencies.AkkaBinaryVersion}/%s", "scaladoc.akka.base_url" -> s"https://doc.akka.io/api/akka-core/${Dependencies.AkkaBinaryVersion}/", "javadoc.akka.base_url" -> s"https://doc.akka.io/japi/akka-core/${Dependencies.AkkaBinaryVersion}/", "javadoc.akka.link_style" -> "direct", // Java "javadoc.base_url" -> "https://docs.oracle.com/javase/8/docs/api/", // Scala "scaladoc.scala.base_url" -> s"https://www.scala-lang.org/api/${scalaBinaryVersion.value}.x/", "scaladoc.akka.persistence.jdbc.base_url" -> s"/${(Preprocess / siteSubdirName).value}/"), paradoxGroups := Map("Language" -> Seq("Java", "Scala")), resolvers += Resolver.jcenterRepo, publishRsyncArtifacts += makeSite.value -> "www/", publishRsyncHost := "akkarepo@gustav.akka.io", apidocRootPackage := "akka") Global / onLoad := (Global / onLoad).value.andThen { s => val v = version.value if (dynverGitDescribeOutput.value.hasNoTags) throw new MessageOnlyException( s"Failed to derive version from git tags. Maybe run `git fetch --unshallow`? Derived version: $v") s } TaskKey[Unit]("verifyCodeFmt") := { scalafmtCheckAll.all(ScopeFilter(inAnyProject)).result.value.toEither.left.foreach { _ => throw new MessageOnlyException( "Unformatted Scala code found. Please run 'scalafmtAll' and commit the reformatted code") } (Compile / scalafmtSbtCheck).result.value.toEither.left.foreach { _ => throw new MessageOnlyException( "Unformatted sbt code found. 
Please run 'scalafmtSbt' and commit the reformatted code") } } addCommandAlias("verifyCodeStyle", "headerCheck; verifyCodeFmt") val isJdk11orHigher: Boolean = { val result = VersionNumber(sys.props("java.specification.version")).matchesSemVer(SemanticSelector(">=11")) if (!result) throw new IllegalArgumentException("JDK 11 or higher is required") result } ================================================ FILE: core/src/main/mima-filters/3.5.3.backwards.excludes/issue-322-messagesWithBatch.excludes ================================================ # #322 Adding messagesWithBatch to Dao traits ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.journal.dao.JournalDao.messagesWithBatch") ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.journal.dao.H2JournalDao.messagesWithBatch") ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.journal.dao.JournalDaoWithUpdates.messagesWithBatch") ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.query.dao.BaseByteArrayReadJournalDao.ec") ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.query.dao.BaseByteArrayReadJournalDao.mat") ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.query.dao.H2ReadJournalDao.messagesWithBatch") ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.query.dao.OracleReadJournalDao.messagesWithBatch") ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.query.dao.ReadJournalDao.messagesWithBatch") ================================================ FILE: core/src/main/mima-filters/3.5.3.backwards.excludes/issue-91-ordering-offset.excludes ================================================ # #91 changing signature of messages and messagesWithBatch in JournalDaoWithReadMessages # tuple (PersistentRepr, Long) to include the ordering number ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.serialization.FlowPersistentReprSerializer.deserializeFlowWithoutTags") ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.ByteArrayJournalSerializer.deserializeFlowWithoutTags") ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.query.scaladsl.JdbcReadJournal$ContinueDelayed$") ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.query.scaladsl.JdbcReadJournal$FlowControl") ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.query.scaladsl.JdbcReadJournal$Stop$") ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.query.scaladsl.JdbcReadJournal$Continue$") ================================================ FILE: core/src/main/mima-filters/4.x.x.backwards.excludes/pr-401-highest-seq-nr.excludes ================================================ # https://github.com/akka/akka-persistence-jdbc/pull/401/files ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.JournalQueries.highestSequenceNrForPersistenceId") ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.JournalQueries.highestMarkedSequenceNrForPersistenceId") ================================================ FILE: core/src/main/mima-filters/5.0.1.backwards.excludes/pr-570-akka-serialization.excludes ================================================ # https://github.com/akka/akka-persistence-jdbc/pull/570/files # The problem comes from an earlier PR where the class 
akka.persistence.jdbc.journal.dao.AkkaSerialization # was moved to akka.persistence.jdbc.AkkaSerialization as it was also being used from durable state ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.journal.dao.AkkaSerialization") ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.journal.dao.AkkaSerialization$") ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.journal.dao.AkkaSerialization$AkkaSerialized") ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.journal.dao.AkkaSerialization$AkkaSerialized$") ================================================ FILE: core/src/main/mima-filters/5.0.2.backwards.excludes/issue-585-performance-regression.excludes ================================================ # internals ProblemFilters.exclude[IncompatibleTemplateDefProblem]("akka.persistence.jdbc.journal.dao.BaseDao") ProblemFilters.exclude[MissingTypesProblem]("akka.persistence.jdbc.journal.dao.DefaultJournalDao") ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.DefaultJournalDao.queueWriteJournalRows") ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.DefaultJournalDao.writeQueue") ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.JournalQueries.insertAndReturn") ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.JournalQueries.writeJournalRows") ProblemFilters.exclude[MissingTypesProblem]("akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao") ProblemFilters.exclude[MissingTypesProblem]("akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao") ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.queueWriteJournalRows") ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.writeQueue") ================================================ FILE: core/src/main/mima-filters/5.1.0.backwards.excludes/issue-557-logical-delete.excludes ================================================ ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.config.BaseDaoConfig.logicalDelete") ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.config.ReadJournalConfig.includeDeleted") ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao.logWarnAboutLogicalDeletionDeprecation") ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.logWarnAboutLogicalDeletionDeprecation") ================================================ FILE: core/src/main/mima-filters/5.4.0.backwards.excludes/issue-710-tag-fk.excludes ================================================ ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.JournalTables#EventTags.eventId") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.journal.dao.JournalTables#TagRow.eventId") ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.JournalTables#TagRow.copy") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.journal.dao.JournalTables#TagRow.copy$default$1") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.journal.dao.JournalTables#TagRow.copy$default$2") 
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.JournalTables#TagRow.this") ProblemFilters.exclude[MissingTypesProblem]("akka.persistence.jdbc.journal.dao.JournalTables$TagRow$") ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.JournalTables#TagRow.apply") ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.JournalTables#TagRow.unapply") ================================================ FILE: core/src/main/mima-filters/5.4.0.backwards.excludes/issue-775-slick-3.50.excludes ================================================ ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.apply") ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.unapply") ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.tupled") ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.curried") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.database") ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.copy") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.copy$default$1") ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.this") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.db.LazySlickDatabase.database") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.db.SlickDatabase.forConfig") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.db.SlickDatabase.database") ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.jdbc.db.SlickDatabase.database") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.journal.JdbcAsyncWriteJournal.db") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.journal.dao.DefaultJournalDao.db") ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.journal.dao.DefaultJournalDao.this") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao.db") ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao.db") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.db") ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.this") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.query.dao.DefaultReadJournalDao.db") ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.query.dao.DefaultReadJournalDao.this") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.query.dao.legacy.BaseByteArrayReadJournalDao.db") ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.jdbc.query.dao.legacy.BaseByteArrayReadJournalDao.db") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao.db") ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao.this") 
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.query.dao.legacy.OracleReadJournalDao.db") ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.jdbc.query.dao.legacy.OracleReadJournalDao.db") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.snapshot.JdbcSnapshotStore.db") ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.snapshot.dao.DefaultSnapshotDao.this") ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.snapshot.dao.legacy.ByteArraySnapshotDao.this") ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.state.JdbcDurableStateStoreProvider.db") ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore.this") ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.query.JournalSequenceActor.receive") ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.query.JournalSequenceActor.receive$default$4") ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.query.JournalSequenceActor.findGaps") ================================================ FILE: core/src/main/mima-filters/5.5.0.backwards.excludes/issue-891-durable-store.excludes ================================================ # internal api changes ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.state.JdbcDurableStateStoreProvider.this") ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore.this") ================================================ FILE: core/src/main/mima-filters/5.5.2.backwards.excludes/pr-928-cleanup-tool.excludes ================================================ # internal api changes ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.jdbc.journal.dao.JournalDao.deleteEventsTo") ProblemFilters.exclude[NewMixinForwarderProblem]("akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao.delete") ================================================ FILE: core/src/main/resources/reference.conf ================================================ # Copyright 2016 Dennis Vriend # Copyright (C) 2019 - 2022 Lightbend Inc. akka-persistence-jdbc { # The tag separator to use when tagging events with more than one tag. # This property affects jdbc-journal.tagSeparator and jdbc-read-journal.tagSeparator. tagSeparator = "," database-provider-fqcn = "akka.persistence.jdbc.db.DefaultSlickDatabaseProvider" shared-databases { // Shared databases can be defined here. // This reference config contains a partial example if a shared database which is enabled by configuring "slick" as the shared db // this example is ignored by default as long as no profile is default slick { # This property indicates which profile must be used by Slick. 
# Possible values are: # - slick.jdbc.PostgresProfile$ # - slick.jdbc.MySQLProfile$ # - slick.jdbc.H2Profile$ # - slick.jdbc.SQLServerProfile$ # - slick.jdbc.OracleProfile$ # (uncomment and set the property below to match your needs) # profile = "slick.jdbc.PostgresProfile$" db { connectionPool = "HikariCP" # The JDBC URL for the chosen database # (uncomment and set the property below to match your needs) # url = "jdbc:postgresql://localhost:5432/akka-plugin" # The database username # (uncomment and set the property below to match your needs) # user = "akka-plugin" # The username's password # (uncomment and set the property below to match your needs) # password = "akka-plugin" # The JDBC driver to use # (uncomment and set the property below to match your needs) # driver = "org.postgresql.Driver" # hikariCP settings; see: https://github.com/brettwooldridge/HikariCP # Slick will use an async executor with a fixed size queue of 10.000 objects # The async executor is a connection pool for asynchronous execution of blocking I/O actions. # This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. queueSize = 10000 // number of objects that can be queued by the async executor # This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection # from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. # 1000ms is the minimum value. Default: 180000 (3 minutes) connectionTimeout = 180000 # This property controls the maximum amount of time that a connection will be tested for aliveness. # This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 validationTimeout = 5000 # 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. # Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation # of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections # are never removed from the pool. Default: 600000 (10 minutes) idleTimeout = 600000 # 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout # it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, # only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds # less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), # subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) maxLifetime = 1800000 # This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a # possible connection leak. A value of 0 means leak detection is disabled. # Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 leakDetectionThreshold = 0 # ensures that the database does not get dropped while we are using it keepAliveConnection = on # See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing # Keep in mind that the number of threads must equal the maximum number of connections. 
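# For illustration only (example values, not the defaults below): the HikariCP guidance linked above suggests a pool
# size of roughly (core_count * 2) + effective_spindle_count. On a 4-core host with a single disk that works out to
# about 9 connections, in which case all three settings below would be set to that same number, e.g.
#   numThreads = 9
#   maxConnections = 9
#   minConnections = 9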
numThreads = 20 maxConnections = 20 minConnections = 20 } } } } # the akka-persistence-journal in use jdbc-journal { class = "akka.persistence.jdbc.journal.JdbcAsyncWriteJournal" tables { # Only used in pre 5.0.0 Dao legacy_journal { tableName = "journal" schemaName = "" columnNames { ordering = "ordering" deleted = "deleted" persistenceId = "persistence_id" sequenceNumber = "sequence_number" created = "created" tags = "tags" message = "message" } } event_journal { tableName = "event_journal" schemaName = "" columnNames { ordering = "ordering" deleted = "deleted" persistenceId = "persistence_id" sequenceNumber = "sequence_number" writer = "writer" writeTimestamp = "write_timestamp" adapterManifest = "adapter_manifest" eventPayload = "event_payload" eventSerId = "event_ser_id" eventSerManifest = "event_ser_manifest" metaPayload = "meta_payload" metaSerId = "meta_ser_id" metaSerManifest = "meta_ser_manifest" } } event_tag { tableName = "event_tag" schemaName = "" columnNames { # used for the older foreign key. eventId = "event_id" persistenceId = "persistence_id" sequenceNumber = "sequence_number" tag = "tag" } # For rolling updates of the event_tag table migration: # switch this to false to enable writing and reading tags via the new primary-key based columns. legacy-tag-key = true } # Otherwise it would be a pinned dispatcher, see https://github.com/akka/akka/issues/31058 plugin-dispatcher = "akka.actor.default-dispatcher" } # The tag separator to use when tagging events with more than one tag. # Should not be configured directly, but through the property akka-persistence-jdbc.tagSeparator # in order to keep consistent behavior across the write/read sides. # Only used for the legacy schema tagSeparator = ${akka-persistence-jdbc.tagSeparator} # If you have data from pre 5.0.0, use the legacy akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao # Dao. Migration to the new dao will be added in the future. dao = "akka.persistence.jdbc.journal.dao.DefaultJournalDao" # The size of the buffer used when queueing up events for batch writing. This number must be bigger than the number # of events that may be written concurrently. In other words this number must be bigger than the number of persistent # actors that are actively persisting at the same time. bufferSize = 1000 # The maximum size of the batches in which journal rows will be inserted batchSize = 400 # The maximum size of the batches in which journal rows will be read when recovering replayBatchSize = 400 # The maximum number of batch-inserts that may be running concurrently parallelism = 8 # This setting can be used to configure usage of a shared database. # To disable usage of a shared database, set to null or an empty string. # When set to a non empty string, this setting does two things: # - The actor which manages the write-journal will not automatically close the db when the actor stops (since it is shared) # - If akka-persistence-jdbc.database-provider-fqcn is set to akka.persistence.jdbc.db.DefaultSlickDatabaseProvider # then the shared database with the given name will be used. (shared databases are configured as part of akka-persistence-jdbc.shared-databases) # Please note that the database will only be shared with the other journals if the use-shared-db is also set # to the same value for these other journals. use-shared-db = null slick { # This property indicates which profile must be used by Slick.
# Possible values are: # - slick.jdbc.PostgresProfile$ # - slick.jdbc.MySQLProfile$ # - slick.jdbc.H2Profile$ # - slick.jdbc.SQLServerProfile$ # - slick.jdbc.OracleProfile$ # (uncomment and set the property below to match your needs) # profile = "slick.jdbc.PostgresProfile$" db { connectionPool = "HikariCP" # The JDBC URL for the chosen database # (uncomment and set the property below to match your needs) # url = "jdbc:postgresql://localhost:5432/akka-plugin" # The database username # (uncomment and set the property below to match your needs) # user = "akka-plugin" # The username's password # (uncomment and set the property below to match your needs) # password = "akka-plugin" # The JDBC driver to use # (uncomment and set the property below to match your needs) # driver = "org.postgresql.Driver" # hikariCP settings; see: https://github.com/brettwooldridge/HikariCP # Slick will use an async executor with a fixed size queue of 10.000 objects # The async executor is a connection pool for asynchronous execution of blocking I/O actions. # This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. queueSize = 10000 // number of objects that can be queued by the async executor # This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection # from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. # 1000ms is the minimum value. Default: 180000 (3 minutes) connectionTimeout = 180000 # This property controls the maximum amount of time that a connection will be tested for aliveness. # This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 validationTimeout = 5000 # 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. # Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation # of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections # are never removed from the pool. Default: 600000 (10 minutes) idleTimeout = 600000 # 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout # it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, # only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds # less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), # subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) maxLifetime = 1800000 # This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a # possible connection leak. A value of 0 means leak detection is disabled. # Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 leakDetectionThreshold = 0 # ensures that the database does not get dropped while we are using it keepAliveConnection = on # See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing # Keep in mind that the number of threads must equal the maximum number of connections. 
numThreads = 20 maxConnections = 20 minConnections = 20 } } } # the akka-persistence-snapshot-store in use jdbc-snapshot-store { class = "akka.persistence.jdbc.snapshot.JdbcSnapshotStore" tables { legacy_snapshot { tableName = "snapshot" schemaName = "" columnNames { persistenceId = "persistence_id" sequenceNumber = "sequence_number" created = "created" snapshot = "snapshot" } } snapshot { tableName = "snapshot" schemaName = "" columnNames { persistenceId = "persistence_id" sequenceNumber = "sequence_number" created = "created" snapshotPayload = "snapshot_payload" snapshotSerId = "snapshot_ser_id" snapshotSerManifest = "snapshot_ser_manifest" metaPayload = "meta_payload" metaSerId = "meta_ser_id" metaSerManifest = "meta_ser_manifest" } } # Otherwise it would be a pinned dispatcher, see https://github.com/akka/akka/issues/31058 plugin-dispatcher = "akka.actor.default-dispatcher" } # This setting can be used to configure usage of a shared database. # To disable usage of a shared database, set to null or an empty string. # When set to a non empty string, this setting does two things: # - The actor which manages the snapshot-journal will not automatically close the db when the actor stops (since it is shared) # - If akka-persistence-jdbc.database-provider-fqcn is set to akka.persistence.jdbc.db.DefaultSlickDatabaseProvider # then the shared database with the given name will be used. (shared databases are configured as part of akka-persistence-jdbc.shared-databases) # Please note that the database will only be shared with the other journals if the use-shared-db is also set # to the same value for these other journals. use-shared-db = null dao = "akka.persistence.jdbc.snapshot.dao.DefaultSnapshotDao" slick { # This property indicates which profile must be used by Slick. # Possible values are: # - slick.jdbc.PostgresProfile$ # - slick.jdbc.MySQLProfile$ # - slick.jdbc.H2Profile$ # - slick.jdbc.SQLServerProfile$ # - slick.jdbc.OracleProfile$ # (uncomment and set the property below to match your needs) # profile = "slick.jdbc.PostgresProfile$" db { connectionPool = "HikariCP" # The JDBC URL for the chosen database # (uncomment and set the property below to match your needs) # url = "jdbc:postgresql://localhost:5432/akka-plugin" # The database username # (uncomment and set the property below to match your needs) # user = "akka-plugin" # The username's password # (uncomment and set the property below to match your needs) # password = "akka-plugin" # The JDBC driver to use # (uncomment and set the property below to match your needs) # driver = "org.postgresql.Driver" # hikariCP settings; see: https://github.com/brettwooldridge/HikariCP # Slick will use an async executor with a fixed size queue of 10.000 objects # The async executor is a connection pool for asynchronous execution of blocking I/O actions. # This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. queueSize = 10000 // number of objects that can be queued by the async executor # This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection # from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. # 1000ms is the minimum value. Default: 180000 (3 minutes) connectionTimeout = 180000 # This property controls the maximum amount of time that a connection will be tested for aliveness. # This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). 
Default: 5000 validationTimeout = 5000 # 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. # Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation # of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections # are never removed from the pool. Default: 600000 (10 minutes) idleTimeout = 600000 # 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout # it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, # only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds # less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), # subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) maxLifetime = 1800000 # This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a # possible connection leak. A value of 0 means leak detection is disabled. # Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 leakDetectionThreshold = 0 # ensures that the database does not get dropped while we are using it keepAliveConnection = on # See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing # Keep in mind that the number of threads must equal the maximum number of connections. numThreads = 20 maxConnections = 20 minConnections = 20 } } } # the akka-persistence-query provider in use jdbc-read-journal { class = "akka.persistence.jdbc.query.JdbcReadJournalProvider" # Absolute path to the write journal plugin configuration section. # Read journal uses event adapters from the write plugin # to adapt events. write-plugin = "jdbc-journal" # New events are retrieved (polled) with this interval. refresh-interval = "1s" # How many events to fetch in one query (replay) and keep buffered until they # are delivered downstream. max-buffer-size = "500" # Number of 'max-buffer-size's to limit each events-by-tag query to # # Events by tag will fetch batches of elements, limiting the fetch both by using the DB LIMIT support and # the "ordering" column of the journal. When executing a query starting from the beginning of the # journal, for example adding a new projection to an existing application with a large number # of already persisted events, this can cause performance problems in some databases. # # This factor limits the "slices" of ordering the journal is queried for into smaller chunks, # issuing more queries where each query covers a smaller slice of the journal instead of one # covering the entire journal. # # Note that setting this too low will have a performance overhead in many queries being issued where # each query returns no or very few entries, but what number is too low depends on how many tags are # used and how well those are distributed; setting this value requires application-specific benchmarking # to find a good number.
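# As a rough illustration (example numbers, not a recommendation): with max-buffer-size = 500 and this factor
# set to 5, each events-by-tag query would cover a slice of about 5 * 500 = 2500 ordering values, so a journal
# with 1,000,000 rows would be traversed in roughly 400 such queries instead of one query over the whole journal.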
# # 0 means disable the factor and query the entire journal and limit to max-buffer-size elements events-by-tag-buffer-sizes-per-query = 0 # If enabled, automatically close the database connection when the actor system is terminated add-shutdown-hook = true # This setting can be used to configure usage of a shared database. # To disable usage of a shared database, set to null or an empty string. # This setting only has effect if akka-persistence-jdbc.database-provider-fqcn is set to # akka.persistence.jdbc.db.DefaultSlickDatabaseProvider. When this setting is set to a non empty string # then the shared database with the given name will be used. (shared databases are configured as part of akka-persistence-jdbc.shared-databases) # Please note that the database will only be shared with the other journals if the use-shared-db is also set # to the same value for these other journals. use-shared-db = null dao = "akka.persistence.jdbc.query.dao.DefaultReadJournalDao" # Settings for determining if ids (ordering column) in the journal are out of sequence. journal-sequence-retrieval { # The maximum number of ids that will be retrieved in each batch batch-size = 10000 # In case a number in the sequence is missing, this is the amount of retries that will be done to see # if the number is still found. Note that the time after which a number in the sequence is assumed missing is # equal to maxTries * queryDelay # (maxTries may not be zero) max-tries = 10 # How often the actor will query for new data query-delay = 1 second # The maximum backoff time before trying to query again in case of database failures max-backoff-query-delay = 1 minute # The ask timeout to use when querying the journal sequence actor, the actor should normally respond very quickly, # since it always replies with its current internal state ask-timeout = 1 second } tables { legacy_journal = ${jdbc-journal.tables.legacy_journal} event_journal = ${jdbc-journal.tables.event_journal} event_tag = ${jdbc-journal.tables.event_tag} } # The tag separator to use when tagging events with more than one tag. # should not be configured directly, but through property akka-persistence-jdbc.tagSeparator # in order to keep consistent behavior over write/read sides tagSeparator = ${akka-persistence-jdbc.tagSeparator} slick { # This property indicates which profile must be used by Slick. # Possible values are: # - slick.jdbc.PostgresProfile$ # - slick.jdbc.MySQLProfile$ # - slick.jdbc.H2Profile$ # - slick.jdbc.SQLServerProfile$ # - slick.jdbc.OracleProfile$ # (uncomment and set the property below to match your needs) # profile = "slick.jdbc.PostgresProfile$" db { connectionPool = "HikariCP" # The JDBC URL for the chosen database # (uncomment and set the property below to match your needs) # url = "jdbc:postgresql://localhost:5432/akka-plugin" # The database username # (uncomment and set the property below to match your needs) # user = "akka-plugin" # The username's password # (uncomment and set the property below to match your needs) # password = "akka-plugin" # The JDBC driver to use # (uncomment and set the property below to match your needs) # driver = "org.postgresql.Driver" # hikariCP settings; see: https://github.com/brettwooldridge/HikariCP # Slick will use an async executor with a fixed size queue of 10.000 objects # The async executor is a connection pool for asynchronous execution of blocking I/O actions. # This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. 
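# As an illustration of how the executor settings below interact (example numbers, not a change to the defaults):
# with numThreads = 20 and queueSize = 10000, at most 20 blocking JDBC actions run at the same time while up to
# 10000 further actions can wait in the executor's queue; actions submitted beyond that are rejected rather than queued.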
queueSize = 10000 // number of objects that can be queued by the async executor # This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection # from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. # 1000ms is the minimum value. Default: 180000 (3 minutes) connectionTimeout = 180000 # This property controls the maximum amount of time that a connection will be tested for aliveness. # This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 validationTimeout = 5000 # 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. # Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation # of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections # are never removed from the pool. Default: 600000 (10 minutes) idleTimeout = 600000 # 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout # it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, # only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds # less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), # subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) maxLifetime = 1800000 # This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a # possible connection leak. A value of 0 means leak detection is disabled. # Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 leakDetectionThreshold = 0 # ensures that the database does not get dropped while we are using it keepAliveConnection = on # See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing # Keep in mind that the number of threads must equal the maximum number of connections. numThreads = 20 maxConnections = 20 minConnections = 20 } } } # the akka-persistence-durable-state-store in use jdbc-durable-state-store { class = "akka.persistence.jdbc.state.JdbcDurableStateStoreProvider" # number of records fetched from the store at once batchSize = 500 # New states are retrieved (polled) with this interval. refreshInterval = "1s" tables { durable_state { ## The table and column names are not always read and used in SQL statements. If you change ## these values you may need to edit some source code ## https://github.com/akka/akka-persistence-jdbc/issues/573 tableName = "durable_state" schemaName = "" columnNames { globalOffset = "global_offset" persistenceId = "persistence_id" revision = "revision" statePayload = "state_payload" stateSerId = "state_serial_id" stateSerManifest = "state_serial_manifest" tag = "tag" stateTimestamp = "state_timestamp" } } } # Settings for determining if global_offset column in the durable-state are out of sequence. durable-state-sequence-retrieval { # The maximum number of ids that will be retrieved in each batch batch-size = 10000 # In case a number in the sequence is missing, this is the amount of retries that will be done to see # if the number is still found. 
Note that the time after which a number in the sequence is assumed missing is # equal to maxTries * queryDelay # (maxTries may not be zero) max-tries = 5 # How often the actor will query for new data query-delay = 1 second # The maximum backoff time before trying to query again in case of database failures max-backoff-query-delay = 1 minute # The ask timeout to use when querying the durable-state sequence actor, the actor should normally respond very quickly, # since it always replies with its current internal state ask-timeout = 1 second # cache of revision numbers per persistence id revision-cache-capacity = 10000 } slick { # This property indicates which profile must be used by Slick. # Possible values are: # - slick.jdbc.PostgresProfile$ # - slick.jdbc.MySQLProfile$ # - slick.jdbc.H2Profile$ # - slick.jdbc.SQLServerProfile$ # - slick.jdbc.OracleProfile$ # (uncomment and set the property below to match your needs) # profile = "slick.jdbc.PostgresProfile$" db { connectionPool = "HikariCP" # The JDBC URL for the chosen database # (uncomment and set the property below to match your needs) # url = "jdbc:postgresql://localhost:5432/akka-plugin" # The database username # (uncomment and set the property below to match your needs) # user = "akka-plugin" # The username's password # (uncomment and set the property below to match your needs) # password = "akka-plugin" # The JDBC driver to use # (uncomment and set the property below to match your needs) # driver = "org.postgresql.Driver" # hikariCP settings; see: https://github.com/brettwooldridge/HikariCP # Slick will use an async executor with a fixed size queue of 10.000 objects # The async executor is a connection pool for asynchronous execution of blocking I/O actions. # This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. queueSize = 10000 // number of objects that can be queued by the async executor # This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection # from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. # 1000ms is the minimum value. Default: 180000 (3 minutes) connectionTimeout = 180000 # This property controls the maximum amount of time that a connection will be tested for aliveness. # This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 validationTimeout = 5000 # 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. # Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation # of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections # are never removed from the pool. Default: 600000 (10 minutes) idleTimeout = 600000 # 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout # it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, # only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds # less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), # subject of course to the idleTimeout setting. 
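# For example (illustrative numbers only): if the database server itself closes connections after 30 minutes,
# the guidance above suggests a maxLifetime of at most about 1770000 (29.5 minutes), i.e. 30 seconds less than
# the database-side limit.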
Default: 1800000 (30 minutes) maxLifetime = 1800000 # This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a # possible connection leak. A value of 0 means leak detection is disabled. # Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 leakDetectionThreshold = 0 # ensures that the database does not get dropped while we are using it keepAliveConnection = on # See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing # Keep in mind that the number of threads must equal the maximum number of connections. numThreads = 20 maxConnections = 20 minConnections = 20 } } } ================================================ FILE: core/src/main/resources/schema/h2/h2-create-schema-legacy.sql ================================================ CREATE TABLE IF NOT EXISTS PUBLIC."journal" ( "ordering" BIGINT AUTO_INCREMENT, "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" BIGINT NOT NULL, "deleted" BOOLEAN DEFAULT FALSE NOT NULL, "tags" VARCHAR(255) DEFAULT NULL, "message" BYTEA NOT NULL, PRIMARY KEY("persistence_id", "sequence_number") ); CREATE UNIQUE INDEX IF NOT EXISTS "journal_ordering_idx" ON PUBLIC."journal"("ordering"); CREATE TABLE IF NOT EXISTS PUBLIC."snapshot" ( "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" BIGINT NOT NULL, "created" BIGINT NOT NULL, "snapshot" BYTEA NOT NULL, PRIMARY KEY("persistence_id", "sequence_number") ); CREATE TABLE IF NOT EXISTS "durable_state" ( "global_offset" BIGINT NOT NULL AUTO_INCREMENT, "persistence_id" VARCHAR(255) NOT NULL, "revision" BIGINT NOT NULL, "state_payload" BLOB NOT NULL, "state_serial_id" INTEGER NOT NULL, "state_serial_manifest" VARCHAR, "tag" VARCHAR, "state_timestamp" BIGINT NOT NULL, PRIMARY KEY("persistence_id") ); CREATE INDEX "state_tag_idx" on "durable_state" ("tag"); CREATE INDEX "state_global_offset_idx" on "durable_state" ("global_offset"); ================================================ FILE: core/src/main/resources/schema/h2/h2-create-schema.sql ================================================ CREATE TABLE IF NOT EXISTS "event_journal" ( "ordering" BIGINT UNIQUE NOT NULL AUTO_INCREMENT, "deleted" BOOLEAN DEFAULT false NOT NULL, "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" BIGINT NOT NULL, "writer" VARCHAR NOT NULL, "write_timestamp" BIGINT NOT NULL, "adapter_manifest" VARCHAR NOT NULL, "event_payload" BLOB NOT NULL, "event_ser_id" INTEGER NOT NULL, "event_ser_manifest" VARCHAR NOT NULL, "meta_payload" BLOB, "meta_ser_id" INTEGER, "meta_ser_manifest" VARCHAR, PRIMARY KEY("persistence_id","sequence_number") ); CREATE UNIQUE INDEX "event_journal_ordering_idx" on "event_journal" ("ordering"); CREATE TABLE IF NOT EXISTS "event_tag" ( "event_id" BIGINT, "persistence_id" VARCHAR(255), "sequence_number" BIGINT, "tag" VARCHAR NOT NULL, PRIMARY KEY("persistence_id", "sequence_number", "tag"), CONSTRAINT fk_event_journal FOREIGN KEY("persistence_id", "sequence_number") REFERENCES "event_journal"("persistence_id", "sequence_number") ON DELETE CASCADE ); CREATE TABLE IF NOT EXISTS "snapshot" ( "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" BIGINT NOT NULL, "created" BIGINT NOT NULL,"snapshot_ser_id" INTEGER NOT NULL, "snapshot_ser_manifest" VARCHAR NOT NULL, "snapshot_payload" BLOB NOT NULL, "meta_ser_id" INTEGER, "meta_ser_manifest" VARCHAR, "meta_payload" BLOB, PRIMARY KEY("persistence_id","sequence_number") ); CREATE SEQUENCE IF NOT EXISTS 
"global_offset_seq"; CREATE TABLE IF NOT EXISTS "durable_state" ( "global_offset" BIGINT DEFAULT NEXT VALUE FOR "global_offset_seq", "persistence_id" VARCHAR(255) NOT NULL, "revision" BIGINT NOT NULL, "state_payload" BLOB NOT NULL, "state_serial_id" INTEGER NOT NULL, "state_serial_manifest" VARCHAR, "tag" VARCHAR, "state_timestamp" BIGINT NOT NULL, PRIMARY KEY("persistence_id") ); CREATE INDEX IF NOT EXISTS "state_tag_idx" on "durable_state" ("tag"); CREATE INDEX IF NOT EXISTS "state_global_offset_idx" on "durable_state" ("global_offset"); ================================================ FILE: core/src/main/resources/schema/h2/h2-drop-schema-legacy.sql ================================================ DROP TABLE IF EXISTS PUBLIC."journal"; DROP TABLE IF EXISTS PUBLIC."snapshot"; DROP TABLE IF EXISTS PUBLIC."durable_state"; ================================================ FILE: core/src/main/resources/schema/h2/h2-drop-schema.sql ================================================ DROP TABLE IF EXISTS PUBLIC."event_tag"; DROP TABLE IF EXISTS PUBLIC."event_journal"; DROP TABLE IF EXISTS PUBLIC."snapshot"; DROP TABLE IF EXISTS PUBLIC."durable_state"; DROP SEQUENCE IF EXISTS PUBLIC."global_offset_seq"; ================================================ FILE: core/src/main/resources/schema/mysql/mysql-create-schema-legacy.sql ================================================ CREATE TABLE IF NOT EXISTS journal ( ordering SERIAL, persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, deleted BOOLEAN DEFAULT FALSE NOT NULL, tags VARCHAR(255) DEFAULT NULL, message BLOB NOT NULL, PRIMARY KEY(persistence_id, sequence_number) ); CREATE UNIQUE INDEX journal_ordering_idx ON journal(ordering); CREATE TABLE IF NOT EXISTS snapshot ( persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, created BIGINT NOT NULL, snapshot BLOB NOT NULL, PRIMARY KEY (persistence_id, sequence_number) ); ================================================ FILE: core/src/main/resources/schema/mysql/mysql-create-schema.sql ================================================ CREATE TABLE IF NOT EXISTS event_journal ( ordering SERIAL, deleted BOOLEAN DEFAULT false NOT NULL, persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, writer TEXT NOT NULL, write_timestamp BIGINT NOT NULL, adapter_manifest TEXT NOT NULL, event_payload BLOB NOT NULL, event_ser_id INTEGER NOT NULL, event_ser_manifest TEXT NOT NULL, meta_payload BLOB, meta_ser_id INTEGER,meta_ser_manifest TEXT, PRIMARY KEY(persistence_id,sequence_number) ); CREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering); CREATE TABLE IF NOT EXISTS event_tag ( event_id BIGINT UNSIGNED, persistence_id VARCHAR(255), sequence_number BIGINT, tag VARCHAR(255) NOT NULL, PRIMARY KEY(persistence_id, sequence_number, tag), FOREIGN KEY (persistence_id, sequence_number) REFERENCES event_journal(persistence_id, sequence_number) ON DELETE CASCADE ); CREATE TABLE IF NOT EXISTS snapshot ( persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, created BIGINT NOT NULL, snapshot_ser_id INTEGER NOT NULL, snapshot_ser_manifest TEXT NOT NULL, snapshot_payload BLOB NOT NULL, meta_ser_id INTEGER, meta_ser_manifest TEXT, meta_payload BLOB, PRIMARY KEY (persistence_id, sequence_number)); ================================================ FILE: core/src/main/resources/schema/mysql/mysql-drop-schema-legacy.sql ================================================ DROP TABLE IF EXISTS journal; DROP TABLE IF EXISTS snapshot; 
================================================ FILE: core/src/main/resources/schema/mysql/mysql-drop-schema.sql ================================================ DROP TABLE IF EXISTS event_tag; DROP TABLE IF EXISTS event_journal; DROP TABLE IF EXISTS snapshot; ================================================ FILE: core/src/main/resources/schema/mysql/mysql-event-tag-migration.sql ================================================ -- **************** first step **************** -- add new column ALTER TABLE event_tag ADD persistence_id VARCHAR(255), ADD sequence_number BIGINT; -- **************** second step **************** -- migrate rows UPDATE event_tag INNER JOIN event_journal ON event_tag.event_id = event_journal.ordering SET event_tag.persistence_id = event_journal.persistence_id, event_tag.sequence_number = event_journal.sequence_number; -- drop old FK constraint SELECT CONSTRAINT_NAME INTO @fk_constraint_name FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS WHERE TABLE_NAME = 'event_tag'; SET @alter_query = CONCAT('ALTER TABLE event_tag DROP FOREIGN KEY ', @fk_constraint_name); PREPARE stmt FROM @alter_query; EXECUTE stmt; DEALLOCATE PREPARE stmt; -- drop old PK constraint ALTER TABLE event_tag DROP PRIMARY KEY; -- create new PK constraint for PK column. ALTER TABLE event_tag ADD CONSTRAINT PRIMARY KEY (persistence_id, sequence_number, tag); -- create new FK constraint for PK column. ALTER TABLE event_tag ADD CONSTRAINT fk_event_journal_on_pk FOREIGN KEY (persistence_id, sequence_number) REFERENCES event_journal (persistence_id, sequence_number) ON DELETE CASCADE; -- alter the event_id to nullable, so we can skip the InsertAndReturn. ALTER TABLE event_tag MODIFY COLUMN event_id BIGINT UNSIGNED NULL; ================================================ FILE: core/src/main/resources/schema/oracle/oracle-create-schema-legacy.sql ================================================ CREATE SEQUENCE "ordering_seq" START WITH 1 INCREMENT BY 1 NOMAXVALUE / CREATE TABLE "journal" ( "ordering" NUMERIC, "deleted" char check ("deleted" in (0,1)) NOT NULL, "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" NUMERIC NOT NULL, "tags" VARCHAR(255) DEFAULT NULL, "message" BLOB NOT NULL, PRIMARY KEY("persistence_id", "sequence_number") ) / CREATE UNIQUE INDEX "journal_ordering_idx" ON "journal"("ordering") / CREATE OR REPLACE TRIGGER "ordering_seq_trigger" BEFORE INSERT ON "journal" FOR EACH ROW BEGIN SELECT "ordering_seq".NEXTVAL INTO :NEW."ordering" FROM DUAL; END; / CREATE OR REPLACE PROCEDURE "reset_sequence" IS l_value NUMBER; BEGIN EXECUTE IMMEDIATE 'SELECT "ordering_seq".nextval FROM dual' INTO l_value; EXECUTE IMMEDIATE 'ALTER SEQUENCE "ordering_seq" INCREMENT BY -' || l_value || ' MINVALUE 0'; EXECUTE IMMEDIATE 'SELECT "ordering_seq".nextval FROM dual' INTO l_value; EXECUTE IMMEDIATE 'ALTER SEQUENCE "ordering_seq" INCREMENT BY 1 MINVALUE 0'; END; / CREATE TABLE "snapshot" ( "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" NUMERIC NOT NULL, "created" NUMERIC NOT NULL, "snapshot" BLOB NOT NULL, PRIMARY KEY ("persistence_id", "sequence_number") ) / ================================================ FILE: core/src/main/resources/schema/oracle/oracle-create-schema.sql ================================================ CREATE SEQUENCE EVENT_JOURNAL__ORDERING_SEQ START WITH 1 INCREMENT BY 1 NOMAXVALUE / CREATE TABLE EVENT_JOURNAL ( ORDERING NUMERIC UNIQUE, DELETED CHAR(1) DEFAULT 0 NOT NULL check (DELETED in (0, 1)), PERSISTENCE_ID VARCHAR(255) NOT NULL, SEQUENCE_NUMBER NUMERIC NOT NULL, WRITER 
VARCHAR(255) NOT NULL, WRITE_TIMESTAMP NUMBER(19) NOT NULL, ADAPTER_MANIFEST VARCHAR(255), EVENT_PAYLOAD BLOB NOT NULL, EVENT_SER_ID NUMBER(10) NOT NULL, EVENT_SER_MANIFEST VARCHAR(255), META_PAYLOAD BLOB, META_SER_ID NUMBER(10), META_SER_MANIFEST VARCHAR(255), PRIMARY KEY(PERSISTENCE_ID, SEQUENCE_NUMBER) ) / CREATE OR REPLACE TRIGGER EVENT_JOURNAL__ORDERING_TRG before insert on EVENT_JOURNAL REFERENCING NEW AS NEW FOR EACH ROW WHEN (new.ORDERING is null) begin select EVENT_JOURNAL__ORDERING_seq.nextval into :new.ORDERING from sys.dual; end; / CREATE TABLE EVENT_TAG ( EVENT_ID NUMERIC, PERSISTENCE_ID VARCHAR(255), SEQUENCE_NUMBER NUMERIC, TAG VARCHAR(255) NOT NULL, PRIMARY KEY(PERSISTENCE_ID, SEQUENCE_NUMBER, TAG), FOREIGN KEY(PERSISTENCE_ID, SEQUENCE_NUMBER) REFERENCES EVENT_JOURNAL(PERSISTENCE_ID, SEQUENCE_NUMBER) ON DELETE CASCADE ) / CREATE TABLE SNAPSHOT ( PERSISTENCE_ID VARCHAR(255) NOT NULL, SEQUENCE_NUMBER NUMERIC NOT NULL, CREATED NUMERIC NOT NULL, SNAPSHOT_SER_ID NUMBER(10) NOT NULL, SNAPSHOT_SER_MANIFEST VARCHAR(255), SNAPSHOT_PAYLOAD BLOB NOT NULL, META_SER_ID NUMBER(10), META_SER_MANIFEST VARCHAR(255), META_PAYLOAD BLOB, PRIMARY KEY(PERSISTENCE_ID,SEQUENCE_NUMBER) ) / CREATE OR REPLACE PROCEDURE "reset_sequence" IS l_value NUMBER; BEGIN EXECUTE IMMEDIATE 'SELECT EVENT_JOURNAL__ORDERING_SEQ.nextval FROM dual' INTO l_value; EXECUTE IMMEDIATE 'ALTER SEQUENCE EVENT_JOURNAL__ORDERING_SEQ INCREMENT BY -' || l_value || ' MINVALUE 0'; EXECUTE IMMEDIATE 'SELECT EVENT_JOURNAL__ORDERING_SEQ.nextval FROM dual' INTO l_value; EXECUTE IMMEDIATE 'ALTER SEQUENCE EVENT_JOURNAL__ORDERING_SEQ INCREMENT BY 1 MINVALUE 0'; END; / ================================================ FILE: core/src/main/resources/schema/oracle/oracle-drop-schema-legacy.sql ================================================ -- (ddl lock timeout in seconds) this allows tests which are still writing to the db to finish gracefully ALTER SESSION SET ddl_lock_timeout = 150 / DROP TABLE "journal" CASCADE CONSTRAINT / DROP TABLE "snapshot" CASCADE CONSTRAINT / DROP TABLE "deleted_to" CASCADE CONSTRAINT / DROP TRIGGER "ordering_seq_trigger" / DROP PROCEDURE "reset_sequence" / DROP SEQUENCE "ordering_seq" / ================================================ FILE: core/src/main/resources/schema/oracle/oracle-drop-schema.sql ================================================ ALTER SESSION SET ddl_lock_timeout = 15 / DROP TABLE EVENT_TAG CASCADE CONSTRAINT / DROP TABLE EVENT_JOURNAL CASCADE CONSTRAINT / DROP TABLE SNAPSHOT CASCADE CONSTRAINT / DROP TABLE SNAPSHOT CASCADE CONSTRAINT / DROP SEQUENCE EVENT_JOURNAL__ORDERING_SEQ / DROP TRIGGER EVENT_JOURNAL__ORDERING_TRG / ================================================ FILE: core/src/main/resources/schema/oracle/oracle-event-tag-migration.sql ================================================ -- **************** first step **************** -- add new column ALTER TABLE EVENT_TAG ADD (PERSISTENCE_ID VARCHAR2(255), SEQUENCE_NUMBER NUMERIC); -- **************** second step **************** -- migrate rows UPDATE EVENT_TAG SET PERSISTENCE_ID = (SELECT PERSISTENCE_ID FROM EVENT_JOURNAL WHERE EVENT_TAG.EVENT_ID = EVENT_JOURNAL.ORDERING), SEQUENCE_NUMBER = (SELECT SEQUENCE_NUMBER FROM EVENT_JOURNAL WHERE EVENT_TAG.EVENT_ID = EVENT_JOURNAL.ORDERING) -- drop old FK constraint DECLARE v_constraint_name VARCHAR2(255); BEGIN SELECT CONSTRAINT_NAME INTO v_constraint_name FROM USER_CONSTRAINTS WHERE TABLE_NAME = 'EVENT_TAG' AND CONSTRAINT_TYPE = 'R'; IF v_constraint_name IS NOT NULL THEN EXECUTE IMMEDIATE 
'ALTER TABLE EVENT_TAG DROP CONSTRAINT ' || v_constraint_name; END IF; COMMIT; EXCEPTION WHEN OTHERS THEN ROLLBACK; RAISE; END; / -- drop old PK constraint ALTER TABLE EVENT_TAG DROP PRIMARY KEY; -- create new PK constraint for PK column. ALTER TABLE EVENT_TAG ADD CONSTRAINT "pk_event_tag" PRIMARY KEY (PERSISTENCE_ID, SEQUENCE_NUMBER, TAG); -- create new FK constraint for PK column. ALTER TABLE EVENT_TAG ADD CONSTRAINT fk_EVENT_JOURNAL_on_pk FOREIGN KEY (PERSISTENCE_ID, SEQUENCE_NUMBER) REFERENCES EVENT_JOURNAL (PERSISTENCE_ID, SEQUENCE_NUMBER) ON DELETE CASCADE; -- alter the EVENT_ID to nullable, so we can skip the InsertAndReturn. ALTER TABLE EVENT_TAG MODIFY EVENT_ID NULL; ================================================ FILE: core/src/main/resources/schema/postgres/postgres-create-schema-legacy.sql ================================================ CREATE TABLE IF NOT EXISTS public.journal ( ordering BIGSERIAL, persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, deleted BOOLEAN DEFAULT FALSE NOT NULL, tags VARCHAR(255) DEFAULT NULL, message BYTEA NOT NULL, PRIMARY KEY(persistence_id, sequence_number) ); CREATE UNIQUE INDEX IF NOT EXISTS journal_ordering_idx ON public.journal(ordering); CREATE TABLE IF NOT EXISTS public.snapshot ( persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, created BIGINT NOT NULL, snapshot BYTEA NOT NULL, PRIMARY KEY(persistence_id, sequence_number) ); CREATE TABLE IF NOT EXISTS public.durable_state ( global_offset BIGSERIAL, persistence_id VARCHAR(255) NOT NULL, revision BIGINT NOT NULL, state_payload BYTEA NOT NULL, state_serial_id INTEGER NOT NULL, state_serial_manifest VARCHAR(255), tag VARCHAR, state_timestamp BIGINT NOT NULL, PRIMARY KEY(persistence_id) ); CREATE INDEX CONCURRENTLY state_tag_idx on public.durable_state (tag); CREATE INDEX CONCURRENTLY state_global_offset_idx on public.durable_state (global_offset); ================================================ FILE: core/src/main/resources/schema/postgres/postgres-create-schema.sql ================================================ CREATE TABLE IF NOT EXISTS public.event_journal ( ordering BIGSERIAL, persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, deleted BOOLEAN DEFAULT FALSE NOT NULL, writer VARCHAR(255) NOT NULL, write_timestamp BIGINT, adapter_manifest VARCHAR(255), event_ser_id INTEGER NOT NULL, event_ser_manifest VARCHAR(255) NOT NULL, event_payload BYTEA NOT NULL, meta_ser_id INTEGER, meta_ser_manifest VARCHAR(255), meta_payload BYTEA, PRIMARY KEY(persistence_id, sequence_number) ); CREATE UNIQUE INDEX event_journal_ordering_idx ON public.event_journal(ordering); CREATE TABLE IF NOT EXISTS public.event_tag( event_id BIGINT, persistence_id VARCHAR(255), sequence_number BIGINT, tag VARCHAR(256), PRIMARY KEY(persistence_id, sequence_number, tag), CONSTRAINT fk_event_journal FOREIGN KEY(persistence_id, sequence_number) REFERENCES event_journal(persistence_id, sequence_number) ON DELETE CASCADE ); CREATE TABLE IF NOT EXISTS public.snapshot ( persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, created BIGINT NOT NULL, snapshot_ser_id INTEGER NOT NULL, snapshot_ser_manifest VARCHAR(255) NOT NULL, snapshot_payload BYTEA NOT NULL, meta_ser_id INTEGER, meta_ser_manifest VARCHAR(255), meta_payload BYTEA, PRIMARY KEY(persistence_id, sequence_number) ); CREATE TABLE IF NOT EXISTS public.durable_state ( global_offset BIGSERIAL, persistence_id VARCHAR(255) NOT NULL, revision BIGINT NOT NULL, state_payload BYTEA NOT NULL, 
state_serial_id INTEGER NOT NULL, state_serial_manifest VARCHAR(255), tag VARCHAR, state_timestamp BIGINT NOT NULL, PRIMARY KEY(persistence_id) ); CREATE INDEX CONCURRENTLY state_tag_idx on public.durable_state (tag); CREATE INDEX CONCURRENTLY state_global_offset_idx on public.durable_state (global_offset); ================================================ FILE: core/src/main/resources/schema/postgres/postgres-drop-schema-legacy.sql ================================================ DROP TABLE IF EXISTS public.journal; DROP TABLE IF EXISTS public.snapshot; DROP TABLE IF EXISTS public.durable_state; ================================================ FILE: core/src/main/resources/schema/postgres/postgres-drop-schema.sql ================================================ DROP TABLE IF EXISTS public.event_tag; DROP TABLE IF EXISTS public.event_journal; DROP TABLE IF EXISTS public.snapshot; DROP TABLE IF EXISTS public.durable_state; ================================================ FILE: core/src/main/resources/schema/postgres/postgres-event-tag-migration.sql ================================================ -- **************** first step **************** -- add new column ALTER TABLE public.event_tag ADD persistence_id VARCHAR(255), ADD sequence_number BIGINT; -- **************** second step **************** -- migrate rows UPDATE public.event_tag SET persistence_id = public.event_journal.persistence_id, sequence_number = public.event_journal.sequence_number FROM event_journal WHERE public.event_tag.event_id = public.event_journal.ordering; -- drop old FK constraint ALTER TABLE public.event_tag DROP CONSTRAINT "fk_event_journal"; -- drop old PK constraint ALTER TABLE public.event_tag DROP CONSTRAINT "event_tag_pkey"; -- create new PK constraint for PK column. ALTER TABLE public.event_tag ADD CONSTRAINT "pk_event_tag" PRIMARY KEY (persistence_id, sequence_number, tag); -- create new FK constraint for PK column. ALTER TABLE public.event_tag ADD CONSTRAINT "fk_event_journal_on_pk" FOREIGN KEY (persistence_id, sequence_number) REFERENCES public.event_journal (persistence_id, sequence_number) ON DELETE CASCADE; -- alter the event_id to nullable, so we can skip the InsertAndReturn. ALTER TABLE public.event_tag ALTER COLUMN event_id DROP NOT NULL; ================================================ FILE: core/src/main/resources/schema/sqlserver/sqlserver-create-schema-legacy.sql ================================================ IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID(N'"journal"') AND type in (N'U')) begin CREATE TABLE journal ( "ordering" BIGINT IDENTITY(1,1) NOT NULL, "deleted" BIT DEFAULT 0 NOT NULL, "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" NUMERIC(10,0) NOT NULL, "tags" VARCHAR(255) NULL DEFAULT NULL, "message" VARBINARY(max) NOT NULL, PRIMARY KEY ("persistence_id", "sequence_number") ) CREATE UNIQUE INDEX journal_ordering_idx ON journal (ordering) end; IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID(N'"snapshot"') AND type in (N'U')) CREATE TABLE snapshot ( "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" NUMERIC(10,0) NOT NULL, "created" NUMERIC NOT NULL, "snapshot" VARBINARY(max) NOT NULL, PRIMARY KEY ("persistence_id", "sequence_number") ); end; ================================================ FILE: core/src/main/resources/schema/sqlserver/sqlserver-create-schema-varchar.sql ================================================ /* Akka Persistence JDBC versions from 5.0.0 through 5.1.0 used this schema. 
The only difference from the post-5.0.4 schema is the use of VARCHAR instead of NVARCHAR for string fields. It is strongly recommended that new uses of Akka Persistence JDBC 5.0.0 and later use the NVARCHAR schema. This schema is still usable with post-5.0.4 versions of Akka Persistence JDBC, though will not support Unicode persistence IDs, manifests, or tags. Additionally, if using this schema, it is highly recommended to not have the SQL Server JDBC client send strings as Unicode, by appending ;sendStringParametersAsUnicode=false to the JDBC connection string. */ CREATE TABLE event_journal( "ordering" BIGINT IDENTITY(1,1) NOT NULL, "deleted" BIT DEFAULT 0 NOT NULL, "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" NUMERIC(10,0) NOT NULL, "writer" VARCHAR(255) NOT NULL, "write_timestamp" BIGINT NOT NULL, "adapter_manifest" VARCHAR(MAX) NOT NULL, "event_payload" VARBINARY(MAX) NOT NULL, "event_ser_id" INTEGER NOT NULL, "event_ser_manifest" VARCHAR(MAX) NOT NULL, "meta_payload" VARBINARY(MAX), "meta_ser_id" INTEGER, "meta_ser_manifest" VARCHAR(MAX) PRIMARY KEY ("persistence_id", "sequence_number") ); CREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering); CREATE TABLE event_tag ( "event_id" BIGINT NOT NULL, "tag" VARCHAR(255) NOT NULL PRIMARY KEY ("event_id","tag") constraint "fk_event_journal" foreign key("event_id") references "dbo"."event_journal"("ordering") on delete CASCADE ); CREATE TABLE "snapshot" ( "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" NUMERIC(10,0) NOT NULL, "created" BIGINT NOT NULL, "snapshot_ser_id" INTEGER NOT NULL, "snapshot_ser_manifest" VARCHAR(255) NOT NULL, "snapshot_payload" VARBINARY(MAX) NOT NULL, "meta_ser_id" INTEGER, "meta_ser_manifest" VARCHAR(255), "meta_payload" VARBINARY(MAX), PRIMARY KEY ("persistence_id", "sequence_number") ) ================================================ FILE: core/src/main/resources/schema/sqlserver/sqlserver-create-schema.sql ================================================ CREATE TABLE event_journal ( "ordering" BIGINT IDENTITY(1,1) NOT NULL, "deleted" BIT DEFAULT 0 NOT NULL, "persistence_id" NVARCHAR(255) NOT NULL, "sequence_number" NUMERIC(10,0) NOT NULL, "writer" NVARCHAR(255) NOT NULL, "write_timestamp" BIGINT NOT NULL, "adapter_manifest" NVARCHAR(MAX) NOT NULL, "event_payload" VARBINARY(MAX) NOT NULL, "event_ser_id" INTEGER NOT NULL, "event_ser_manifest" NVARCHAR(MAX) NOT NULL, "meta_payload" VARBINARY(MAX), "meta_ser_id" INTEGER, "meta_ser_manifest" NVARCHAR(MAX) PRIMARY KEY ("persistence_id", "sequence_number") ); CREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering); CREATE TABLE event_tag ( "event_id" BIGINT, "persistence_id" NVARCHAR(255), "sequence_number" NUMERIC(10,0), "tag" NVARCHAR(255) NOT NULL PRIMARY KEY ("persistence_id", "sequence_number","tag"), constraint "fk_event_journal" foreign key("persistence_id", "sequence_number") references "dbo"."event_journal"("persistence_id", "sequence_number") on delete CASCADE ); CREATE TABLE "snapshot" ( "persistence_id" NVARCHAR(255) NOT NULL, "sequence_number" NUMERIC(10,0) NOT NULL, "created" BIGINT NOT NULL, "snapshot_ser_id" INTEGER NOT NULL, "snapshot_ser_manifest" NVARCHAR(255) NOT NULL, "snapshot_payload" VARBINARY(MAX) NOT NULL, "meta_ser_id" INTEGER, "meta_ser_manifest" NVARCHAR(255), "meta_payload" VARBINARY(MAX), PRIMARY KEY ("persistence_id", "sequence_number") ) ================================================ FILE: core/src/main/resources/schema/sqlserver/sqlserver-drop-schema-legacy.sql 
================================================ DROP TABLE IF EXISTS journal; DROP TABLE IF EXISTS snapshot; ================================================ FILE: core/src/main/resources/schema/sqlserver/sqlserver-drop-schema.sql ================================================ DROP TABLE IF EXISTS event_tag; DROP TABLE IF EXISTS event_journal; DROP TABLE IF EXISTS snapshot; ================================================ FILE: core/src/main/resources/schema/sqlserver/sqlserver-event-tag-migration.sql ================================================ -- **************** first step **************** -- add new column ALTER TABLE event_tag ADD persistence_id VARCHAR(255), ADD sequence_number BIGINT; -- **************** second step **************** -- migrate rows UPDATE event_tag SET persistence_id = event_journal.persistence_id, sequence_number = event_journal.sequence_number FROM event_journal WHERE event_tag.event_id = event_journal.ordering; -- drop old FK constraint DECLARE @fkConstraintName NVARCHAR(MAX); DECLARE @dropFKConstraintQuery NVARCHAR(MAX); SELECT @fkConstraintName = CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE TABLE_NAME = 'event_tag' AND CONSTRAINT_TYPE = 'FOREIGN KEY'; IF @fkConstraintName IS NOT NULL BEGIN SET @dropFKConstraintQuery = 'ALTER TABLE event_tag DROP CONSTRAINT ' + QUOTENAME(@fkConstraintName); EXEC sp_executesql @dropFKConstraintQuery; END -- drop old PK constraint DECLARE @constraintName NVARCHAR(MAX); DECLARE @dropConstraintQuery NVARCHAR(MAX); SELECT @constraintName = CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE TABLE_NAME = 'event_tag' AND CONSTRAINT_TYPE = 'PRIMARY KEY'; IF @constraintName IS NOT NULL BEGIN SET @dropConstraintQuery = 'ALTER TABLE event_tag DROP CONSTRAINT ' + QUOTENAME(@constraintName); EXEC sp_executesql @dropConstraintQuery; END -- create new PK constraint for PK column. ALTER TABLE event_tag ALTER COLUMN persistence_id NVARCHAR(255) NOT NULL ALTER TABLE event_tag ALTER COLUMN sequence_number NUMERIC(10, 0) NOT NULL ALTER TABLE event_tag ADD CONSTRAINT "pk_event_tag" PRIMARY KEY (persistence_id, sequence_number, TAG) -- create new FK constraint for PK column. ALTER TABLE event_tag ADD CONSTRAINT "fk_event_journal_on_pk" FOREIGN KEY (persistence_id, sequence_number) REFERENCES event_journal (persistence_id, sequence_number) ON DELETE CASCADE -- alter the event_id to nullable, so we can skip the InsertAndReturn. ALTER TABLE event_tag ALTER COLUMN event_id BIGINT NULL ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/AkkaSerialization.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc import akka.annotation.InternalApi import akka.persistence.PersistentRepr import akka.persistence.jdbc.state.DurableStateTables import akka.persistence.jdbc.journal.dao.JournalTables.JournalAkkaSerializationRow import akka.serialization.{ Serialization, Serializers } import scala.util.{ Success, Try } /** * INTERNAL API */ @InternalApi object AkkaSerialization { case class AkkaSerialized(serId: Int, serManifest: String, payload: Array[Byte]) def serialize(serialization: Serialization, payload: Any): Try[AkkaSerialized] = { val p2 = payload.asInstanceOf[AnyRef] val serializer = serialization.findSerializerFor(p2) val serManifest = Serializers.manifestFor(serializer, p2) val serialized = serialization.serialize(p2) serialized.map(payload => AkkaSerialized(serializer.identifier, serManifest, payload)) } def fromRow(serialization: Serialization)(row: JournalAkkaSerializationRow): Try[(PersistentRepr, Long)] = { serialization.deserialize(row.eventPayload, row.eventSerId, row.eventSerManifest).flatMap { payload => val metadata = for { mPayload <- row.metaPayload mSerId <- row.metaSerId } yield (mPayload, mSerId) val repr = PersistentRepr( payload, row.sequenceNumber, row.persistenceId, row.adapterManifest, row.deleted, sender = null, writerUuid = row.writer) // This means that failure to deserialize the meta will fail the read, I think this is the correct to do for { withMeta <- metadata match { case None => Success(repr) case Some((payload, id)) => serialization.deserialize(payload, id, row.metaSerManifest.getOrElse("")).map { meta => repr.withMetadata(meta) } } } yield (withMeta.withTimestamp(row.writeTimestamp), row.ordering) } } def fromDurableStateRow(serialization: Serialization)(row: DurableStateTables.DurableStateRow): Try[AnyRef] = { serialization.deserialize(row.statePayload, row.stateSerId, row.stateSerManifest.getOrElse("")) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/JournalRow.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc final case class JournalRow( ordering: Long, deleted: Boolean, persistenceId: String, sequenceNumber: Long, message: Array[Byte], tags: Option[String] = None) ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/cleanup/javadsl/EventSourcedCleanup.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.cleanup.javadsl import java.util.concurrent.CompletionStage import scala.jdk.FutureConverters._ import akka.Done import akka.actor.ClassicActorSystemProvider import akka.annotation.ApiMayChange import akka.persistence.jdbc.cleanup.scaladsl /** * Java API: Tool for deleting events and/or snapshots for a `persistenceId` without using persistent actors. * * When running an operation with `EventSourcedCleanup` that deletes all events for a persistence id, the actor with * that persistence id must not be running! If the actor is restarted it would in that case be recovered to the wrong * state since the stored events have been deleted. Delete events before snapshot can still be used while the actor is * running. * * If `resetSequenceNumber` is `true` then the creating entity with the same `persistenceId` will start from 0. 
* Otherwise it will continue from the latest highest used sequence number. * * WARNING: reusing the same `persistenceId` after resetting the sequence number should be avoided, since it might be * confusing to reuse the same sequence number for new events. */ @ApiMayChange final class EventSourcedCleanup private (delegate: scaladsl.EventSourcedCleanup) { def this(systemProvider: ClassicActorSystemProvider, journalConfigPath: String, snapshotConfigPath: String) = this(new scaladsl.EventSourcedCleanup(systemProvider, journalConfigPath, snapshotConfigPath)) def this(systemProvider: ClassicActorSystemProvider) = this(systemProvider, "jdbc-journal", "jdbc-snapshot-store") /** * Delete all events related to one single `persistenceId`. Snapshots are not deleted. */ def deleteAllEvents(persistenceId: String, resetSequenceNumber: Boolean): CompletionStage[Done] = delegate.deleteAllEvents(persistenceId, resetSequenceNumber).asJava /** * Delete snapshots related to one single `persistenceId`. Events are not deleted. */ def deleteSnapshot(persistenceId: String): CompletionStage[Done] = delegate.deleteSnapshot(persistenceId).asJava /** * Delete everything related to one single `persistenceId`. All events and snapshots are deleted. */ def deleteAll(persistenceId: String, resetSequenceNumber: Boolean): CompletionStage[Done] = delegate.deleteAll(persistenceId, resetSequenceNumber).asJava } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/cleanup/scaladsl/EventSourcedCleanup.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.cleanup.scaladsl import scala.concurrent.{ ExecutionContext, Future } import akka.Done import akka.actor.{ ActorSystem, ClassicActorSystemProvider } import akka.annotation.ApiMayChange import akka.persistence.jdbc.config.{ JournalConfig, SnapshotConfig } import akka.persistence.jdbc.db.SlickExtension import akka.persistence.jdbc.journal.dao.JournalDaoInstantiation import akka.persistence.jdbc.snapshot.dao.SnapshotDaoInstantiation import akka.stream.{ Materializer, SystemMaterializer } /** * Scala API: Tool for deleting events and/or snapshots for a `persistenceId` without using persistent actors. * * When running an operation with `EventSourcedCleanup` that deletes all events for a persistence id, the actor with * that persistence id must not be running! If the actor is restarted it would in that case be recovered to the wrong * state since the stored events have been deleted. Delete events before snapshot can still be used while the actor is * running. * * If `resetSequenceNumber` is `true` then the creating entity with the same `persistenceId` will start from 0. * Otherwise it will continue from the latest highest used sequence number. * * WARNING: reusing the same `persistenceId` after resetting the sequence number should be avoided, since it might be * confusing to reuse the same sequence number for new events. 
*/ @ApiMayChange final class EventSourcedCleanup( systemProvider: ClassicActorSystemProvider, journalConfigPath: String, snapshotConfigPath: String) { def this(systemProvider: ClassicActorSystemProvider) = this(systemProvider, "jdbc-journal", "jdbc-snapshot-store") private implicit val system: ActorSystem = systemProvider.classicSystem private implicit val executionContext: ExecutionContext = system.dispatchers.defaultGlobalDispatcher private implicit val mat: Materializer = SystemMaterializer(system).materializer private val slick = SlickExtension(system) private val journalConfig = system.settings.config.getConfig(journalConfigPath) private val journalDao = JournalDaoInstantiation.journalDao(new JournalConfig(journalConfig), slick.database(journalConfig)) private val snapshotConfig = system.settings.config.getConfig(snapshotConfigPath) private val snapshotDao = SnapshotDaoInstantiation.snapshotDao(new SnapshotConfig(snapshotConfig), slick.database(snapshotConfig)) /** * Delete all events related to one single `persistenceId`. Snapshots are not deleted. */ def deleteAllEvents(persistenceId: String, resetSequenceNumber: Boolean): Future[Done] = { journalDao.deleteEventsTo(persistenceId, toSequenceNr = Long.MaxValue, resetSequenceNumber).map(_ => Done) } /** * Delete snapshots related to one single `persistenceId`. Events are not deleted. */ def deleteSnapshot(persistenceId: String): Future[Done] = { snapshotDao.deleteUpToMaxSequenceNr(persistenceId, Long.MaxValue).map(_ => Done) } /** * Delete everything related to one single `persistenceId`. All events and snapshots are deleted. */ def deleteAll(persistenceId: String, resetSequenceNumber: Boolean): Future[Done] = { for { _ <- deleteAllEvents(persistenceId, resetSequenceNumber) _ <- deleteSnapshot(persistenceId) } yield Done } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/config/AkkaPersistenceConfig.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
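[editor's note] A minimal usage sketch of the scaladsl cleanup tool defined above may help; the object name, the persistence id handling and the reliance on the default "jdbc-journal"/"jdbc-snapshot-store" config paths are illustrative assumptions, not part of the plugin:

// Hedged sketch: delete everything for one persistence id while its actor is stopped.
import akka.Done
import akka.actor.ActorSystem
import akka.persistence.jdbc.cleanup.scaladsl.EventSourcedCleanup
import scala.concurrent.Future

object CleanupExample {
  def wipe(persistenceId: String)(implicit system: ActorSystem): Future[Done] = {
    // uses the default "jdbc-journal" and "jdbc-snapshot-store" config paths
    val cleanup = new EventSourcedCleanup(system)
    // keep the highest sequence number so a later incarnation does not reuse numbers
    cleanup.deleteAll(persistenceId, resetSequenceNumber = false)
  }
}

The javadsl class shown earlier wraps exactly this delegate and returns CompletionStage[Done] instead of Future[Done].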
*/ package akka.persistence.jdbc.config import akka.persistence.jdbc.util.ConfigOps._ import com.typesafe.config.Config import scala.concurrent.duration._ object ConfigKeys { val useSharedDb = "use-shared-db" } class SlickConfiguration(config: Config) { val jndiName: Option[String] = config.asStringOption("jndiName") val jndiDbName: Option[String] = config.asStringOption("jndiDbName") override def toString: String = s"SlickConfiguration($jndiName,$jndiDbName)" } class LegacyJournalTableColumnNames(config: Config) { private val cfg = config.getConfig("tables.legacy_journal.columnNames") val ordering: String = cfg.getString("ordering") val deleted: String = cfg.getString("deleted") val persistenceId: String = cfg.getString("persistenceId") val sequenceNumber: String = cfg.getString("sequenceNumber") val created: String = cfg.getString("created") val tags: String = cfg.getString("tags") val message: String = cfg.getString("message") override def toString: String = s"JournalTableColumnNames($persistenceId,$sequenceNumber,$created,$tags,$message)" } class EventJournalTableColumnNames(config: Config) { private val cfg = config.getConfig("tables.event_journal.columnNames") val ordering: String = cfg.getString("ordering") val deleted: String = cfg.getString("deleted") val persistenceId: String = cfg.getString("persistenceId") val sequenceNumber: String = cfg.getString("sequenceNumber") val writer: String = cfg.getString("writer") val writeTimestamp: String = cfg.getString("writeTimestamp") val adapterManifest: String = cfg.getString("adapterManifest") val eventPayload: String = cfg.getString("eventPayload") val eventSerId: String = cfg.getString("eventSerId") val eventSerManifest: String = cfg.getString("eventSerManifest") val metaPayload: String = cfg.getString("metaPayload") val metaSerId: String = cfg.getString("metaSerId") val metaSerManifest: String = cfg.getString("metaSerManifest") } class EventTagTableColumnNames(config: Config) { private val cfg = config.getConfig("tables.event_tag.columnNames") val eventId: String = cfg.getString("eventId") // for compatibility val persistenceId: String = cfg.getString("persistenceId") val sequenceNumber: String = cfg.getString("sequenceNumber") val tag: String = cfg.getString("tag") } class LegacyJournalTableConfiguration(config: Config) { private val cfg = config.getConfig("tables.legacy_journal") val tableName: String = cfg.getString("tableName") val schemaName: Option[String] = cfg.asStringOption("schemaName") val columnNames: LegacyJournalTableColumnNames = new LegacyJournalTableColumnNames(config) override def toString: String = s"LegacyJournalTableConfiguration($tableName,$schemaName,$columnNames)" } class EventJournalTableConfiguration(config: Config) { private val cfg = config.getConfig("tables.event_journal") val tableName: String = cfg.getString("tableName") val schemaName: Option[String] = cfg.asStringOption("schemaName") val columnNames: EventJournalTableColumnNames = new EventJournalTableColumnNames(config) override def toString: String = s"EventJournalTableConfiguration($tableName,$schemaName,$columnNames)" } class EventTagTableConfiguration(config: Config) { private val cfg = config.getConfig("tables.event_tag") val legacyTagKey: Boolean = cfg.getBoolean("legacy-tag-key") val tableName: String = cfg.getString("tableName") val schemaName: Option[String] = cfg.asStringOption("schemaName") val columnNames: EventTagTableColumnNames = new EventTagTableColumnNames(config) } class LegacySnapshotTableColumnNames(config: Config) { private val cfg 
= config.getConfig("tables.legacy_snapshot.columnNames") val persistenceId: String = cfg.getString("persistenceId") val sequenceNumber: String = cfg.getString("sequenceNumber") val created: String = cfg.getString("created") val snapshot: String = cfg.getString("snapshot") override def toString: String = s"SnapshotTableColumnNames($persistenceId,$sequenceNumber,$created,$snapshot)" } class SnapshotTableColumnNames(config: Config) { private val cfg = config.getConfig("tables.snapshot.columnNames") val persistenceId: String = cfg.getString("persistenceId") val sequenceNumber: String = cfg.getString("sequenceNumber") val created: String = cfg.getString("created") val snapshotPayload: String = cfg.getString("snapshotPayload") val snapshotSerId: String = cfg.getString("snapshotSerId") val snapshotSerManifest: String = cfg.getString("snapshotSerManifest") val metaPayload: String = cfg.getString("metaPayload") val metaSerId: String = cfg.getString("metaSerId") val metaSerManifest: String = cfg.getString("metaSerManifest") } class LegacySnapshotTableConfiguration(config: Config) { private val cfg = config.getConfig("tables.legacy_snapshot") val tableName: String = cfg.getString("tableName") val schemaName: Option[String] = cfg.asStringOption("schemaName") val columnNames: LegacySnapshotTableColumnNames = new LegacySnapshotTableColumnNames(config) override def toString: String = s"LegacySnapshotTableConfiguration($tableName,$schemaName,$columnNames)" } class SnapshotTableConfiguration(config: Config) { private val cfg = config.getConfig("tables.snapshot") val tableName: String = cfg.getString("tableName") val schemaName: Option[String] = cfg.asStringOption("schemaName") val columnNames: SnapshotTableColumnNames = new SnapshotTableColumnNames(config) override def toString: String = s"SnapshotTableConfiguration($tableName,$schemaName,$columnNames)" } class JournalPluginConfig(config: Config) { val tagSeparator: String = config.getString("tagSeparator") val dao: String = config.getString("dao") override def toString: String = s"JournalPluginConfig($tagSeparator,$dao)" } class BaseDaoConfig(config: Config) { val bufferSize: Int = config.getInt("bufferSize") val batchSize: Int = config.getInt("batchSize") val replayBatchSize: Int = config.getInt("replayBatchSize") val parallelism: Int = config.getInt("parallelism") override def toString: String = s"BaseDaoConfig($bufferSize,$batchSize,$parallelism)" } class ReadJournalPluginConfig(config: Config) { val tagSeparator: String = config.getString("tagSeparator") val dao: String = config.getString("dao") override def toString: String = s"ReadJournalPluginConfig($tagSeparator,$dao)" } class SnapshotPluginConfig(config: Config) { val dao: String = config.getString("dao") override def toString: String = s"SnapshotPluginConfig($dao)" } // aggregations class JournalConfig(config: Config) { val journalTableConfiguration = new LegacyJournalTableConfiguration(config) val eventJournalTableConfiguration = new EventJournalTableConfiguration(config) val eventTagTableConfiguration = new EventTagTableConfiguration(config) val pluginConfig = new JournalPluginConfig(config) val daoConfig = new BaseDaoConfig(config) val useSharedDb: Option[String] = config.asStringOption(ConfigKeys.useSharedDb) override def toString: String = s"JournalConfig($journalTableConfiguration,$pluginConfig,$useSharedDb)" } class SnapshotConfig(config: Config) { val legacySnapshotTableConfiguration = new LegacySnapshotTableConfiguration(config) val snapshotTableConfiguration = new 
SnapshotTableConfiguration(config) val pluginConfig = new SnapshotPluginConfig(config) val useSharedDb: Option[String] = config.asStringOption(ConfigKeys.useSharedDb) override def toString: String = s"SnapshotConfig($snapshotTableConfiguration,$pluginConfig,$useSharedDb)" } object JournalSequenceRetrievalConfig { def apply(config: Config): JournalSequenceRetrievalConfig = JournalSequenceRetrievalConfig( batchSize = config.getInt("journal-sequence-retrieval.batch-size"), maxTries = config.getInt("journal-sequence-retrieval.max-tries"), queryDelay = config.asFiniteDuration("journal-sequence-retrieval.query-delay"), maxBackoffQueryDelay = config.asFiniteDuration("journal-sequence-retrieval.max-backoff-query-delay"), askTimeout = config.asFiniteDuration("journal-sequence-retrieval.ask-timeout")) } case class JournalSequenceRetrievalConfig( batchSize: Int, maxTries: Int, queryDelay: FiniteDuration, maxBackoffQueryDelay: FiniteDuration, askTimeout: FiniteDuration) class ReadJournalConfig(config: Config) { val journalTableConfiguration = new LegacyJournalTableConfiguration(config) val eventJournalTableConfiguration = new EventJournalTableConfiguration(config) val eventTagTableConfiguration = new EventTagTableConfiguration(config) val journalSequenceRetrievalConfiguration = JournalSequenceRetrievalConfig(config) val pluginConfig = new ReadJournalPluginConfig(config) val refreshInterval: FiniteDuration = config.asFiniteDuration("refresh-interval") val maxBufferSize: Int = config.getInt("max-buffer-size") val eventsByTagBufferSizesPerQuery: Long = config.getLong("events-by-tag-buffer-sizes-per-query") require(eventsByTagBufferSizesPerQuery >= 0, "events-by-tag-buffer-sizes-per-query must not be negative") val addShutdownHook: Boolean = config.getBoolean("add-shutdown-hook") override def toString: String = s"ReadJournalConfig($journalTableConfiguration,$pluginConfig,$refreshInterval,$maxBufferSize,$addShutdownHook)" } class DurableStateTableColumnNames(config: Config) { private val cfg = config.getConfig("tables.durable_state.columnNames") val globalOffset: String = cfg.getString("globalOffset") val persistenceId: String = cfg.getString("persistenceId") val revision: String = cfg.getString("revision") val statePayload: String = cfg.getString("statePayload") val stateSerId: String = cfg.getString("stateSerId") val stateSerManifest: String = cfg.getString("stateSerManifest") val tag: String = cfg.getString("tag") val stateTimestamp: String = cfg.getString("stateTimestamp") } class DurableStateTableConfiguration(config: Config) { private val cfg = config.getConfig("tables.durable_state") val tableName: String = cfg.getString("tableName") val refreshInterval: FiniteDuration = config.asFiniteDuration("refreshInterval") val batchSize: Int = config.getInt("batchSize") val schemaName: Option[String] = cfg.asStringOption("schemaName") val columnNames: DurableStateTableColumnNames = new DurableStateTableColumnNames(config) val stateSequenceConfig = DurableStateSequenceRetrievalConfig(config) override def toString: String = s"DurableStateTableConfiguration($tableName,$schemaName,$columnNames)" } object DurableStateSequenceRetrievalConfig { def apply(config: Config): DurableStateSequenceRetrievalConfig = DurableStateSequenceRetrievalConfig( batchSize = config.getInt("durable-state-sequence-retrieval.batch-size"), maxTries = config.getInt("durable-state-sequence-retrieval.max-tries"), queryDelay = config.asFiniteDuration("durable-state-sequence-retrieval.query-delay"), maxBackoffQueryDelay = 
config.asFiniteDuration("durable-state-sequence-retrieval.max-backoff-query-delay"), askTimeout = config.asFiniteDuration("durable-state-sequence-retrieval.ask-timeout"), revisionCacheCapacity = config.getInt("durable-state-sequence-retrieval.revision-cache-capacity")) } case class DurableStateSequenceRetrievalConfig( batchSize: Int, maxTries: Int, queryDelay: FiniteDuration, maxBackoffQueryDelay: FiniteDuration, askTimeout: FiniteDuration, revisionCacheCapacity: Int) ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/db/SlickDatabase.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.db import akka.actor.ActorSystem import akka.annotation.InternalApi import javax.naming.InitialContext import akka.persistence.jdbc.config.SlickConfiguration import com.typesafe.config.Config import slick.basic.DatabaseConfig import slick.jdbc.JdbcProfile import slick.jdbc.JdbcBackend._ /** * INTERNAL API */ @deprecated(message = "Internal API, will be removed in 4.0.0", since = "3.4.0") object SlickDriver { /** * INTERNAL API */ @deprecated(message = "Internal API, will be removed in 4.0.0", since = "3.4.0") def forDriverName(config: Config): JdbcProfile = SlickDatabase.profile(config, "slick") } /** * INTERNAL API */ object SlickDatabase { /** * INTERNAL API */ @deprecated(message = "Internal API, will be removed in 4.0.0", since = "3.4.0") def forConfig(config: Config, slickConfiguration: SlickConfiguration): Database = { database(config, slickConfiguration, "slick.db") } /** * INTERNAL API */ private[jdbc] def profile(config: Config, path: String): JdbcProfile = DatabaseConfig.forConfig[JdbcProfile](path, config).profile /** * INTERNAL API */ private[jdbc] def database(config: Config, slickConfiguration: SlickConfiguration, path: String): Database = { slickConfiguration.jndiName .map(Database.forName(_, None)) .orElse { slickConfiguration.jndiDbName.map(new InitialContext().lookup(_).asInstanceOf[Database]) } .getOrElse(Database.forConfig(path, config)) } /** * INTERNAL API */ private[jdbc] def initializeEagerly( config: Config, slickConfiguration: SlickConfiguration, path: String): SlickDatabase = { val dbPath = if (path.isEmpty) "db" else s"$path.db" EagerSlickDatabase(database(config, slickConfiguration, dbPath), profile(config, path)) } } trait SlickDatabase { def database: Database def profile: JdbcProfile /** * If true, the requesting side usually a (read/write/snapshot journal) * should shutdown the database when it closes. If false, it should leave * the database connection pool open, since it might still be used elsewhere. 
*/ def allowShutdown: Boolean } @InternalApi case class EagerSlickDatabase(database: Database, profile: JdbcProfile) extends SlickDatabase { override def allowShutdown: Boolean = true } /** * A LazySlickDatabase lazily initializes a database, it also manages the shutdown of the database * @param config The configuration used to create the database */ @InternalApi class LazySlickDatabase(config: Config, system: ActorSystem) extends SlickDatabase { val profile: JdbcProfile = SlickDatabase.profile(config, path = "") lazy val database: Database = { val db = SlickDatabase.database(config, new SlickConfiguration(config), path = "db") system.registerOnTermination { db.close() } db } /** This database shutdown is managed by the db holder, so users of this db do not need to bother shutting it down */ override def allowShutdown: Boolean = false } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/db/SlickExtension.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.db import akka.actor.{ ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider } import akka.persistence.jdbc.config.{ ConfigKeys, SlickConfiguration } import akka.persistence.jdbc.util.ConfigOps._ import com.typesafe.config.{ Config, ConfigObject } import scala.jdk.CollectionConverters._ import scala.util.{ Failure, Success } object SlickExtension extends ExtensionId[SlickExtensionImpl] with ExtensionIdProvider { override def lookup: SlickExtension.type = SlickExtension override def createExtension(system: ExtendedActorSystem) = new SlickExtensionImpl(system) } class SlickExtensionImpl(system: ExtendedActorSystem) extends Extension { private val dbProvider: SlickDatabaseProvider = { val fqcn = system.settings.config.getString("akka-persistence-jdbc.database-provider-fqcn") val args = List(classOf[ActorSystem] -> system) system.dynamicAccess.createInstanceFor[SlickDatabaseProvider](fqcn, args) match { case Success(result) => result case Failure(t) => throw new RuntimeException("Failed to create SlickDatabaseProvider", t) } } def database(config: Config): SlickDatabase = dbProvider.database(config) } /** * User overridable database provider. * Since this provider is called from an akka extension it must be thread safe! * * A SlickDatabaseProvider is loaded using reflection, * The instance is created using the following: * - The fully qualified class name as configured in `jdbc-journal.database-provider-fqcn`. * - The constructor with one argument of type [[akka.actor.ActorSystem]] is used to create the instance. * Therefore the class must have such a constructor. */ trait SlickDatabaseProvider { /** * Create or retrieve the database * @param config The configuration which may be used to create the database. If the database is shared * then the SlickDatabaseProvider implementation may choose to ignore this parameter. 
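[editor's note] As a concrete illustration of the provider contract described above, here is a hedged sketch of a custom SlickDatabaseProvider; the class name, the "my-app.database" config path and the PostgresProfile choice are assumptions for the example, not part of the plugin. Such a class would be referenced from akka-persistence-jdbc.database-provider-fqcn.

// Hedged sketch only: class and config names below are illustrative.
import akka.actor.ActorSystem
import akka.persistence.jdbc.db.{ SlickDatabase, SlickDatabaseProvider }
import com.typesafe.config.Config
import slick.jdbc.JdbcBackend.Database
import slick.jdbc.{ JdbcProfile, PostgresProfile }

class MyDatabaseProvider(system: ActorSystem) extends SlickDatabaseProvider {
  // one connection pool, created once and shared by journal, snapshot store and queries
  private val sharedDb: Database = Database.forConfig("my-app.database", system.settings.config)
  system.registerOnTermination(sharedDb.close())

  override def database(config: Config): SlickDatabase =
    new SlickDatabase {
      override def database: Database = sharedDb
      override def profile: JdbcProfile = PostgresProfile // illustrative profile choice
      // the pool is owned by this provider, so the plugins must not close it
      override def allowShutdown: Boolean = false
    }
}

Returning allowShutdown = false keeps the shared pool open when an individual plugin stops, mirroring what LazySlickDatabase does for shared databases.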
*/ def database(config: Config): SlickDatabase } class DefaultSlickDatabaseProvider(system: ActorSystem) extends SlickDatabaseProvider { val sharedDatabases: Map[String, LazySlickDatabase] = system.settings.config .getObject("akka-persistence-jdbc.shared-databases") .asScala .flatMap { case (key, confObj: ConfigObject) => val conf = confObj.toConfig if (conf.hasPath("profile")) { // Only create the LazySlickDatabase if a profile has actually been configured, this ensures that the example in the reference conf is ignored List(key -> new LazySlickDatabase(conf, system)) } else Nil case (key, notAnObject) => throw new RuntimeException( s"""Expected "akka-persistence-jdbc.shared-databases.$key" to be a config ConfigObject, but got ${notAnObject .valueType()} (${notAnObject.getClass})""") } .toMap private def getSharedDbOrThrow(sharedDbName: String): LazySlickDatabase = sharedDatabases.getOrElse( sharedDbName, throw new RuntimeException( s"No shared database is configured under akka-persistence-jdbc.shared-databases.$sharedDbName")) def database(config: Config): SlickDatabase = { config.asStringOption(ConfigKeys.useSharedDb) match { case None => SlickDatabase.initializeEagerly(config, new SlickConfiguration(config.getConfig("slick")), "slick") case Some(sharedDbName) => getSharedDbOrThrow(sharedDbName) } } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/JdbcAsyncWriteJournal.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.journal import java.util.{ HashMap => JHMap, Map => JMap } import akka.Done import akka.actor.ActorSystem import akka.persistence.jdbc.config.JournalConfig import akka.persistence.jdbc.journal.JdbcAsyncWriteJournal.{ InPlaceUpdateEvent, WriteFinished } import akka.persistence.jdbc.journal.dao.{ JournalDao, JournalDaoInstantiation, JournalDaoWithUpdates } import akka.persistence.jdbc.db.{ SlickDatabase, SlickExtension } import akka.persistence.journal.AsyncWriteJournal import akka.persistence.{ AtomicWrite, PersistentRepr } import akka.stream.{ Materializer, SystemMaterializer } import com.typesafe.config.Config import slick.jdbc.JdbcBackend._ import scala.collection.immutable._ import scala.concurrent.{ ExecutionContext, Future } import scala.util.{ Failure, Success, Try } import akka.pattern.pipe import akka.persistence.jdbc.util.PluginVersionChecker object JdbcAsyncWriteJournal { private case class WriteFinished(pid: String, f: Future[_]) /** * Extra Plugin API: May be used to issue in-place updates for events. * To be used only for data migrations such as "encrypt all events" and similar operations. * * The write payload may be wrapped in a [[akka.persistence.journal.Tagged]], * in which case the new tags will overwrite the existing tags of the event. 
*/ final case class InPlaceUpdateEvent(persistenceId: String, seqNr: Long, write: AnyRef) } class JdbcAsyncWriteJournal(config: Config) extends AsyncWriteJournal { implicit val ec: ExecutionContext = context.dispatcher implicit val system: ActorSystem = context.system implicit val mat: Materializer = SystemMaterializer(system).materializer val journalConfig = new JournalConfig(config) PluginVersionChecker.check() val slickDb: SlickDatabase = SlickExtension(system).database(config) def db: Database = slickDb.database val journalDao: JournalDao = JournalDaoInstantiation.journalDao(journalConfig, slickDb) // only accessed if we need to perform Updates -- which is very rarely def journalDaoWithUpdates: JournalDaoWithUpdates = journalDao match { case upgraded: JournalDaoWithUpdates => upgraded case _ => throw new IllegalStateException(s"The ${journalDao.getClass} does NOT implement [JournalDaoWithUpdates], " + s"which is required to perform updates of events! Please configure a valid update capable DAO (e.g. the default [ByteArrayJournalDao].") } // readHighestSequence must be performed after pending write for a persistenceId // when the persistent actor is restarted. private val writeInProgress: JMap[String, Future[_]] = new JHMap override def asyncWriteMessages(messages: Seq[AtomicWrite]): Future[Seq[Try[Unit]]] = { // add timestamp to all payloads in all AtomicWrite messages val now = System.currentTimeMillis() val timedMessages = messages.map { atomWrt => atomWrt.copy(payload = atomWrt.payload.map(pr => pr.withTimestamp(now))) } val future = journalDao.asyncWriteMessages(timedMessages) val persistenceId = timedMessages.head.persistenceId writeInProgress.put(persistenceId, future) future.onComplete(_ => self ! WriteFinished(persistenceId, future)) future } override def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] = journalDao.delete(persistenceId, toSequenceNr) override def asyncReadHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = { def fetchHighestSeqNr() = journalDao.highestSequenceNr(persistenceId, fromSequenceNr) writeInProgress.get(persistenceId) match { case null => fetchHighestSeqNr() case f: Future[Any @unchecked] => // we must fetch the highest sequence number after the previous write has completed // If the previous write failed then we can ignore this f.recover { case _ => () }.flatMap(_ => fetchHighestSeqNr()) } } private def asyncUpdateEvent(persistenceId: String, sequenceNr: Long, message: AnyRef): Future[Done] = { journalDaoWithUpdates.update(persistenceId, sequenceNr, message) } override def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( recoveryCallback: (PersistentRepr) => Unit): Future[Unit] = journalDao .messagesWithBatch(persistenceId, fromSequenceNr, toSequenceNr, journalConfig.daoConfig.replayBatchSize, None) .take(max) .runForeach { case Success((repr, _)) => recoveryCallback(repr) case Failure(ex) => throw ex } .map(_ => ()) override def postStop(): Unit = { if (slickDb.allowShutdown) { // Since a (new) db is created when this actor (re)starts, we must close it when the actor stops db.close() } super.postStop() } override def receivePluginInternal: Receive = { case WriteFinished(persistenceId, future) => writeInProgress.remove(persistenceId, future) case InPlaceUpdateEvent(pid, seq, write) => asyncUpdateEvent(pid, seq, write).pipeTo(sender()) } } ================================================ FILE: 
core/src/main/scala/akka/persistence/jdbc/journal/dao/BaseDao.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.journal.dao import akka.persistence.jdbc.config.BaseDaoConfig import akka.stream.scaladsl.{ Keep, Sink, Source, SourceQueueWithComplete } import akka.stream.{ Materializer, OverflowStrategy, QueueOfferResult } import scala.collection.immutable.{ Seq, Vector } import scala.concurrent.{ ExecutionContext, Future, Promise } // Shared with the legacy DAO abstract class BaseDao[T] { implicit val mat: Materializer implicit val ec: ExecutionContext def baseDaoConfig: BaseDaoConfig val writeQueue: SourceQueueWithComplete[(Promise[Unit], Seq[T])] = Source .queue[(Promise[Unit], Seq[T])](baseDaoConfig.bufferSize, OverflowStrategy.dropNew) .batchWeighted[(Seq[Promise[Unit]], Seq[T])](baseDaoConfig.batchSize, _._2.size, tup => Vector(tup._1) -> tup._2) { case ((promises, rows), (newPromise, newRows)) => (promises :+ newPromise) -> (rows ++ newRows) } .mapAsync(baseDaoConfig.parallelism) { case (promises, rows) => writeJournalRows(rows).map(unit => promises.foreach(_.success(unit))).recover { case t => promises.foreach(_.failure(t)) } } .toMat(Sink.ignore)(Keep.left) .run() def writeJournalRows(xs: Seq[T]): Future[Unit] def queueWriteJournalRows(xs: Seq[T]): Future[Unit] = { val promise = Promise[Unit]() writeQueue.offer(promise -> xs).flatMap { case QueueOfferResult.Enqueued => promise.future case QueueOfferResult.Failure(t) => Future.failed(new Exception("Failed to write journal row batch", t)) case QueueOfferResult.Dropped => Future.failed(new Exception( s"Failed to enqueue journal row batch write, the queue buffer was full (${baseDaoConfig.bufferSize} elements) please check the jdbc-journal.bufferSize setting")) case QueueOfferResult.QueueClosed => Future.failed(new Exception("Failed to enqueue journal row batch write, the queue was closed")) } } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/BaseJournalDaoWithReadMessages.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
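[editor's note] The queue wiring in BaseDao above is easier to follow in isolation; the following is a simplified, self-contained sketch of the same batching pattern (names, the String element type and the sizes are illustrative, not the plugin's API):

// Callers enqueue (Promise, rows); batches are flushed with bounded parallelism and
// every caller's Promise is completed once its batch has been written.
import akka.actor.ActorSystem
import akka.stream.{ OverflowStrategy, QueueOfferResult }
import akka.stream.scaladsl.{ Keep, Sink, Source }
import scala.collection.immutable.Seq
import scala.concurrent.{ Future, Promise }

class WriteQueueSketch(writeBatch: Seq[String] => Future[Unit])(implicit system: ActorSystem) {
  import system.dispatcher

  private val queue = Source
    .queue[(Promise[Unit], Seq[String])](1000, OverflowStrategy.dropNew)
    .batchWeighted(400L, _._2.size, tup => Vector(tup._1) -> tup._2) {
      case ((promises, rows), (p, newRows)) => (promises :+ p) -> (rows ++ newRows)
    }
    .mapAsync(4) { case (promises, rows) =>
      writeBatch(rows)
        .map(_ => promises.foreach(_.success(())))
        .recover { case t => promises.foreach(_.failure(t)) }
    }
    .toMat(Sink.ignore)(Keep.left)
    .run()

  def offer(rows: Seq[String]): Future[Unit] = {
    val p = Promise[Unit]()
    queue.offer(p -> rows).flatMap {
      case QueueOfferResult.Enqueued => p.future
      case other                     => Future.failed(new RuntimeException(s"enqueue failed: $other"))
    }
  }
}

As in BaseDao, a dropped or rejected offer surfaces as a failed Future rather than a silently lost write.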
*/ package akka.persistence.jdbc.journal.dao import akka.NotUsed import akka.actor.Scheduler import akka.persistence.PersistentRepr import akka.persistence.jdbc.journal.dao.FlowControl.{ Continue, ContinueDelayed, Stop } import akka.stream.Materializer import akka.stream.scaladsl.{ Sink, Source } import scala.concurrent.{ ExecutionContext, Future } import scala.concurrent.duration.FiniteDuration import scala.util.{ Failure, Success, Try } trait BaseJournalDaoWithReadMessages extends JournalDaoWithReadMessages { implicit val ec: ExecutionContext implicit val mat: Materializer override def messagesWithBatch( persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, batchSize: Int, refreshInterval: Option[(FiniteDuration, Scheduler)]): Source[Try[(PersistentRepr, Long)], NotUsed] = { Source .unfoldAsync[(Long, FlowControl), Seq[Try[(PersistentRepr, Long)]]]((Math.max(1, fromSequenceNr), Continue)) { case (from, control) => def retrieveNextBatch(): Future[Option[((Long, FlowControl), Seq[Try[(PersistentRepr, Long)]])]] = { for { xs <- messages(persistenceId, from, toSequenceNr, batchSize).runWith(Sink.seq) } yield { val hasMoreEvents = xs.size == batchSize // Events are ordered by sequence number, therefore the last one is the largest) val lastSeqNrInBatch: Option[Long] = xs.lastOption match { case Some(Success((repr, _))) => Some(repr.sequenceNr) case Some(Failure(e)) => throw e // fail the returned Future case None => None } val hasLastEvent = lastSeqNrInBatch.exists(_ >= toSequenceNr) val nextControl: FlowControl = if (hasLastEvent || from > toSequenceNr) Stop else if (hasMoreEvents) Continue else if (refreshInterval.isEmpty) Stop else ContinueDelayed val nextFrom: Long = lastSeqNrInBatch match { // Continue querying from the last sequence number (the events are ordered) case Some(lastSeqNr) => lastSeqNr + 1 case None => from } Some(((nextFrom, nextControl), xs)) } } control match { case Stop => Future.successful(None) case Continue => retrieveNextBatch() case ContinueDelayed => val (delay, scheduler) = refreshInterval.get akka.pattern.after(delay, scheduler)(retrieveNextBatch()) } } .mapConcat(identity) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/DefaultJournalDao.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.journal.dao import scala.collection.immutable import scala.collection.immutable.Nil import scala.collection.immutable.Seq import scala.concurrent.ExecutionContext import scala.concurrent.Future import scala.util.Try import akka.NotUsed import akka.persistence.jdbc.AkkaSerialization import akka.persistence.jdbc.config.BaseDaoConfig import akka.persistence.jdbc.config.JournalConfig import akka.persistence.jdbc.journal.dao.JournalTables.JournalAkkaSerializationRow import akka.persistence.journal.Tagged import akka.persistence.AtomicWrite import akka.persistence.PersistentRepr import akka.serialization.Serialization import akka.stream.Materializer import akka.stream.scaladsl.Source import slick.jdbc.JdbcBackend.Database import slick.jdbc.JdbcProfile /** * A [[JournalDao]] that uses Akka serialization to serialize the payload and store * the manifest and serializer id used. 
*/ class DefaultJournalDao( val db: Database, val profile: JdbcProfile, val journalConfig: JournalConfig, serialization: Serialization)(implicit val ec: ExecutionContext, val mat: Materializer) extends BaseDao[(JournalAkkaSerializationRow, Set[String])] with BaseJournalDaoWithReadMessages with JournalDao with H2Compat { import profile.api._ override def baseDaoConfig: BaseDaoConfig = journalConfig.daoConfig override def writeJournalRows(xs: immutable.Seq[(JournalAkkaSerializationRow, Set[String])]): Future[Unit] = { db.run(queries.writeJournalRows(xs).transactionally).map(_ => ())(ExecutionContext.parasitic) } val queries = new JournalQueries(profile, journalConfig.eventJournalTableConfiguration, journalConfig.eventTagTableConfiguration) override def deleteEventsTo(persistenceId: String, toSequenceNr: Long, resetSequenceNumber: Boolean): Future[Unit] = { // note: the passed toSequenceNr will be Long.MaxValue when doing a 'full' journal clean-up // see JournalSpec's test: 'not reset highestSequenceNr after journal cleanup' val actions: DBIOAction[Unit, NoStream, Effect.Write with Effect.Read] = { // If we're resetting the sequence number, no need to determine the highest sequence number. if (resetSequenceNumber) { queries.delete(persistenceId, toSequenceNr).map(_ => ()) } else { highestSequenceNrAction(persistenceId) .flatMap { // are we trying to delete the highest or even higher seqNr ? case highestSeqNr if highestSeqNr <= toSequenceNr => // if so, we delete up to the before last and // mark the last as logically deleted preserving highestSeqNr queries .delete(persistenceId, highestSeqNr - 1) .flatMap(_ => queries.markAsDeleted(persistenceId, highestSeqNr)) case _ => // if not, we delete up to the requested seqNr queries.delete(persistenceId, toSequenceNr) } .map(_ => ()) } } db.run(actions.transactionally) } override def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = db.run(highestSequenceNrAction(persistenceId)) private def highestSequenceNrAction(persistenceId: String): DBIOAction[Long, NoStream, Effect.Read] = queries.highestSequenceNrForPersistenceId(persistenceId).result.map(_.getOrElse(0)) private def highestMarkedSequenceNr(persistenceId: String) = queries.highestMarkedSequenceNrForPersistenceId(persistenceId).result override def asyncWriteMessages(messages: immutable.Seq[AtomicWrite]): Future[immutable.Seq[Try[Unit]]] = { def serializeAtomicWrite(aw: AtomicWrite): Try[Seq[(JournalAkkaSerializationRow, Set[String])]] = { Try(aw.payload.map(serialize)) } def serialize(pr: PersistentRepr): (JournalAkkaSerializationRow, Set[String]) = { val (updatedPr, tags) = pr.payload match { case Tagged(payload, tags) => (pr.withPayload(payload), tags) case _ => (pr, Set.empty[String]) } val serializedPayload = AkkaSerialization.serialize(serialization, updatedPr.payload).get val serializedMetadata = updatedPr.metadata.flatMap(m => AkkaSerialization.serialize(serialization, m).toOption) val row = JournalAkkaSerializationRow( Long.MinValue, updatedPr.deleted, updatedPr.persistenceId, updatedPr.sequenceNr, updatedPr.writerUuid, updatedPr.timestamp, updatedPr.manifest, serializedPayload.payload, serializedPayload.serId, serializedPayload.serManifest, serializedMetadata.map(_.payload), serializedMetadata.map(_.serId), serializedMetadata.map(_.serManifest)) (row, tags) } val serializedTries = messages.map(serializeAtomicWrite) val rowsToWrite: Seq[(JournalAkkaSerializationRow, Set[String])] = for { serializeTry <- serializedTries row <- serializeTry.getOrElse(Seq.empty) 
} yield row def resultWhenWriteComplete = if (serializedTries.forall(_.isSuccess)) Nil else serializedTries.map(_.map(_ => ())) queueWriteJournalRows(rowsToWrite).map(_ => resultWhenWriteComplete) } override def messages( persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = { Source .fromPublisher( db.stream( queries.messagesQuery((persistenceId, fromSequenceNr, toSequenceNr, correctMaxForH2Driver(max))).result)) .map(AkkaSerialization.fromRow(serialization)(_)) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/FlowControl.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.journal.dao private[jdbc] sealed trait FlowControl private[jdbc] object FlowControl { /** Keep querying - used when we are sure that there is more events to fetch */ case object Continue extends FlowControl /** * Keep querying with delay - used when we have consumed all events, * but want to poll for future events */ case object ContinueDelayed extends FlowControl /** Stop querying - used when we reach the desired offset */ case object Stop extends FlowControl } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/H2Compat.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.journal.dao import slick.jdbc.JdbcProfile trait H2Compat { val profile: JdbcProfile private lazy val isH2Driver = profile match { case slick.jdbc.H2Profile => true case _ => false } def correctMaxForH2Driver(max: Long): Long = { if (isH2Driver) { Math.min(max, Int.MaxValue) // H2 only accepts a LIMIT clause as an Integer } else { max } } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDao.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.journal.dao import akka.persistence.AtomicWrite import java.time.Instant import scala.collection.immutable.Seq import scala.concurrent.Future import scala.util.Try trait JournalDao extends JournalDaoWithReadMessages { /** * Deletes all persistent messages up to toSequenceNr (inclusive) for the persistenceId */ def delete(persistenceId: String, toSequenceNr: Long): Future[Unit] = deleteEventsTo(persistenceId, toSequenceNr, false) /** * Deletes all persistent events up to toSequenceNr (inclusive) for the persistenceId */ def deleteEventsTo(persistenceId: String, toSequenceNr: Long, resetSequenceNumber: Boolean): Future[Unit] /** * Returns the highest sequence number for the events that are stored for that `persistenceId`. 
When no events are * found for the `persistenceId`, 0L will be the highest sequence number */ def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] /** * @see [[akka.persistence.journal.AsyncWriteJournal.asyncWriteMessages(messages)]] */ def asyncWriteMessages(messages: Seq[AtomicWrite]): Future[Seq[Try[Unit]]] } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDaoInstantiation.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.journal.dao import akka.actor.{ ActorSystem, ExtendedActorSystem } import akka.annotation.InternalApi import akka.persistence.jdbc.config.JournalConfig import akka.persistence.jdbc.db.SlickDatabase import akka.serialization.{ Serialization, SerializationExtension } import akka.stream.Materializer import slick.jdbc.JdbcBackend.Database import slick.jdbc.JdbcProfile import scala.concurrent.ExecutionContext import scala.util.{ Failure, Success } @InternalApi private[jdbc] object JournalDaoInstantiation { def journalDao( journalConfig: JournalConfig, slickDb: SlickDatabase)(implicit system: ActorSystem, ec: ExecutionContext, mat: Materializer): JournalDao = { val fqcn = journalConfig.pluginConfig.dao val profile: JdbcProfile = slickDb.profile val args = Seq( (classOf[Database], slickDb.database), (classOf[JdbcProfile], profile), (classOf[JournalConfig], journalConfig), (classOf[Serialization], SerializationExtension(system)), (classOf[ExecutionContext], ec), (classOf[Materializer], mat)) system.asInstanceOf[ExtendedActorSystem].dynamicAccess.createInstanceFor[JournalDao](fqcn, args) match { case Success(dao) => dao case Failure(cause) => throw cause } } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDaoWithReadMessages.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.journal.dao import scala.concurrent.duration.FiniteDuration import scala.util.Try import akka.NotUsed import akka.actor.Scheduler import akka.persistence.PersistentRepr import akka.stream.scaladsl.Source trait JournalDaoWithReadMessages { /** * Returns a Source of PersistentRepr and ordering number for a certain persistenceId. * It includes the events with sequenceNr between `fromSequenceNr` (inclusive) and * `toSequenceNr` (inclusive). */ def messages( persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] /** * Returns a Source of PersistentRepr and ordering number for a certain persistenceId. * It includes the events with sequenceNr between `fromSequenceNr` (inclusive) and * `toSequenceNr` (inclusive). */ def messagesWithBatch( persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, batchSize: Int, refreshInterval: Option[(FiniteDuration, Scheduler)]): Source[Try[(PersistentRepr, Long)], NotUsed] } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDaoWithUpdates.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
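[editor's note] Because the DAO is created reflectively by JournalDaoInstantiation (shown above), an alternative DAO configured via jdbc-journal.dao must expose a constructor with exactly those parameter types. A hedged sketch follows; the class name is illustrative and extending DefaultJournalDao is merely one convenient way to reuse the default behaviour:

// Hedged sketch of a custom DAO matching the constructor arguments passed reflectively;
// it would be configured as jdbc-journal.dao = "<fqcn of this class>".
import akka.persistence.jdbc.config.JournalConfig
import akka.persistence.jdbc.journal.dao.DefaultJournalDao
import akka.serialization.Serialization
import akka.stream.Materializer
import slick.jdbc.JdbcBackend.Database
import slick.jdbc.JdbcProfile
import scala.concurrent.ExecutionContext

class MyJournalDao(db: Database, profile: JdbcProfile, journalConfig: JournalConfig, serialization: Serialization)(
    implicit ec: ExecutionContext,
    mat: Materializer)
    extends DefaultJournalDao(db, profile, journalConfig, serialization) {
  // override individual JournalDao operations here as needed
}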
*/ package akka.persistence.jdbc.journal.dao import akka.Done import scala.concurrent.Future /** * A [[JournalDao]] with extended capabilities, such as updating payloads and tags of existing events. * These operations should be used sparingly, for example for migrating data from un-encrypted to encrypted formats */ trait JournalDaoWithUpdates extends JournalDao { /** * Update (!) an existing event with the passed in data. */ def update(persistenceId: String, sequenceNr: Long, payload: AnyRef): Future[Done] } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalQueries.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.journal.dao import akka.persistence.jdbc.config.{ EventJournalTableConfiguration, EventTagTableConfiguration } import akka.persistence.jdbc.journal.dao.JournalTables.{ JournalAkkaSerializationRow, TagRow } import slick.jdbc.JdbcProfile import scala.concurrent.ExecutionContext class JournalQueries( val profile: JdbcProfile, override val journalTableCfg: EventJournalTableConfiguration, override val tagTableCfg: EventTagTableConfiguration) extends JournalTables { import profile.api._ private val JournalTableC = Compiled(JournalTable) private val insertAndReturn = JournalTable.returning(JournalTable.map(_.ordering)) private val TagTableC = Compiled(TagTable) def writeJournalRows(xs: Seq[(JournalAkkaSerializationRow, Set[String])])( implicit ec: ExecutionContext): DBIOAction[Any, NoStream, Effect.Write] = { val sorted = xs.sortBy(event => event._1.sequenceNumber) if (sorted.exists(_._2.nonEmpty)) { // only if there are any tags writeEventsAndTags(sorted) } else { // optimization avoid some work when not using tags val events = sorted.map(_._1) JournalTableC ++= events } } private def writeEventsAndTags(sorted: Seq[(JournalAkkaSerializationRow, Set[String])])( implicit ec: ExecutionContext): DBIOAction[Any, NoStream, Effect.Write] = { val (events, _) = sorted.unzip if (tagTableCfg.legacyTagKey) { for { ids <- insertAndReturn ++= events tagInserts = ids.zip(sorted).flatMap { case (id, (e, tags)) => tags.map(tag => TagRow(Some(id), Some(e.persistenceId), Some(e.sequenceNumber), tag)) } _ <- TagTableC ++= tagInserts } yield () } else { val tagInserts = sorted.map { case (e, tags) => tags.map(t => TagRow(None, Some(e.persistenceId), Some(e.sequenceNumber), t)) } // optimization using batch insert for { _ <- JournalTableC ++= events _ <- TagTableC ++= tagInserts.flatten } yield () } } private def selectAllJournalForPersistenceIdDesc(persistenceId: Rep[String]) = selectAllJournalForPersistenceId(persistenceId).sortBy(_.sequenceNumber.desc) private def selectAllJournalForPersistenceId(persistenceId: Rep[String]) = JournalTable.filter(_.persistenceId === persistenceId).sortBy(_.sequenceNumber.desc) def delete(persistenceId: String, toSequenceNr: Long) = { JournalTable.filter(_.persistenceId === persistenceId).filter(_.sequenceNumber <= toSequenceNr).delete } private[akka] def markAsDeleted(persistenceId: String, seqNr: Long) = JournalTable .filter(_.persistenceId === persistenceId) .filter(_.sequenceNumber === seqNr) .filter(_.deleted === false) .map(_.deleted) .update(true) @deprecated(message = "Intended to be internal API", since = "5.4.2") def markJournalMessagesAsDeleted(persistenceId: String, maxSequenceNr: Long) = JournalTable .filter(_.persistenceId === persistenceId) .filter(_.sequenceNumber <= 
maxSequenceNr) .filter(_.deleted === false) .map(_.deleted) .update(true) private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] = selectAllJournalForPersistenceId(persistenceId).take(1).map(_.sequenceNumber).max private def _highestMarkedSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] = selectAllJournalForPersistenceId(persistenceId).filter(_.deleted === true).take(1).map(_.sequenceNumber).max val highestSequenceNrForPersistenceId = Compiled(_highestSequenceNrForPersistenceId _) val highestMarkedSequenceNrForPersistenceId = Compiled(_highestMarkedSequenceNrForPersistenceId _) private def _selectByPersistenceIdAndMaxSequenceNumber(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) = selectAllJournalForPersistenceIdDesc(persistenceId).filter(_.sequenceNumber <= maxSequenceNr) val selectByPersistenceIdAndMaxSequenceNumber = Compiled(_selectByPersistenceIdAndMaxSequenceNumber _) private def _allPersistenceIdsDistinct: Query[Rep[String], String, Seq] = JournalTable.map(_.persistenceId).distinct val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct) def journalRowByPersistenceIds(persistenceIds: Iterable[String]): Query[Rep[String], String, Seq] = for { query <- JournalTable.map(_.persistenceId) if query.inSetBind(persistenceIds) } yield query private def _messagesQuery( persistenceId: Rep[String], fromSequenceNr: Rep[Long], toSequenceNr: Rep[Long], max: ConstColumn[Long]) = JournalTable .filter(_.persistenceId === persistenceId) .filter(_.deleted === false) .filter(_.sequenceNumber >= fromSequenceNr) .filter(_.sequenceNumber <= toSequenceNr) .sortBy(_.sequenceNumber.asc) .take(max) val messagesQuery = Compiled(_messagesQuery _) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalTables.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.journal.dao import akka.annotation.InternalApi import akka.persistence.jdbc.config.{ EventJournalTableConfiguration, EventTagTableConfiguration } import akka.persistence.jdbc.journal.dao.JournalTables.{ JournalAkkaSerializationRow, TagRow } /** * INTERNAL API */ @InternalApi object JournalTables { case class JournalAkkaSerializationRow( ordering: Long, deleted: Boolean, persistenceId: String, sequenceNumber: Long, writer: String, writeTimestamp: Long, adapterManifest: String, eventPayload: Array[Byte], eventSerId: Int, eventSerManifest: String, metaPayload: Option[Array[Byte]], metaSerId: Option[Int], metaSerManifest: Option[String]) case class TagRow(eventId: Option[Long], persistenceId: Option[String], sequenceNumber: Option[Long], tag: String) } /** * For the schema added in 5.0.0 * INTERNAL API */ @InternalApi trait JournalTables { val profile: slick.jdbc.JdbcProfile import profile.api._ def journalTableCfg: EventJournalTableConfiguration def tagTableCfg: EventTagTableConfiguration class JournalEvents(_tableTag: Tag) extends Table[JournalAkkaSerializationRow]( _tableTag, _schemaName = journalTableCfg.schemaName, _tableName = journalTableCfg.tableName) { def * = ( ordering, deleted, persistenceId, sequenceNumber, writer, timestamp, adapterManifest, eventPayload, eventSerId, eventSerManifest, metaPayload, metaSerId, metaSerManifest).<>((JournalAkkaSerializationRow.apply _).tupled, JournalAkkaSerializationRow.unapply) val ordering: Rep[Long] = column[Long](journalTableCfg.columnNames.ordering, O.AutoInc) val persistenceId: Rep[String] = column[String](journalTableCfg.columnNames.persistenceId, O.Length(255, varying = true)) val sequenceNumber: Rep[Long] = column[Long](journalTableCfg.columnNames.sequenceNumber) val deleted: Rep[Boolean] = column[Boolean](journalTableCfg.columnNames.deleted, O.Default(false)) val writer: Rep[String] = column[String](journalTableCfg.columnNames.writer) val adapterManifest: Rep[String] = column[String](journalTableCfg.columnNames.adapterManifest) val timestamp: Rep[Long] = column[Long](journalTableCfg.columnNames.writeTimestamp) val eventPayload: Rep[Array[Byte]] = column[Array[Byte]](journalTableCfg.columnNames.eventPayload) val eventSerId: Rep[Int] = column[Int](journalTableCfg.columnNames.eventSerId) val eventSerManifest: Rep[String] = column[String](journalTableCfg.columnNames.eventSerManifest) val metaPayload: Rep[Option[Array[Byte]]] = column[Option[Array[Byte]]](journalTableCfg.columnNames.metaPayload) val metaSerId: Rep[Option[Int]] = column[Option[Int]](journalTableCfg.columnNames.metaSerId) val metaSerManifest: Rep[Option[String]] = column[Option[String]](journalTableCfg.columnNames.metaSerManifest) val pk = primaryKey(s"${tableName}_pk", (persistenceId, sequenceNumber)) val orderingIdx = index(s"${tableName}_ordering_idx", ordering, unique = true) } lazy val JournalTable = new TableQuery(tag => new JournalEvents(tag)) class EventTags(_tableTag: Tag) extends Table[TagRow](_tableTag, tagTableCfg.schemaName, tagTableCfg.tableName) { override def * = (eventId, persistenceId, sequenceNumber, tag).<>((TagRow.apply _).tupled, TagRow.unapply) // allow null value insert. 
val eventId: Rep[Option[Long]] = column[Option[Long]](tagTableCfg.columnNames.eventId) val persistenceId: Rep[Option[String]] = column[Option[String]](tagTableCfg.columnNames.persistenceId) val sequenceNumber: Rep[Option[Long]] = column[Option[Long]](tagTableCfg.columnNames.sequenceNumber) val tag: Rep[String] = column[String](tagTableCfg.columnNames.tag) val pk = primaryKey(s"${tagTableCfg.tableName}_pk", (persistenceId, sequenceNumber, tag)) val journalEvent = foreignKey(s"fk_${journalTableCfg.tableName}", (persistenceId, sequenceNumber), JournalTable)(e => (Rep.Some(e.persistenceId), Rep.Some(e.sequenceNumber))) } lazy val TagTable = new TableQuery(tag => new EventTags(tag)) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/ByteArrayJournalDao.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.journal.dao.legacy import akka.persistence.jdbc.config.{ BaseDaoConfig, JournalConfig } import akka.persistence.jdbc.journal.dao.{ BaseDao, BaseJournalDaoWithReadMessages, H2Compat, JournalDaoWithUpdates } import akka.persistence.jdbc.serialization.FlowPersistentReprSerializer import akka.persistence.{ AtomicWrite, PersistentRepr } import akka.serialization.Serialization import akka.stream.Materializer import akka.stream.scaladsl.Source import akka.{ Done, NotUsed } import org.slf4j.LoggerFactory import slick.jdbc.JdbcBackend.Database import slick.jdbc.JdbcProfile import scala.annotation.nowarn import scala.collection.immutable.{ Nil, Seq } import scala.concurrent.{ ExecutionContext, Future } import scala.util.{ Failure, Success, Try } class ByteArrayJournalDao( val db: Database, val profile: JdbcProfile, val journalConfig: JournalConfig, serialization: Serialization)(implicit val ec: ExecutionContext, val mat: Materializer) extends BaseByteArrayJournalDao { val queries = new JournalQueries(profile, journalConfig.journalTableConfiguration) val serializer: ByteArrayJournalSerializer = new ByteArrayJournalSerializer(serialization, journalConfig.pluginConfig.tagSeparator) } /** * The DefaultJournalDao contains all the knowledge to persist and load serialized journal entries */ trait BaseByteArrayJournalDao extends BaseDao[JournalRow] with JournalDaoWithUpdates with BaseJournalDaoWithReadMessages with H2Compat { val db: Database val profile: JdbcProfile val queries: JournalQueries val journalConfig: JournalConfig override def baseDaoConfig: BaseDaoConfig = journalConfig.daoConfig @nowarn("msg=deprecated") val serializer: FlowPersistentReprSerializer[JournalRow] implicit val ec: ExecutionContext implicit val mat: Materializer import profile.api._ val logger = LoggerFactory.getLogger(this.getClass) def writeJournalRows(xs: Seq[JournalRow]): Future[Unit] = { // Write atomically without auto-commit db.run(queries.writeJournalRows(xs).transactionally).map(_ => ()) } /** * @see [[akka.persistence.journal.AsyncWriteJournal.asyncWriteMessages(messages)]] */ def asyncWriteMessages(messages: Seq[AtomicWrite]): Future[Seq[Try[Unit]]] = { val serializedTries: Seq[Try[Seq[JournalRow]]] = serializer.serialize(messages) // If serialization fails for some AtomicWrites, the other AtomicWrites may still be written val rowsToWrite: Seq[JournalRow] = for { serializeTry <- serializedTries row <- serializeTry.getOrElse(Seq.empty) } yield row def resultWhenWriteComplete = if (serializedTries.forall(_.isSuccess)) Nil else 
serializedTries.map(_.map(_ => ())) queueWriteJournalRows(rowsToWrite).map(_ => resultWhenWriteComplete) } override def deleteEventsTo( persistenceId: String, maxSequenceNr: Long, resetSequenceNumber: Boolean): Future[Unit] = { val actions: DBIOAction[Unit, NoStream, Effect.Write with Effect.Read] = if (resetSequenceNumber) { queries.delete(persistenceId, maxSequenceNr).map(_ => ()) } else { // We should keep journal record with highest sequence number in order to be compliant // with @see [[akka.persistence.journal.JournalSpec]] for { _ <- queries.markJournalMessagesAsDeleted(persistenceId, maxSequenceNr) highestMarkedSequenceNr <- highestMarkedSequenceNr(persistenceId) _ <- queries.delete(persistenceId, highestMarkedSequenceNr.getOrElse(0L) - 1) } yield () } db.run(actions.transactionally) } def update(persistenceId: String, sequenceNr: Long, payload: AnyRef): Future[Done] = { val write = PersistentRepr(payload, sequenceNr, persistenceId) val serializedRow = serializer.serialize(write) match { case Success(t) => t case Failure(cause) => throw new IllegalArgumentException( s"Failed to serialize ${write.getClass} for update of [$persistenceId] @ [$sequenceNr]", cause) } db.run(queries.update(persistenceId, sequenceNr, serializedRow.message).map(_ => Done)) } private def highestMarkedSequenceNr(persistenceId: String) = queries.highestMarkedSequenceNrForPersistenceId(persistenceId).result override def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = for { maybeHighestSeqNo <- db.run(queries.highestSequenceNrForPersistenceId(persistenceId).result) } yield maybeHighestSeqNo.getOrElse(0L) override def messages( persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = Source .fromPublisher( db.stream( queries.messagesQuery((persistenceId, fromSequenceNr, toSequenceNr, correctMaxForH2Driver(max))).result)) .via(serializer.deserializeFlow) .map { case Success((repr, _, ordering)) => Success(repr -> ordering) case Failure(e) => Failure(e) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/ByteArrayJournalSerializer.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc package journal.dao.legacy import akka.persistence.PersistentRepr import akka.persistence.jdbc.serialization.FlowPersistentReprSerializer import akka.serialization.Serialization import scala.annotation.nowarn import scala.collection.immutable._ import scala.util.Try @nowarn("msg=deprecated") class ByteArrayJournalSerializer(serialization: Serialization, separator: String) extends FlowPersistentReprSerializer[JournalRow] { override def serialize(persistentRepr: PersistentRepr, tags: Set[String]): Try[JournalRow] = { serialization .serialize(persistentRepr) .map( JournalRow( Long.MinValue, persistentRepr.deleted, persistentRepr.persistenceId, persistentRepr.sequenceNr, _, encodeTags(tags, separator))) } override def deserialize(journalRow: JournalRow): Try[(PersistentRepr, Set[String], Long)] = { serialization .deserialize(journalRow.message, classOf[PersistentRepr]) .map((_, decodeTags(journalRow.tags, separator), journalRow.ordering)) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/JournalQueries.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc package journal.dao.legacy import akka.persistence.jdbc.config.LegacyJournalTableConfiguration import slick.jdbc.JdbcProfile class JournalQueries(val profile: JdbcProfile, override val journalTableCfg: LegacyJournalTableConfiguration) extends JournalTables { import profile.api._ private val JournalTableC = Compiled(JournalTable) def writeJournalRows(xs: Seq[JournalRow]) = JournalTableC ++= xs.sortBy(_.sequenceNumber) private def selectAllJournalForPersistenceIdDesc(persistenceId: Rep[String]) = selectAllJournalForPersistenceId(persistenceId).sortBy(_.sequenceNumber.desc) private def selectAllJournalForPersistenceId(persistenceId: Rep[String]) = JournalTable.filter(_.persistenceId === persistenceId).sortBy(_.sequenceNumber.desc) def delete(persistenceId: String, toSequenceNr: Long) = { JournalTable.filter(_.persistenceId === persistenceId).filter(_.sequenceNumber <= toSequenceNr).delete } /** * Updates (!) a payload stored in a specific events row. * Intended to be used sparingly, e.g. moving all events to their encrypted counterparts. 
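 *
 * A minimal usage sketch (illustrative only): it assumes the Slick `db`, an instance of this
 * `queries` class and an already serialized `newPayload: Array[Byte]` are available in the
 * calling code, as they are in the surrounding DAOs.
 * {{{
 * // overwrite the stored payload of event 42 of persistence id "p-1"
 * db.run(queries.update("p-1", 42L, newPayload))
 * }}}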
*/ def update(persistenceId: String, seqNr: Long, replacement: Array[Byte]) = { val baseQuery = JournalTable.filter(_.persistenceId === persistenceId).filter(_.sequenceNumber === seqNr) baseQuery.map(_.message).update(replacement) } def markJournalMessagesAsDeleted(persistenceId: String, maxSequenceNr: Long) = JournalTable .filter(_.persistenceId === persistenceId) .filter(_.sequenceNumber <= maxSequenceNr) .filter(_.deleted === false) .map(_.deleted) .update(true) private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] = selectAllJournalForPersistenceId(persistenceId).take(1).map(_.sequenceNumber).max private def _highestMarkedSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] = selectAllJournalForPersistenceId(persistenceId).filter(_.deleted === true).take(1).map(_.sequenceNumber).max val highestSequenceNrForPersistenceId = Compiled(_highestSequenceNrForPersistenceId _) val highestMarkedSequenceNrForPersistenceId = Compiled(_highestMarkedSequenceNrForPersistenceId _) private def _selectByPersistenceIdAndMaxSequenceNumber(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) = selectAllJournalForPersistenceIdDesc(persistenceId).filter(_.sequenceNumber <= maxSequenceNr) val selectByPersistenceIdAndMaxSequenceNumber = Compiled(_selectByPersistenceIdAndMaxSequenceNumber _) private def _allPersistenceIdsDistinct: Query[Rep[String], String, Seq] = JournalTable.map(_.persistenceId).distinct val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct) def journalRowByPersistenceIds(persistenceIds: Iterable[String]): Query[Rep[String], String, Seq] = for { query <- JournalTable.map(_.persistenceId) if query.inSetBind(persistenceIds) } yield query private def _messagesQuery( persistenceId: Rep[String], fromSequenceNr: Rep[Long], toSequenceNr: Rep[Long], max: ConstColumn[Long]) = JournalTable .filter(_.persistenceId === persistenceId) .filter(_.deleted === false) .filter(_.sequenceNumber >= fromSequenceNr) .filter(_.sequenceNumber <= toSequenceNr) .sortBy(_.sequenceNumber.asc) .take(max) val messagesQuery = Compiled(_messagesQuery _) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/JournalTables.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.journal.dao.legacy import akka.persistence.jdbc.config.LegacyJournalTableConfiguration trait JournalTables { val profile: slick.jdbc.JdbcProfile import profile.api._ def journalTableCfg: LegacyJournalTableConfiguration class Journal(_tableTag: Tag) extends Table[JournalRow]( _tableTag, _schemaName = journalTableCfg.schemaName, _tableName = journalTableCfg.tableName) { def * = (ordering, deleted, persistenceId, sequenceNumber, message, tags) .<>((JournalRow.apply _).tupled, JournalRow.unapply) val ordering: Rep[Long] = column[Long](journalTableCfg.columnNames.ordering, O.AutoInc) val persistenceId: Rep[String] = column[String](journalTableCfg.columnNames.persistenceId, O.Length(255, varying = true)) val sequenceNumber: Rep[Long] = column[Long](journalTableCfg.columnNames.sequenceNumber) val deleted: Rep[Boolean] = column[Boolean](journalTableCfg.columnNames.deleted, O.Default(false)) val tags: Rep[Option[String]] = column[Option[String]](journalTableCfg.columnNames.tags, O.Length(255, varying = true)) val message: Rep[Array[Byte]] = column[Array[Byte]](journalTableCfg.columnNames.message) val pk = primaryKey(s"${tableName}_pk", (persistenceId, sequenceNumber)) val orderingIdx = index(s"${tableName}_ordering_idx", ordering, unique = true) } lazy val JournalTable = new TableQuery(tag => new Journal(tag)) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/package.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.journal.dao package object legacy { final case class JournalRow( ordering: Long, deleted: Boolean, persistenceId: String, sequenceNumber: Long, message: Array[Byte], tags: Option[String] = None) def encodeTags(tags: Set[String], separator: String): Option[String] = if (tags.isEmpty) None else Option(tags.mkString(separator)) def decodeTags(tags: Option[String], separator: String): Set[String] = tags.map(_.split(separator).toSet).getOrElse(Set.empty[String]) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/query/JdbcReadJournalProvider.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query import akka.actor.ExtendedActorSystem import akka.persistence.query.ReadJournalProvider import com.typesafe.config.Config class JdbcReadJournalProvider(system: ExtendedActorSystem, config: Config, configPath: String) extends ReadJournalProvider { override def scaladslReadJournal(): scaladsl.JdbcReadJournal = new scaladsl.JdbcReadJournal(config, configPath)(system) override def javadslReadJournal(): javadsl.JdbcReadJournal = new javadsl.JdbcReadJournal(scaladslReadJournal()) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/query/JournalSequenceActor.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc package query import akka.actor.{ Actor, ActorLogging, Props, Status, Timers } import akka.pattern.pipe import akka.persistence.jdbc.config.JournalSequenceRetrievalConfig import akka.persistence.jdbc.query.dao.ReadJournalDao import akka.stream.Materializer import akka.stream.scaladsl.Sink import scala.collection.immutable.NumericRange import scala.concurrent.duration.FiniteDuration object JournalSequenceActor { def props(readJournalDao: ReadJournalDao, config: JournalSequenceRetrievalConfig)( implicit materializer: Materializer): Props = Props(new JournalSequenceActor(readJournalDao, config)) private case object QueryOrderingIds private case class NewOrderingIds(originalOffset: Long, elements: Seq[OrderingId]) private case class ScheduleAssumeMaxOrderingId(max: OrderingId) private case class AssumeMaxOrderingId(max: OrderingId) case object GetMaxOrderingId case class MaxOrderingId(maxOrdering: OrderingId) private case object QueryOrderingIdsTimerKey private case object AssumeMaxOrderingIdTimerKey private type OrderingId = Long /** * Efficient representation of missing elements using NumericRanges. * It can be seen as a collection of OrderingIds */ private case class MissingElements(elements: Seq[NumericRange[OrderingId]]) { def addRange(from: OrderingId, until: OrderingId): MissingElements = { val newRange = from.until(until) MissingElements(elements :+ newRange) } def contains(id: OrderingId): Boolean = elements.exists(_.containsTyped(id)) def isEmpty: Boolean = elements.forall(_.isEmpty) } private object MissingElements { def empty: MissingElements = MissingElements(Vector.empty) } } /** * To support the EventsByTag query, this actor keeps track of which rows are visible in the database. * This is required to guarantee the EventByTag does not skip any rows in case rows with a higher (ordering) id are * visible in the database before rows with a lower (ordering) id. */ class JournalSequenceActor(readJournalDao: ReadJournalDao, config: JournalSequenceRetrievalConfig)( implicit materializer: Materializer) extends Actor with ActorLogging with Timers { import JournalSequenceActor._ import context.dispatcher import config.{ batchSize, maxBackoffQueryDelay, maxTries, queryDelay } override def receive: Receive = receive(0L, Map.empty, 0) override def preStart(): Unit = { self ! QueryOrderingIds readJournalDao.maxJournalSequence().mapTo[Long].onComplete { case scala.util.Success(maxInDatabase) => self ! ScheduleAssumeMaxOrderingId(maxInDatabase) case scala.util.Failure(t) => log.info("Failed to recover fast, using event-by-event recovery instead. Cause: {}", t) } } /** * @param currentMaxOrdering The highest ordering value for which it is known that no missing elements exist * @param missingByCounter A map with missing orderingIds. The key of the map is the count at which the missing elements * can be assumed to be "skipped ids" (they are no longer assumed missing). 
* @param moduloCounter A counter which is incremented every time a new query have been executed, modulo `maxTries` * @param previousDelay The last used delay (may change in case failures occur) */ private def receive( currentMaxOrdering: OrderingId, missingByCounter: Map[Int, MissingElements], moduloCounter: Int, previousDelay: FiniteDuration = queryDelay): Receive = { case ScheduleAssumeMaxOrderingId(max) => // All elements smaller than max can be assumed missing after this delay val delay = queryDelay * maxTries timers.startSingleTimer(key = AssumeMaxOrderingIdTimerKey, AssumeMaxOrderingId(max), delay) case AssumeMaxOrderingId(max) => if (currentMaxOrdering < max) { context.become(receive(max, missingByCounter, moduloCounter, previousDelay)) } case GetMaxOrderingId => sender() ! MaxOrderingId(currentMaxOrdering) case QueryOrderingIds => readJournalDao .journalSequence(currentMaxOrdering, batchSize) .runWith(Sink.seq) .map(result => NewOrderingIds(currentMaxOrdering, result)) .pipeTo(self) case NewOrderingIds(originalOffset, _) if originalOffset < currentMaxOrdering => // search was done using an offset that became obsolete in the meantime // therefore we start a new query self ! QueryOrderingIds case NewOrderingIds(_, elements) => findGaps(elements, currentMaxOrdering, missingByCounter, moduloCounter) case Status.Failure(t) => val newDelay = maxBackoffQueryDelay.min(previousDelay * 2) if (newDelay == maxBackoffQueryDelay) { log.warning("Failed to query max ordering id because of {}, retrying in {}", t, newDelay) } scheduleQuery(newDelay) context.become(receive(currentMaxOrdering, missingByCounter, moduloCounter, newDelay)) } /** * This method that implements the "find gaps" algo. It's the meat and main purpose of this actor. */ private def findGaps( elements: Seq[OrderingId], currentMaxOrdering: OrderingId, missingByCounter: Map[Int, MissingElements], moduloCounter: Int): Unit = { // list of elements that will be considered as genuine gaps. // `givenUp` is either empty or is was filled on a previous iteration val givenUp = missingByCounter.getOrElse(moduloCounter, MissingElements.empty) val (nextMax, _, missingElems) = // using the ordering elements that were fetched, we verify if there are any gaps elements.foldLeft[(OrderingId, OrderingId, MissingElements)]( (currentMaxOrdering, currentMaxOrdering, MissingElements.empty)) { case ((currentMax, previousElement, missing), currentElement) => // we must decide if we move the cursor forward val newMax = { val maxCandidate = currentMax + 1 if ((currentElement - maxCandidate) < Int.MaxValue) { if ((currentMax + 1).until(currentElement).forall(givenUp.contains)) { // we move the cursor forward when: // 1) they have been detected as missing on previous iteration, it's time now to give up // 2) current + 1 == currentElement (meaning no gap). Note that `forall` on an empty range always returns true currentElement } else currentMax } else { // we can't iterate over this... assume that forall failed // the AssumeMaxOrderingId will advance the currentMaxOrdering currentMax } } // we accumulate in newMissing the gaps we detect on each iteration val newMissing = if (previousElement + 1 == currentElement || newMax == currentElement) missing else missing.addRange(previousElement + 1, currentElement) (newMax, currentElement, newMissing) } val newMissingByCounter = missingByCounter + (moduloCounter -> missingElems) // did we detect gaps in the current batch? 
val noGapsFound = missingElems.isEmpty // full batch means that we retrieved as much elements as the batchSize // that happens when we are not yet at the end of the stream val isFullBatch = elements.size == batchSize if (noGapsFound && isFullBatch) { // Many elements have been retrieved but none are missing // We can query again immediately, as this allows the actor to rapidly retrieve the real max ordering self ! QueryOrderingIds context.become(receive(nextMax, newMissingByCounter, moduloCounter)) } else { // either we detected gaps or we reached the end of stream (batch not full) // in this case we want to keep querying but not immediately scheduleQuery(queryDelay) context.become(receive(nextMax, newMissingByCounter, (moduloCounter + 1) % maxTries)) } } def scheduleQuery(delay: FiniteDuration): Unit = { timers.startSingleTimer(key = QueryOrderingIdsTimerKey, QueryOrderingIds, delay) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/query/dao/DefaultReadJournalDao.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query.dao import akka.NotUsed import akka.persistence.PersistentRepr import akka.persistence.jdbc.AkkaSerialization import akka.persistence.jdbc.config.ReadJournalConfig import akka.persistence.jdbc.journal.dao.{ BaseJournalDaoWithReadMessages, H2Compat } import akka.serialization.Serialization import akka.stream.Materializer import akka.stream.scaladsl.Source import slick.jdbc.JdbcBackend.Database import slick.jdbc.JdbcProfile import scala.concurrent.{ ExecutionContext, Future } import scala.util.Try class DefaultReadJournalDao( val db: Database, val profile: JdbcProfile, val readJournalConfig: ReadJournalConfig, serialization: Serialization)(implicit val ec: ExecutionContext, val mat: Materializer) extends ReadJournalDao with BaseJournalDaoWithReadMessages with H2Compat { import profile.api._ val queries = new ReadJournalQueries(profile, readJournalConfig) override def allPersistenceIdsSource(max: Long): Source[String, NotUsed] = Source.fromPublisher(db.stream(queries.allPersistenceIdsDistinct(correctMaxForH2Driver(max)).result)) override def eventsByTag( tag: String, offset: Long, maxOffset: Long, max: Long): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed] = { // This doesn't populate the tags. 
AFAICT they aren't used Source .fromPublisher(db.stream(queries.eventsByTag((tag, offset, maxOffset, correctMaxForH2Driver(max))).result)) .map(row => AkkaSerialization.fromRow(serialization)(row).map { case (repr, ordering) => (repr, Set.empty, ordering) }) } override def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] = Source.fromPublisher(db.stream(queries.journalSequenceQuery((offset, limit)).result)) override def maxJournalSequence(): Future[Long] = db.run(queries.maxJournalSequenceQuery.result) override def messages( persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = Source .fromPublisher( db.stream( queries.messagesQuery((persistenceId, fromSequenceNr, toSequenceNr, correctMaxForH2Driver(max))).result)) .map(AkkaSerialization.fromRow(serialization)(_)) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/query/dao/ReadJournalDao.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query.dao import akka.NotUsed import akka.persistence.PersistentRepr import akka.persistence.jdbc.journal.dao.JournalDaoWithReadMessages import akka.stream.scaladsl.Source import scala.collection.immutable.Set import scala.concurrent.Future import scala.util.Try trait ReadJournalDao extends JournalDaoWithReadMessages { /** * Returns a distinct stream of persistenceIds */ def allPersistenceIdsSource(max: Long): Source[String, NotUsed] /** * Returns a Source of deserialized data for a certain tag from an offset. The result is sorted by * the global ordering of the events. * Each element will be a Try with a PersistentRepr, set of tags, and a Long representing the global ordering of events */ def eventsByTag( tag: String, offset: Long, maxOffset: Long, max: Long): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed] /** * @param offset Minimum value to retrieve * @param limit Maximum number of values to retrieve * @return A Source of journal event sequence numbers (corresponding to the Ordering column) */ def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] /** * @return The value of the maximum (ordering) id in the journal */ def maxJournalSequence(): Future[Long] } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/query/dao/ReadJournalQueries.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc.
*/ package akka.persistence.jdbc.query.dao import akka.persistence.jdbc.config.{ EventJournalTableConfiguration, EventTagTableConfiguration, ReadJournalConfig } import akka.persistence.jdbc.journal.dao.JournalTables import slick.jdbc.JdbcProfile class ReadJournalQueries(val profile: JdbcProfile, val readJournalConfig: ReadJournalConfig) extends JournalTables { override val journalTableCfg: EventJournalTableConfiguration = readJournalConfig.eventJournalTableConfiguration override def tagTableCfg: EventTagTableConfiguration = readJournalConfig.eventTagTableConfiguration import profile.api._ def journalRowByPersistenceIds(persistenceIds: Iterable[String]) = for { query <- JournalTable.map(_.persistenceId) if query.inSetBind(persistenceIds) } yield query private def _allPersistenceIdsDistinct(max: ConstColumn[Long]): Query[Rep[String], String, Seq] = baseTableQuery().map(_.persistenceId).distinct.take(max) private def baseTableQuery() = JournalTable.filter(_.deleted === false) private def baseTableWithTagsQuery() = { if (tagTableCfg.legacyTagKey) { baseTableQuery().join(TagTable).on(_.ordering === _.eventId) } else { baseTableQuery() .join(TagTable) .on((e, t) => e.persistenceId === t.persistenceId && e.sequenceNumber === t.sequenceNumber) } } val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct _) private def _messagesQuery( persistenceId: Rep[String], fromSequenceNr: Rep[Long], toSequenceNr: Rep[Long], max: ConstColumn[Long]) = baseTableQuery() .filter(_.persistenceId === persistenceId) .filter(_.sequenceNumber >= fromSequenceNr) .filter(_.sequenceNumber <= toSequenceNr) .filter(!_.deleted) .sortBy(_.sequenceNumber.asc) .take(max) val messagesQuery = Compiled(_messagesQuery _) private def _eventsByTag( tag: Rep[String], offset: ConstColumn[Long], maxOffset: ConstColumn[Long], max: ConstColumn[Long]) = { baseTableWithTagsQuery() .filter(_._2.tag === tag) .sortBy(_._1.ordering.asc) .filter(row => row._1.ordering > offset && row._1.ordering <= maxOffset) .take(max) .map(_._1) } val eventsByTag = Compiled(_eventsByTag _) private def _journalSequenceQuery(from: ConstColumn[Long], limit: ConstColumn[Long]) = JournalTable.filter(_.ordering > from).map(_.ordering).sorted.take(limit) val journalSequenceQuery = Compiled(_journalSequenceQuery _) val maxJournalSequenceQuery = Compiled { JournalTable.map(_.ordering).max.getOrElse(0L) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/query/dao/legacy/ByteArrayReadJournalDao.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.query.dao.legacy import akka.NotUsed import akka.persistence.PersistentRepr import akka.persistence.jdbc.config.ReadJournalConfig import akka.persistence.jdbc.journal.dao.{ BaseJournalDaoWithReadMessages, H2Compat } import akka.persistence.jdbc.journal.dao.legacy.{ ByteArrayJournalSerializer, JournalRow } import akka.persistence.jdbc.query.dao.ReadJournalDao import akka.persistence.jdbc.query.dao.legacy.TagFilterFlow.perfectlyMatchTag import akka.persistence.jdbc.serialization.FlowPersistentReprSerializer import akka.serialization.Serialization import akka.stream.Materializer import akka.stream.scaladsl.{ Flow, Source } import slick.jdbc.JdbcBackend._ import slick.jdbc.{ GetResult, JdbcProfile } import scala.annotation.nowarn import scala.collection.immutable._ import scala.concurrent.{ ExecutionContext, Future } import scala.util.{ Failure, Success, Try } trait BaseByteArrayReadJournalDao extends ReadJournalDao with BaseJournalDaoWithReadMessages with H2Compat { def db: Database val profile: JdbcProfile def queries: ReadJournalQueries @nowarn("msg=deprecated") def serializer: FlowPersistentReprSerializer[JournalRow] def readJournalConfig: ReadJournalConfig import profile.api._ override def allPersistenceIdsSource(max: Long): Source[String, NotUsed] = Source.fromPublisher(db.stream(queries.allPersistenceIdsDistinct(correctMaxForH2Driver(max)).result)) override def eventsByTag( tag: String, offset: Long, maxOffset: Long, max: Long): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed] = { val publisher = db.stream(queries.eventsByTag((s"%$tag%", offset, maxOffset, correctMaxForH2Driver(max))).result) // applies workaround for https://github.com/akka/akka-persistence-jdbc/issues/168 Source .fromPublisher(publisher) .via(perfectlyMatchTag(tag, readJournalConfig.pluginConfig.tagSeparator)) .via(serializer.deserializeFlow) } override def messages( persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = { Source .fromPublisher( db.stream( queries.messagesQuery((persistenceId, fromSequenceNr, toSequenceNr, correctMaxForH2Driver(max))).result)) .via(serializer.deserializeFlow) .map { case Success((repr, _, ordering)) => Success(repr -> ordering) case Failure(e) => Failure(e) } } override def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] = Source.fromPublisher(db.stream(queries.journalSequenceQuery((offset, limit)).result)) override def maxJournalSequence(): Future[Long] = { db.run(queries.maxJournalSequenceQuery.result) } } object TagFilterFlow { /* * Returns a Flow that retains every event with tags that perfectly match passed tag. 
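 * For example, with separator "," the SQL pre-filter `LIKE '%import%'` also matches a row tagged
 * "important,current"; this flow drops such rows because none of the split tags equals "import".
 * A minimal sketch (assumes an Akka Streams setup is in scope; the row values are made up):
 * {{{
 * Source.single(JournalRow(1L, deleted = false, "pid-1", 1L, Array.emptyByteArray, Some("important,current")))
 *   .via(perfectlyMatchTag("import", ","))   // emits no elements: "import" is not an exact tag
 * }}}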
* This is a workaround for bug https://github.com/akka/akka-persistence-jdbc/issues/168 */ private[dao] def perfectlyMatchTag(tag: String, separator: String) = Flow[JournalRow].filter(_.tags.exists(tags => tags.split(separator).contains(tag))) } trait OracleReadJournalDao extends ReadJournalDao { val db: Database val profile: JdbcProfile val readJournalConfig: ReadJournalConfig val queries: ReadJournalQueries @nowarn("msg=deprecated") val serializer: FlowPersistentReprSerializer[JournalRow] import readJournalConfig.journalTableConfiguration._ import columnNames._ val theTableName = schemaName.map(_ + ".").getOrElse("") + s""""$tableName"""" import profile.api._ private def isOracleDriver(profile: JdbcProfile): Boolean = profile match { case slick.jdbc.OracleProfile => true case _ => false } abstract override def allPersistenceIdsSource(max: Long): Source[String, NotUsed] = { if (isOracleDriver(profile)) { val selectStatement = sql"""SELECT DISTINCT "#$persistenceId" FROM #$theTableName WHERE rownum <= $max""".as[String] Source.fromPublisher(db.stream(selectStatement)) } else { super.allPersistenceIdsSource(max) } } implicit val getJournalRow: GetResult[JournalRow] = GetResult(r => JournalRow(r.<<, r.<<, r.<<, r.<<, r.nextBytes(), r.<<)) abstract override def eventsByTag( tag: String, offset: Long, maxOffset: Long, max: Long): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed] = { if (isOracleDriver(profile)) { val theOffset = Math.max(0, offset) val theTag = s"%$tag%" val selectStatement = sql""" SELECT "#$ordering", "#$deleted", "#$persistenceId", "#$sequenceNumber", "#$message", "#$tags" FROM ( SELECT * FROM #$theTableName WHERE "#$tags" LIKE $theTag AND "#$ordering" > $theOffset AND "#$ordering" <= $maxOffset AND "#$deleted" = 0 ORDER BY "#$ordering" ) WHERE rownum <= $max""".as[JournalRow] // applies workaround for https://github.com/akka/akka-persistence-jdbc/issues/168 Source .fromPublisher(db.stream(selectStatement)) .via(perfectlyMatchTag(tag, readJournalConfig.pluginConfig.tagSeparator)) .via(serializer.deserializeFlow) } else { super.eventsByTag(tag, offset, maxOffset, max) } } } class ByteArrayReadJournalDao( val db: Database, val profile: JdbcProfile, val readJournalConfig: ReadJournalConfig, serialization: Serialization)(implicit val ec: ExecutionContext, val mat: Materializer) extends BaseByteArrayReadJournalDao with OracleReadJournalDao { val queries = new ReadJournalQueries(profile, readJournalConfig) val serializer: ByteArrayJournalSerializer = new ByteArrayJournalSerializer(serialization, readJournalConfig.pluginConfig.tagSeparator) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/query/dao/legacy/ReadJournalQueries.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.query.dao.legacy import akka.persistence.jdbc.config.{ LegacyJournalTableConfiguration, ReadJournalConfig } import akka.persistence.jdbc.journal.dao.legacy.JournalTables import slick.jdbc.JdbcProfile class ReadJournalQueries(val profile: JdbcProfile, val readJournalConfig: ReadJournalConfig) extends JournalTables { override val journalTableCfg: LegacyJournalTableConfiguration = readJournalConfig.journalTableConfiguration import profile.api._ def journalRowByPersistenceIds(persistenceIds: Iterable[String]) = for { query <- JournalTable.map(_.persistenceId) if query.inSetBind(persistenceIds) } yield query private def _allPersistenceIdsDistinct(max: ConstColumn[Long]): Query[Rep[String], String, Seq] = baseTableQuery().map(_.persistenceId).distinct.take(max) private def baseTableQuery() = JournalTable.filter(_.deleted === false) val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct _) private def _messagesQuery( persistenceId: Rep[String], fromSequenceNr: Rep[Long], toSequenceNr: Rep[Long], max: ConstColumn[Long]) = baseTableQuery() .filter(_.persistenceId === persistenceId) .filter(_.sequenceNumber >= fromSequenceNr) .filter(_.sequenceNumber <= toSequenceNr) .sortBy(_.sequenceNumber.asc) .take(max) val messagesQuery = Compiled(_messagesQuery _) private def _eventsByTag( tag: Rep[String], offset: ConstColumn[Long], maxOffset: ConstColumn[Long], max: ConstColumn[Long]) = { baseTableQuery() .filter(_.tags.like(tag)) .sortBy(_.ordering.asc) .filter(row => row.ordering > offset && row.ordering <= maxOffset) .take(max) } val eventsByTag = Compiled(_eventsByTag _) private def _journalSequenceQuery(from: ConstColumn[Long], limit: ConstColumn[Long]) = JournalTable.filter(_.ordering > from).map(_.ordering).sorted.take(limit) val journalSequenceQuery = Compiled(_journalSequenceQuery _) val maxJournalSequenceQuery = Compiled { JournalTable.map(_.ordering).max.getOrElse(0L) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/query/javadsl/JdbcReadJournal.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query.javadsl import akka.NotUsed import akka.persistence.jdbc.query.scaladsl.{ JdbcReadJournal => ScalaJdbcReadJournal } import akka.persistence.query.{ EventEnvelope, Offset } import akka.persistence.query.javadsl._ import akka.stream.javadsl.Source object JdbcReadJournal { final val Identifier = ScalaJdbcReadJournal.Identifier } class JdbcReadJournal(journal: ScalaJdbcReadJournal) extends ReadJournal with CurrentPersistenceIdsQuery with PersistenceIdsQuery with CurrentEventsByPersistenceIdQuery with EventsByPersistenceIdQuery with CurrentEventsByTagQuery with EventsByTagQuery { /** * Same type of query as `persistenceIds` but the event stream * is completed immediately when it reaches the end of the "result set". Events that are * stored after the query is completed are not included in the event stream. */ override def currentPersistenceIds(): Source[String, NotUsed] = journal.currentPersistenceIds().asJava /** * `persistenceIds` is used to retrieve a stream of all `persistenceId`s as strings. * * The stream guarantees that a `persistenceId` is only emitted once and there are no duplicates. * Order is not defined. Multiple executions of the same stream (even bounded) may emit different * sequence of `persistenceId`s. 
* * The stream is not completed when it reaches the end of the currently known `persistenceId`s, * but it continues to push new `persistenceId`s when new events are persisted. * Corresponding query that is completed when it reaches the end of the currently * known `persistenceId`s is provided by `currentPersistenceIds`. */ override def persistenceIds(): Source[String, NotUsed] = journal.persistenceIds().asJava /** * Same type of query as `eventsByPersistenceId` but the event stream * is completed immediately when it reaches the end of the "result set". Events that are * stored after the query is completed are not included in the event stream. */ override def currentEventsByPersistenceId( persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope, NotUsed] = journal.currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava /** * `eventsByPersistenceId` is used to retrieve a stream of events for a particular persistenceId. * * The `EventEnvelope` contains the event and provides `persistenceId` and `sequenceNr` * for each event. The `sequenceNr` is the sequence number for the persistent actor with the * `persistenceId` that persisted the event. The `persistenceId` + `sequenceNr` is an unique * identifier for the event. * * `fromSequenceNr` and `toSequenceNr` can be specified to limit the set of returned events. * The `fromSequenceNr` and `toSequenceNr` are inclusive. * * The `EventEnvelope` also provides the `offset` that corresponds to the `ordering` column in * the Journal table. The `ordering` is a sequential id number that uniquely identifies the * position of each event, also across different `persistenceId`. The `Offset` type is * `akka.persistence.query.Sequence` with the `ordering` as the offset value. This is the * same `ordering` number as is used in the offset of the `eventsByTag` query. * * The returned event stream is ordered by `sequenceNr`. * * Causality is guaranteed (`sequenceNr`s of events for a particular `persistenceId` are always ordered * in a sequence monotonically increasing by one). Multiple executions of the same bounded stream are * guaranteed to emit exactly the same stream of events. * * The stream is not completed when it reaches the end of the currently stored events, * but it continues to push new events when new events are persisted. * Corresponding query that is completed when it reaches the end of the currently * stored events is provided by `currentEventsByPersistenceId`. */ override def eventsByPersistenceId( persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope, NotUsed] = journal.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava /** * Same type of query as `eventsByTag` but the event stream * is completed immediately when it reaches the end of the "result set". Events that are * stored after the query is completed are not included in the event stream. */ override def currentEventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] = journal.currentEventsByTag(tag, offset).asJava /** * Query events that have a specific tag. * * The consumer can keep track of its current position in the event stream by storing the * `offset` and restart the query from a given `offset` after a crash/restart. * The offset is exclusive, i.e. the event corresponding to the given `offset` parameter is not * included in the stream. * * For akka-persistence-jdbc the `offset` corresponds to the `ordering` column in the Journal table. 
* The `ordering` is a sequential id number that uniquely identifies the position of each event within * the event stream. The `Offset` type is `akka.persistence.query.Sequence` with the `ordering` as the * offset value. * * The returned event stream is ordered by `offset`. * * The stream is not completed when it reaches the end of the currently stored events, * but it continues to push new events when new events are persisted. * Corresponding query that is completed when it reaches the end of the currently * stored events is provided by [[CurrentEventsByTagQuery#currentEventsByTag]]. */ override def eventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] = journal.eventsByTag(tag, offset).asJava } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/query/package.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc import akka.persistence.query._ package object query { implicit class OffsetOps(val that: Offset) extends AnyVal { def value = that match { case Sequence(offsetValue) => offsetValue case NoOffset => 0L case _ => throw new IllegalArgumentException( "akka-persistence-jdbc does not support " + that.getClass.getName + " offsets") } } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/query/scaladsl/JdbcReadJournal.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query package scaladsl import akka.NotUsed import akka.actor.ExtendedActorSystem import akka.persistence.jdbc.config.ReadJournalConfig import akka.persistence.jdbc.query.JournalSequenceActor.{ GetMaxOrderingId, MaxOrderingId } import akka.persistence.jdbc.db.SlickExtension import akka.persistence.jdbc.journal.dao.FlowControl import akka.persistence.query.scaladsl._ import akka.persistence.query.{ EventEnvelope, Offset, Sequence } import akka.persistence.{ Persistence, PersistentRepr } import akka.serialization.{ Serialization, SerializationExtension } import akka.stream.scaladsl.{ Sink, Source } import akka.stream.{ Materializer, SystemMaterializer } import akka.util.Timeout import com.typesafe.config.Config import slick.jdbc.JdbcBackend._ import slick.jdbc.JdbcProfile import scala.collection.immutable._ import scala.concurrent.duration._ import scala.concurrent.{ ExecutionContext, Future } import scala.util.{ Failure, Success } import akka.actor.Scheduler import akka.persistence.jdbc.query.dao.ReadJournalDao import akka.persistence.jdbc.util.PluginVersionChecker object JdbcReadJournal { final val Identifier = "jdbc-read-journal" } class JdbcReadJournal(config: Config, configPath: String)(implicit val system: ExtendedActorSystem) extends ReadJournal with CurrentPersistenceIdsQuery with PersistenceIdsQuery with CurrentEventsByPersistenceIdQuery with EventsByPersistenceIdQuery with CurrentEventsByTagQuery with EventsByTagQuery { PluginVersionChecker.check() implicit val ec: ExecutionContext = system.dispatcher implicit val mat: Materializer = SystemMaterializer(system).materializer val readJournalConfig = new ReadJournalConfig(config) private val writePluginId = config.getString("write-plugin") // If 'config' is empty, or if the plugin reference is not found, then the write plugin will be resolved from the // ActorSystem configuration. 
Otherwise, it will be resolved from the provided 'config'. private val eventAdapters = Persistence(system).adaptersFor(writePluginId, config) val readJournalDao: ReadJournalDao = { val slickDb = SlickExtension(system).database(config) val db = slickDb.database if (readJournalConfig.addShutdownHook && slickDb.allowShutdown) { system.registerOnTermination { db.close() } } val fqcn = readJournalConfig.pluginConfig.dao val profile: JdbcProfile = slickDb.profile val args = Seq( (classOf[Database], db), (classOf[JdbcProfile], profile), (classOf[ReadJournalConfig], readJournalConfig), (classOf[Serialization], SerializationExtension(system)), (classOf[ExecutionContext], ec), (classOf[Materializer], mat)) system.dynamicAccess.createInstanceFor[ReadJournalDao](fqcn, args) match { case Success(dao) => dao case Failure(cause) => throw cause } } // Started lazily to prevent the actor for querying the db if no eventsByTag queries are used private[query] lazy val journalSequenceActor = system.systemActorOf( JournalSequenceActor.props(readJournalDao, readJournalConfig.journalSequenceRetrievalConfiguration), s"$configPath.akka-persistence-jdbc-journal-sequence-actor") /** * Same type of query as `persistenceIds` but the event stream * is completed immediately when it reaches the end of the "result set". Events that are * stored after the query is completed are not included in the event stream. */ override def currentPersistenceIds(): Source[String, NotUsed] = readJournalDao.allPersistenceIdsSource(Long.MaxValue) /** * `persistenceIds` is used to retrieve a stream of all `persistenceId`s as strings. * * The stream guarantees that a `persistenceId` is only emitted once and there are no duplicates. * Order is not defined. Multiple executions of the same stream (even bounded) may emit different * sequence of `persistenceId`s. * * The stream is not completed when it reaches the end of the currently known `persistenceId`s, * but it continues to push new `persistenceId`s when new events are persisted. * Corresponding query that is completed when it reaches the end of the currently * known `persistenceId`s is provided by `currentPersistenceIds`. */ override def persistenceIds(): Source[String, NotUsed] = Source .single(0) .concat(Source.tick(readJournalConfig.refreshInterval, readJournalConfig.refreshInterval, 0)) .flatMapConcat(_ => currentPersistenceIds()) .statefulMapConcat[String] { () => var knownIds = Set.empty[String] def next(id: String): Iterable[String] = { val xs = Set(id).diff(knownIds) knownIds += id xs } id => next(id) } private def adaptEvents(repr: PersistentRepr): Seq[PersistentRepr] = { val adapter = eventAdapters.get(repr.payload.getClass) adapter.fromJournal(repr.payload, repr.manifest).events.map(repr.withPayload) } /** * Same type of query as `eventsByPersistenceId` but the event stream * is completed immediately when it reaches the end of the "result set". Events that are * stored after the query is completed are not included in the event stream. */ override def currentEventsByPersistenceId( persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope, NotUsed] = eventsByPersistenceIdSource(persistenceId, fromSequenceNr, toSequenceNr, None) /** * `eventsByPersistenceId` is used to retrieve a stream of events for a particular persistenceId. * * The `EventEnvelope` contains the event and provides `persistenceId` and `sequenceNr` * for each event. The `sequenceNr` is the sequence number for the persistent actor with the * `persistenceId` that persisted the event. 
The `persistenceId` + `sequenceNr` is an unique * identifier for the event. * * `fromSequenceNr` and `toSequenceNr` can be specified to limit the set of returned events. * The `fromSequenceNr` and `toSequenceNr` are inclusive. * * The `EventEnvelope` also provides the `offset` that corresponds to the `ordering` column in * the Journal table. The `ordering` is a sequential id number that uniquely identifies the * position of each event, also across different `persistenceId`. The `Offset` type is * `akka.persistence.query.Sequence` with the `ordering` as the offset value. This is the * same `ordering` number as is used in the offset of the `eventsByTag` query. * * The returned event stream is ordered by `sequenceNr`. * * Causality is guaranteed (`sequenceNr`s of events for a particular `persistenceId` are always ordered * in a sequence monotonically increasing by one). Multiple executions of the same bounded stream are * guaranteed to emit exactly the same stream of events. * * The stream is not completed when it reaches the end of the currently stored events, * but it continues to push new events when new events are persisted. * Corresponding query that is completed when it reaches the end of the currently * stored events is provided by `currentEventsByPersistenceId`. */ override def eventsByPersistenceId( persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope, NotUsed] = eventsByPersistenceIdSource( persistenceId, fromSequenceNr, toSequenceNr, Some(readJournalConfig.refreshInterval -> system.scheduler)) private def eventsByPersistenceIdSource( persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, refreshInterval: Option[(FiniteDuration, Scheduler)]): Source[EventEnvelope, NotUsed] = { val batchSize = readJournalConfig.maxBufferSize readJournalDao .messagesWithBatch(persistenceId, fromSequenceNr, toSequenceNr, batchSize, refreshInterval) .mapAsync(1)(reprAndOrdNr => Future.fromTry(reprAndOrdNr)) .mapConcat { case (repr, ordNr) => adaptEvents(repr).map(_ -> ordNr) } .map { case (repr, ordNr) => EventEnvelope(Sequence(ordNr), repr.persistenceId, repr.sequenceNr, repr.payload, repr.timestamp, repr.metadata) } } /** * Same type of query as `eventsByTag` but the event stream * is completed immediately when it reaches the end of the "result set". Events that are * stored after the query is completed are not included in the event stream. */ override def currentEventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] = currentEventsByTag(tag, offset.value) private def currentJournalEventsByTag( tag: String, offset: Long, max: Long, latestOrdering: MaxOrderingId): Source[EventEnvelope, NotUsed] = { if (latestOrdering.maxOrdering < offset) Source.empty else { readJournalDao.eventsByTag(tag, offset, latestOrdering.maxOrdering, max).mapAsync(1)(Future.fromTry).mapConcat { case (repr, _, ordering) => adaptEvents(repr).map(r => EventEnvelope(Sequence(ordering), r.persistenceId, r.sequenceNr, r.payload, r.timestamp, r.metadata)) } } } /** * @param terminateAfterOffset If None, the stream never completes. If a Some, then the stream will complete once a * query has been executed which might return an event with this offset (or a higher offset). * The stream may include offsets higher than the value in terminateAfterOffset, since the last batch * will be returned completely. 
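 *
 * The two call sites in this file show the intended use (sketch only):
 * {{{
 * // live query: keeps polling, never completes
 * eventsByTag(tag, offset, terminateAfterOffset = None)
 * // "current" query: completes once the max ordering known at query time has been reached
 * readJournalDao.maxJournalSequence().map(max => eventsByTag(tag, offset, terminateAfterOffset = Some(max)))
 * }}}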
*/ private def eventsByTag( tag: String, offset: Long, terminateAfterOffset: Option[Long]): Source[EventEnvelope, NotUsed] = { import akka.pattern.ask import FlowControl._ implicit val askTimeout: Timeout = Timeout(readJournalConfig.journalSequenceRetrievalConfiguration.askTimeout) val batchSize = readJournalConfig.maxBufferSize val maxOrderingRange = readJournalConfig.eventsByTagBufferSizesPerQuery match { case 0 => None case x => Some(x * batchSize) } def getLoopMaxOrderingId(offset: Long, latestOrdering: MaxOrderingId): MaxOrderingId = maxOrderingRange match { case None => latestOrdering case Some(numberOfEvents) => val limitedMaxOrderingId = offset + numberOfEvents if (limitedMaxOrderingId < 0 || limitedMaxOrderingId >= latestOrdering.maxOrdering) latestOrdering else MaxOrderingId(limitedMaxOrderingId) } Source .unfoldAsync[(Long, FlowControl), Seq[EventEnvelope]]((offset, Continue)) { case (from, control) => def retrieveNextBatch() = { for { queryUntil <- journalSequenceActor.ask(GetMaxOrderingId).mapTo[MaxOrderingId] loopMaxOrderingId = getLoopMaxOrderingId(from, queryUntil) xs <- currentJournalEventsByTag(tag, from, batchSize, loopMaxOrderingId).runWith(Sink.seq) } yield { // continue if query over entire journal was fewer than full batch or if we are limiting // the query through eventsByTagBufferSizesPerQuery and didn't reach the last 'ordering' yet val hasMoreEvents = (xs.size == batchSize) || (loopMaxOrderingId.maxOrdering < queryUntil.maxOrdering) val nextControl: FlowControl = terminateAfterOffset match { // we may stop if target is behind queryUntil and we don't have more events to fetch case Some(target) if !hasMoreEvents && target <= queryUntil.maxOrdering => Stop // We may also stop if we have found an event with an offset >= target case Some(target) if xs.exists(_.offset.value >= target) => Stop // otherwise, disregarding if Some or None, we must decide how to continue case _ => if (hasMoreEvents) Continue else ContinueDelayed } val nextStartingOffset = if (xs.isEmpty) { /* If no events matched the tag between `from` and `maxOrdering` then there is no need to execute the exact * same query again. We can continue querying from `maxOrdering`, which will save some load on the db. * (Note: we may never return a value smaller than `from`, otherwise we might return duplicate events) */ math.max(from, loopMaxOrderingId.maxOrdering) } else { // Continue querying from the largest offset xs.map(_.offset.value).max } Some(((nextStartingOffset, nextControl), xs)) } } control match { case Stop => Future.successful(None) case Continue => retrieveNextBatch() case ContinueDelayed => akka.pattern.after(readJournalConfig.refreshInterval, system.scheduler)(retrieveNextBatch()) } } .mapConcat(identity) } def currentEventsByTag(tag: String, offset: Long): Source[EventEnvelope, NotUsed] = { Source .futureSource(readJournalDao.maxJournalSequence().map { maxOrderingInDb => eventsByTag(tag, offset, terminateAfterOffset = Some(maxOrderingInDb)) }) .mapMaterializedValue(_ => NotUsed) } /** * Query events that have a specific tag. * * The consumer can keep track of its current position in the event stream by storing the * `offset` and restart the query from a given `offset` after a crash/restart. * The offset is exclusive, i.e. the event corresponding to the given `offset` parameter is not * included in the stream. * * For akka-persistence-jdbc the `offset` corresponds to the `ordering` column in the Journal table. 
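 *
 * A minimal consumption sketch (assumes an implicit `ActorSystem` named `system` is in scope,
 * `akka.persistence.query.PersistenceQuery` and `Offset` are imported, and the plugin is
 * configured under the default `jdbc-read-journal` path):
 * {{{
 * val readJournal = PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier)
 * readJournal
 *   .eventsByTag("my-tag", Offset.sequence(0L))
 *   .runForeach(envelope => println(s"${envelope.offset}: ${envelope.event}"))
 * }}}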
* The `ordering` is a sequential id number that uniquely identifies the position of each event within * the event stream. The `Offset` type is `akka.persistence.query.Sequence` with the `ordering` as the * offset value. * * The returned event stream is ordered by `offset`. * * In addition to the `offset` the `EventEnvelope` also provides `persistenceId` and `sequenceNr` * for each event. The `sequenceNr` is the sequence number for the persistent actor with the * `persistenceId` that persisted the event. The `persistenceId` + `sequenceNr` is an unique * identifier for the event. * * The stream is not completed when it reaches the end of the currently stored events, * but it continues to push new events when new events are persisted. * Corresponding query that is completed when it reaches the end of the currently * stored events is provided by [[CurrentEventsByTagQuery#currentEventsByTag]]. */ override def eventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] = eventsByTag(tag, offset.value) def eventsByTag(tag: String, offset: Long): Source[EventEnvelope, NotUsed] = eventsByTag(tag, offset, terminateAfterOffset = None) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/serialization/PersistentReprSerializer.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.serialization import akka.NotUsed import akka.persistence.jdbc.util.TrySeq import akka.persistence.journal.Tagged import akka.persistence.{ AtomicWrite, PersistentRepr } import akka.stream.scaladsl.Flow import scala.collection.immutable._ import scala.util.Try @deprecated("use Akka Serialization for the payloads instead", since = "5.0.0") trait PersistentReprSerializer[T] { /** * An akka.persistence.AtomicWrite contains a Sequence of events (with metadata, the PersistentRepr) * that must all be persisted or all fail, what makes the operation atomic. The function converts * each AtomicWrite to a Try[Seq[T]]. * The Try denotes whether there was a problem with the AtomicWrite or not. */ def serialize(messages: Seq[AtomicWrite]): Seq[Try[Seq[T]]] = { messages.map { atomicWrite => val serialized = atomicWrite.payload.map(serialize) TrySeq.sequence(serialized) } } def serialize(persistentRepr: PersistentRepr): Try[T] = persistentRepr.payload match { case Tagged(payload, tags) => serialize(persistentRepr.withPayload(payload), tags) case _ => serialize(persistentRepr, Set.empty[String]) } def serialize(persistentRepr: PersistentRepr, tags: Set[String]): Try[T] /** * deserialize into a PersistentRepr, a set of tags and a Long representing the global ordering of events */ def deserialize(t: T): Try[(PersistentRepr, Set[String], Long)] } @deprecated("use Akka Serialization for the payloads instead", since = "5.0.0") trait FlowPersistentReprSerializer[T] extends PersistentReprSerializer[T] { /** * A flow which deserializes each element into a PersistentRepr, * a set of tags and a Long representing the global ordering of events */ def deserializeFlow: Flow[T, Try[(PersistentRepr, Set[String], Long)], NotUsed] = { Flow[T].map(deserialize) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/serialization/SnapshotSerializer.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.serialization import akka.persistence.SnapshotMetadata import scala.util.Try trait SnapshotSerializer[T] { def serialize(metadata: SnapshotMetadata, snapshot: Any): Try[T] def deserialize(t: T): Try[(SnapshotMetadata, Any)] } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/snapshot/JdbcSnapshotStore.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.snapshot import akka.actor.ActorSystem import akka.persistence.jdbc.config.SnapshotConfig import akka.persistence.jdbc.snapshot.dao.{ SnapshotDao, SnapshotDaoInstantiation } import akka.persistence.jdbc.db.{ SlickDatabase, SlickExtension } import akka.persistence.snapshot.SnapshotStore import akka.persistence.{ SelectedSnapshot, SnapshotMetadata, SnapshotSelectionCriteria } import akka.stream.{ Materializer, SystemMaterializer } import com.typesafe.config.Config import slick.jdbc.JdbcBackend._ import scala.concurrent.{ ExecutionContext, Future } object JdbcSnapshotStore { def toSelectedSnapshot(tupled: (SnapshotMetadata, Any)): SelectedSnapshot = tupled match { case (meta: SnapshotMetadata, snapshot: Any) => SelectedSnapshot(meta, snapshot) } } class JdbcSnapshotStore(config: Config) extends SnapshotStore { import JdbcSnapshotStore._ implicit val ec: ExecutionContext = context.dispatcher implicit val system: ActorSystem = context.system implicit val mat: Materializer = SystemMaterializer(system).materializer val snapshotConfig = new SnapshotConfig(config) val slickDb: SlickDatabase = SlickExtension(system).database(config) def db: Database = slickDb.database val snapshotDao: SnapshotDao = SnapshotDaoInstantiation.snapshotDao(snapshotConfig, slickDb) override def loadAsync( persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = { val result = criteria match { case SnapshotSelectionCriteria(Long.MaxValue, Long.MaxValue, _, _) => snapshotDao.latestSnapshot(persistenceId) case SnapshotSelectionCriteria(Long.MaxValue, maxTimestamp, _, _) => snapshotDao.snapshotForMaxTimestamp(persistenceId, maxTimestamp) case SnapshotSelectionCriteria(maxSequenceNr, Long.MaxValue, _, _) => snapshotDao.snapshotForMaxSequenceNr(persistenceId, maxSequenceNr) case SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, _, _) => snapshotDao.snapshotForMaxSequenceNrAndMaxTimestamp(persistenceId, maxSequenceNr, maxTimestamp) case null => Future.successful(None) } result.map(_.map(toSelectedSnapshot)) } override def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] = snapshotDao.save(metadata, snapshot) override def deleteAsync(metadata: SnapshotMetadata): Future[Unit] = for { _ <- snapshotDao.delete(metadata.persistenceId, metadata.sequenceNr) } yield () override def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] = criteria match { case SnapshotSelectionCriteria(Long.MaxValue, Long.MaxValue, _, _) => snapshotDao.deleteAllSnapshots(persistenceId) case SnapshotSelectionCriteria(Long.MaxValue, maxTimestamp, _, _) => snapshotDao.deleteUpToMaxTimestamp(persistenceId, maxTimestamp) case SnapshotSelectionCriteria(maxSequenceNr, Long.MaxValue, _, _) => snapshotDao.deleteUpToMaxSequenceNr(persistenceId, maxSequenceNr) case SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, _, _) => snapshotDao.deleteUpToMaxSequenceNrAndMaxTimestamp(persistenceId, maxSequenceNr, maxTimestamp) case 
null => Future.successful(()) } override def postStop(): Unit = { if (slickDb.allowShutdown) { // Since a (new) db is created when this actor (re)starts, we must close it when the actor stops db.close() } super.postStop() } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/snapshot/dao/DefaultSnapshotDao.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.snapshot.dao import slick.jdbc.{ JdbcBackend, JdbcProfile } import akka.persistence.SnapshotMetadata import akka.persistence.jdbc.config.SnapshotConfig import akka.serialization.Serialization import akka.stream.Materializer import SnapshotTables._ import akka.persistence.jdbc.AkkaSerialization import scala.concurrent.{ ExecutionContext, Future } import scala.util.{ Success, Try } class DefaultSnapshotDao( db: JdbcBackend#Database, profile: JdbcProfile, snapshotConfig: SnapshotConfig, serialization: Serialization)(implicit ec: ExecutionContext, val mat: Materializer) extends SnapshotDao { import profile.api._ val queries = new SnapshotQueries(profile, snapshotConfig.snapshotTableConfiguration) private def toSnapshotData(row: SnapshotRow): Try[(SnapshotMetadata, Any)] = { val snapshot = serialization.deserialize(row.snapshotPayload, row.snapshotSerId, row.snapshotSerManifest) snapshot.flatMap { snapshot => val metadata = for { mPayload <- row.metaPayload mSerId <- row.metaSerId } yield (mPayload, mSerId) metadata match { case None => Success((SnapshotMetadata(row.persistenceId, row.sequenceNumber, row.created), snapshot)) case Some((payload, id)) => serialization.deserialize(payload, id, row.metaSerManifest.getOrElse("")).map { meta => (SnapshotMetadata(row.persistenceId, row.sequenceNumber, row.created, Some(meta)), snapshot) } } } } private def serializeSnapshot(meta: SnapshotMetadata, snapshot: Any): Try[SnapshotRow] = { val serializedMetadata = meta.metadata.flatMap(m => AkkaSerialization.serialize(serialization, m).toOption) AkkaSerialization .serialize(serialization, payload = snapshot) .map(serializedSnapshot => SnapshotRow( meta.persistenceId, meta.sequenceNr, meta.timestamp, serializedSnapshot.serId, serializedSnapshot.serManifest, serializedSnapshot.payload, serializedMetadata.map(_.serId), serializedMetadata.map(_.serManifest), serializedMetadata.map(_.payload))) } private def zeroOrOneSnapshot(rows: Seq[SnapshotRow]): Option[(SnapshotMetadata, Any)] = rows.headOption.map(row => toSnapshotData(row).get) // throw is from a future map override def latestSnapshot(persistenceId: String): Future[Option[(SnapshotMetadata, Any)]] = db.run(queries.selectLatestByPersistenceId(persistenceId).result).flatMap { rows => rows.headOption match { case Some(row) => Future.fromTry(toSnapshotData(row)).map(Option(_)) case None => Future.successful(None) } } override def snapshotForMaxTimestamp( persistenceId: String, maxTimestamp: Long): Future[Option[(SnapshotMetadata, Any)]] = db.run(queries.selectOneByPersistenceIdAndMaxTimestamp((persistenceId, maxTimestamp)).result).map(zeroOrOneSnapshot) override def snapshotForMaxSequenceNr( persistenceId: String, maxSequenceNr: Long): Future[Option[(SnapshotMetadata, Any)]] = db.run(queries.selectOneByPersistenceIdAndMaxSequenceNr((persistenceId, maxSequenceNr)).result) .map(zeroOrOneSnapshot) override def snapshotForMaxSequenceNrAndMaxTimestamp( persistenceId: String, maxSequenceNr: Long, maxTimestamp: Long): 
Future[Option[(SnapshotMetadata, Any)]] = db.run( queries .selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp((persistenceId, maxSequenceNr, maxTimestamp)) .result) .map(zeroOrOneSnapshot(_)) override def save(snapshotMetadata: SnapshotMetadata, snapshot: Any): Future[Unit] = { val eventualSnapshotRow = Future.fromTry(serializeSnapshot(snapshotMetadata, snapshot)) eventualSnapshotRow.map(queries.insertOrUpdate).flatMap(db.run).map(_ => ())(ExecutionContext.parasitic) } override def delete(persistenceId: String, sequenceNr: Long): Future[Unit] = db.run(queries.selectByPersistenceIdAndSequenceNr((persistenceId, sequenceNr)).delete) .map(_ => ())(ExecutionContext.parasitic) override def deleteAllSnapshots(persistenceId: String): Future[Unit] = db.run(queries.selectAll(persistenceId).delete).map(_ => ())((ExecutionContext.parasitic)) override def deleteUpToMaxSequenceNr(persistenceId: String, maxSequenceNr: Long): Future[Unit] = db.run(queries.selectByPersistenceIdUpToMaxSequenceNr((persistenceId, maxSequenceNr)).delete) .map(_ => ())((ExecutionContext.parasitic)) override def deleteUpToMaxTimestamp(persistenceId: String, maxTimestamp: Long): Future[Unit] = db.run(queries.selectByPersistenceIdUpToMaxTimestamp((persistenceId, maxTimestamp)).delete) .map(_ => ())((ExecutionContext.parasitic)) override def deleteUpToMaxSequenceNrAndMaxTimestamp( persistenceId: String, maxSequenceNr: Long, maxTimestamp: Long): Future[Unit] = db.run( queries .selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp((persistenceId, maxSequenceNr, maxTimestamp)) .delete) .map(_ => ())((ExecutionContext.parasitic)) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/snapshot/dao/SnapshotDao.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.snapshot.dao import akka.persistence.SnapshotMetadata import scala.concurrent.Future trait SnapshotDao { def deleteAllSnapshots(persistenceId: String): Future[Unit] def deleteUpToMaxSequenceNr(persistenceId: String, maxSequenceNr: Long): Future[Unit] def deleteUpToMaxTimestamp(persistenceId: String, maxTimestamp: Long): Future[Unit] def deleteUpToMaxSequenceNrAndMaxTimestamp( persistenceId: String, maxSequenceNr: Long, maxTimestamp: Long): Future[Unit] def latestSnapshot(persistenceId: String): Future[Option[(SnapshotMetadata, Any)]] def snapshotForMaxTimestamp(persistenceId: String, timestamp: Long): Future[Option[(SnapshotMetadata, Any)]] def snapshotForMaxSequenceNr(persistenceId: String, sequenceNr: Long): Future[Option[(SnapshotMetadata, Any)]] def snapshotForMaxSequenceNrAndMaxTimestamp( persistenceId: String, sequenceNr: Long, timestamp: Long): Future[Option[(SnapshotMetadata, Any)]] def delete(persistenceId: String, sequenceNr: Long): Future[Unit] def save(snapshotMetadata: SnapshotMetadata, snapshot: Any): Future[Unit] } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/snapshot/dao/SnapshotDaoInstantiation.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.snapshot.dao import akka.actor.{ ActorSystem, ExtendedActorSystem } import akka.annotation.InternalApi import akka.persistence.jdbc.config.SnapshotConfig import akka.persistence.jdbc.db.SlickDatabase import akka.serialization.{ Serialization, SerializationExtension } import akka.stream.Materializer import slick.jdbc.JdbcBackend.Database import slick.jdbc.JdbcProfile import scala.concurrent.ExecutionContext import scala.util.{ Failure, Success } @InternalApi private[jdbc] object SnapshotDaoInstantiation { def snapshotDao( snapshotConfig: SnapshotConfig, slickDb: SlickDatabase)(implicit system: ActorSystem, ec: ExecutionContext, mat: Materializer): SnapshotDao = { val fqcn = snapshotConfig.pluginConfig.dao val profile: JdbcProfile = slickDb.profile val args = Seq( (classOf[Database], slickDb.database), (classOf[JdbcProfile], profile), (classOf[SnapshotConfig], snapshotConfig), (classOf[Serialization], SerializationExtension(system)), (classOf[ExecutionContext], ec), (classOf[Materializer], mat)) system.asInstanceOf[ExtendedActorSystem].dynamicAccess.createInstanceFor[SnapshotDao](fqcn, args) match { case Success(dao) => dao case Failure(cause) => throw cause } } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/snapshot/dao/SnapshotQueries.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.snapshot.dao import akka.persistence.jdbc.config.SnapshotTableConfiguration import akka.persistence.jdbc.snapshot.dao.SnapshotTables.SnapshotRow import slick.jdbc.JdbcProfile class SnapshotQueries(val profile: JdbcProfile, override val snapshotTableCfg: SnapshotTableConfiguration) extends SnapshotTables { import profile.api._ private val SnapshotTableC = Compiled(SnapshotTable) def insertOrUpdate(snapshotRow: SnapshotRow) = SnapshotTableC.insertOrUpdate(snapshotRow) private def _selectAll(persistenceId: Rep[String]) = SnapshotTable.filter(_.persistenceId === persistenceId).sortBy(_.sequenceNumber.desc) val selectAll = Compiled(_selectAll _) private def _selectLatestByPersistenceId(persistenceId: Rep[String]) = _selectAll(persistenceId).take(1) val selectLatestByPersistenceId = Compiled(_selectLatestByPersistenceId _) private def _selectByPersistenceIdAndSequenceNr(persistenceId: Rep[String], sequenceNr: Rep[Long]) = _selectAll(persistenceId).filter(_.sequenceNumber === sequenceNr) val selectByPersistenceIdAndSequenceNr = Compiled(_selectByPersistenceIdAndSequenceNr _) private def _selectByPersistenceIdUpToMaxTimestamp(persistenceId: Rep[String], maxTimestamp: Rep[Long]) = _selectAll(persistenceId).filter(_.created <= maxTimestamp) val selectByPersistenceIdUpToMaxTimestamp = Compiled(_selectByPersistenceIdUpToMaxTimestamp _) private def _selectByPersistenceIdUpToMaxSequenceNr(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) = _selectAll(persistenceId).filter(_.sequenceNumber <= maxSequenceNr) val selectByPersistenceIdUpToMaxSequenceNr = Compiled(_selectByPersistenceIdUpToMaxSequenceNr _) private def _selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp( persistenceId: Rep[String], maxSequenceNr: Rep[Long], maxTimestamp: Rep[Long]) = _selectByPersistenceIdUpToMaxSequenceNr(persistenceId, maxSequenceNr).filter(_.created <= maxTimestamp) val selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp = Compiled( _selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp _) private def 
_selectOneByPersistenceIdAndMaxTimestamp(persistenceId: Rep[String], maxTimestamp: Rep[Long]) = _selectAll(persistenceId).filter(_.created <= maxTimestamp).take(1) val selectOneByPersistenceIdAndMaxTimestamp = Compiled(_selectOneByPersistenceIdAndMaxTimestamp _) private def _selectOneByPersistenceIdAndMaxSequenceNr(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) = _selectAll(persistenceId).filter(_.sequenceNumber <= maxSequenceNr).take(1) val selectOneByPersistenceIdAndMaxSequenceNr = Compiled(_selectOneByPersistenceIdAndMaxSequenceNr _) private def _selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp( persistenceId: Rep[String], maxSequenceNr: Rep[Long], maxTimestamp: Rep[Long]) = _selectByPersistenceIdUpToMaxSequenceNr(persistenceId, maxSequenceNr).filter(_.created <= maxTimestamp).take(1) val selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp = Compiled( _selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp _) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/snapshot/dao/SnapshotTables.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.snapshot.dao import akka.persistence.jdbc.config.SnapshotTableConfiguration import akka.persistence.jdbc.snapshot.dao.SnapshotTables.SnapshotRow import akka.persistence.jdbc.snapshot.dao.legacy.SnapshotTables.isOracleDriver import akka.persistence.jdbc.util.InputStreamOps.InputStreamImplicits object SnapshotTables { case class SnapshotRow( persistenceId: String, sequenceNumber: Long, created: Long, snapshotSerId: Int, snapshotSerManifest: String, snapshotPayload: Array[Byte], metaSerId: Option[Int], metaSerManifest: Option[String], metaPayload: Option[Array[Byte]]) } trait SnapshotTables { val profile: slick.jdbc.JdbcProfile import profile.api._ def snapshotTableCfg: SnapshotTableConfiguration class Snapshot(_tableTag: Tag) extends Table[SnapshotRow]( _tableTag, _schemaName = snapshotTableCfg.schemaName, _tableName = snapshotTableCfg.tableName) { def * = ( persistenceId, sequenceNumber, created, snapshotSerId, snapshotSerManifest, snapshotPayload, metaSerId, metaSerManifest, metaPayload).<>((SnapshotRow.apply _).tupled, SnapshotRow.unapply) val persistenceId: Rep[String] = column[String](snapshotTableCfg.columnNames.persistenceId, O.Length(255, varying = true)) val sequenceNumber: Rep[Long] = column[Long](snapshotTableCfg.columnNames.sequenceNumber) val created: Rep[Long] = column[Long](snapshotTableCfg.columnNames.created) val snapshotPayload: Rep[Array[Byte]] = column[Array[Byte]](snapshotTableCfg.columnNames.snapshotPayload) val snapshotSerId: Rep[Int] = column[Int](snapshotTableCfg.columnNames.snapshotSerId) val snapshotSerManifest: Rep[String] = column[String](snapshotTableCfg.columnNames.snapshotSerManifest) val metaPayload: Rep[Option[Array[Byte]]] = column[Option[Array[Byte]]](snapshotTableCfg.columnNames.metaPayload) val metaSerId: Rep[Option[Int]] = column[Option[Int]](snapshotTableCfg.columnNames.metaSerId) val metaSerManifest: Rep[Option[String]] = column[Option[String]](snapshotTableCfg.columnNames.metaSerManifest) val pk = primaryKey(s"${tableName}_pk", (persistenceId, sequenceNumber)) } case class OracleSnapshot(_tableTag: Tag) extends Snapshot(_tableTag) { import java.sql.Blob import javax.sql.rowset.serial.SerialBlob private val columnType = MappedColumnType.base[Array[Byte], Blob](bytes => new SerialBlob(bytes), blob => 
blob.getBinaryStream.toArray) override val snapshotPayload: Rep[Array[Byte]] = column[Array[Byte]](snapshotTableCfg.columnNames.snapshotPayload)(columnType) } lazy val SnapshotTable = new TableQuery(tag => if (isOracleDriver(profile)) OracleSnapshot(tag) else new Snapshot(tag)) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/snapshot/dao/legacy/ByteArraySnapshotDao.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.snapshot.dao.legacy import akka.persistence.SnapshotMetadata import akka.persistence.jdbc.config.SnapshotConfig import akka.persistence.jdbc.snapshot.dao.legacy.SnapshotTables.SnapshotRow import akka.persistence.jdbc.snapshot.dao.SnapshotDao import akka.serialization.Serialization import akka.stream.Materializer import slick.jdbc.{ JdbcBackend, JdbcProfile } import scala.concurrent.{ ExecutionContext, Future } import scala.util.{ Failure, Success } class ByteArraySnapshotDao( db: JdbcBackend#Database, profile: JdbcProfile, snapshotConfig: SnapshotConfig, serialization: Serialization)(implicit ec: ExecutionContext, val mat: Materializer) extends SnapshotDao { import profile.api._ val queries = new SnapshotQueries(profile, snapshotConfig.legacySnapshotTableConfiguration) val serializer = new ByteArraySnapshotSerializer(serialization) def toSnapshotData(row: SnapshotRow): (SnapshotMetadata, Any) = serializer.deserialize(row) match { case Success(deserialized) => deserialized case Failure(cause) => throw cause } override def latestSnapshot(persistenceId: String): Future[Option[(SnapshotMetadata, Any)]] = for { rows <- db.run(queries.selectLatestByPersistenceId(persistenceId).result) } yield rows.headOption.map(toSnapshotData) override def snapshotForMaxTimestamp( persistenceId: String, maxTimestamp: Long): Future[Option[(SnapshotMetadata, Any)]] = for { rows <- db.run(queries.selectOneByPersistenceIdAndMaxTimestamp((persistenceId, maxTimestamp)).result) } yield rows.headOption.map(toSnapshotData) override def snapshotForMaxSequenceNr( persistenceId: String, maxSequenceNr: Long): Future[Option[(SnapshotMetadata, Any)]] = for { rows <- db.run(queries.selectOneByPersistenceIdAndMaxSequenceNr((persistenceId, maxSequenceNr)).result) } yield rows.headOption.map(toSnapshotData) override def snapshotForMaxSequenceNrAndMaxTimestamp( persistenceId: String, maxSequenceNr: Long, maxTimestamp: Long): Future[Option[(SnapshotMetadata, Any)]] = for { rows <- db.run( queries .selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp((persistenceId, maxSequenceNr, maxTimestamp)) .result) } yield rows.headOption.map(toSnapshotData) override def save(snapshotMetadata: SnapshotMetadata, snapshot: Any): Future[Unit] = { val eventualSnapshotRow = Future.fromTry(serializer.serialize(snapshotMetadata, snapshot)) eventualSnapshotRow.map(queries.insertOrUpdate).flatMap(db.run).map(_ => ()) } override def delete(persistenceId: String, sequenceNr: Long): Future[Unit] = for { _ <- db.run(queries.selectByPersistenceIdAndSequenceNr((persistenceId, sequenceNr)).delete) } yield () override def deleteAllSnapshots(persistenceId: String): Future[Unit] = for { _ <- db.run(queries.selectAll(persistenceId).delete) } yield () override def deleteUpToMaxSequenceNr(persistenceId: String, maxSequenceNr: Long): Future[Unit] = for { _ <- db.run(queries.selectByPersistenceIdUpToMaxSequenceNr((persistenceId, maxSequenceNr)).delete) } yield () override def 
deleteUpToMaxTimestamp(persistenceId: String, maxTimestamp: Long): Future[Unit] = for { _ <- db.run(queries.selectByPersistenceIdUpToMaxTimestamp((persistenceId, maxTimestamp)).delete) } yield () override def deleteUpToMaxSequenceNrAndMaxTimestamp( persistenceId: String, maxSequenceNr: Long, maxTimestamp: Long): Future[Unit] = for { _ <- db.run( queries .selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp((persistenceId, maxSequenceNr, maxTimestamp)) .delete) } yield () } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/snapshot/dao/legacy/ByteArraySnapshotSerializer.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.snapshot.dao.legacy import akka.persistence.SnapshotMetadata import akka.persistence.jdbc.serialization.SnapshotSerializer import akka.persistence.jdbc.snapshot.dao.legacy.SnapshotTables.SnapshotRow import akka.persistence.serialization.Snapshot import akka.serialization.Serialization import scala.util.Try class ByteArraySnapshotSerializer(serialization: Serialization) extends SnapshotSerializer[SnapshotRow] { def serialize(metadata: SnapshotMetadata, snapshot: Any): Try[SnapshotRow] = { serialization .serialize(Snapshot(snapshot)) .map(SnapshotRow(metadata.persistenceId, metadata.sequenceNr, metadata.timestamp, _)) } def deserialize(snapshotRow: SnapshotRow): Try[(SnapshotMetadata, Any)] = { serialization .deserialize(snapshotRow.snapshot, classOf[Snapshot]) .map(snapshot => { val snapshotMetadata = SnapshotMetadata(snapshotRow.persistenceId, snapshotRow.sequenceNumber, snapshotRow.created) (snapshotMetadata, snapshot.data) }) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/snapshot/dao/legacy/SnapshotQueries.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.snapshot.dao.legacy import akka.persistence.jdbc.config.LegacySnapshotTableConfiguration import akka.persistence.jdbc.snapshot.dao.legacy.SnapshotTables.SnapshotRow import slick.jdbc.JdbcProfile class SnapshotQueries(val profile: JdbcProfile, override val snapshotTableCfg: LegacySnapshotTableConfiguration) extends SnapshotTables { import profile.api._ private val SnapshotTableC = Compiled(SnapshotTable) def insertOrUpdate(snapshotRow: SnapshotRow) = SnapshotTableC.insertOrUpdate(snapshotRow) private def _selectAll(persistenceId: Rep[String]) = SnapshotTable.filter(_.persistenceId === persistenceId).sortBy(_.sequenceNumber.desc) val selectAll = Compiled(_selectAll _) private def _selectLatestByPersistenceId(persistenceId: Rep[String]) = _selectAll(persistenceId).take(1) val selectLatestByPersistenceId = Compiled(_selectLatestByPersistenceId _) private def _selectByPersistenceIdAndSequenceNr(persistenceId: Rep[String], sequenceNr: Rep[Long]) = _selectAll(persistenceId).filter(_.sequenceNumber === sequenceNr) val selectByPersistenceIdAndSequenceNr = Compiled(_selectByPersistenceIdAndSequenceNr _) private def _selectByPersistenceIdUpToMaxTimestamp(persistenceId: Rep[String], maxTimestamp: Rep[Long]) = _selectAll(persistenceId).filter(_.created <= maxTimestamp) val selectByPersistenceIdUpToMaxTimestamp = Compiled(_selectByPersistenceIdUpToMaxTimestamp _) private def _selectByPersistenceIdUpToMaxSequenceNr(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) = _selectAll(persistenceId).filter(_.sequenceNumber <= maxSequenceNr) val selectByPersistenceIdUpToMaxSequenceNr = Compiled(_selectByPersistenceIdUpToMaxSequenceNr _) private def _selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp( persistenceId: Rep[String], maxSequenceNr: Rep[Long], maxTimestamp: Rep[Long]) = _selectByPersistenceIdUpToMaxSequenceNr(persistenceId, maxSequenceNr).filter(_.created <= maxTimestamp) val selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp = Compiled( _selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp _) private def _selectOneByPersistenceIdAndMaxTimestamp(persistenceId: Rep[String], maxTimestamp: Rep[Long]) = _selectAll(persistenceId).filter(_.created <= maxTimestamp).take(1) val selectOneByPersistenceIdAndMaxTimestamp = Compiled(_selectOneByPersistenceIdAndMaxTimestamp _) private def _selectOneByPersistenceIdAndMaxSequenceNr(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) = _selectAll(persistenceId).filter(_.sequenceNumber <= maxSequenceNr).take(1) val selectOneByPersistenceIdAndMaxSequenceNr = Compiled(_selectOneByPersistenceIdAndMaxSequenceNr _) private def _selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp( persistenceId: Rep[String], maxSequenceNr: Rep[Long], maxTimestamp: Rep[Long]) = _selectByPersistenceIdUpToMaxSequenceNr(persistenceId, maxSequenceNr).filter(_.created <= maxTimestamp).take(1) val selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp = Compiled( _selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp _) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/snapshot/dao/legacy/SnapshotTables.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.snapshot.dao.legacy import akka.persistence.jdbc.config.LegacySnapshotTableConfiguration import akka.persistence.jdbc.snapshot.dao.legacy.SnapshotTables.{ isOracleDriver, SnapshotRow } import akka.persistence.jdbc.util.InputStreamOps._ import slick.jdbc.JdbcProfile object SnapshotTables { case class SnapshotRow(persistenceId: String, sequenceNumber: Long, created: Long, snapshot: Array[Byte]) def isOracleDriver(profile: JdbcProfile): Boolean = profile match { case _: slick.jdbc.OracleProfile => true case _ => false } } trait SnapshotTables { val profile: slick.jdbc.JdbcProfile import profile.api._ def snapshotTableCfg: LegacySnapshotTableConfiguration class Snapshot(_tableTag: Tag) extends Table[SnapshotRow]( _tableTag, _schemaName = snapshotTableCfg.schemaName, _tableName = snapshotTableCfg.tableName) { def * = (persistenceId, sequenceNumber, created, snapshot).<>((SnapshotRow.apply _).tupled, SnapshotRow.unapply) val persistenceId: Rep[String] = column[String](snapshotTableCfg.columnNames.persistenceId, O.Length(255, varying = true)) val sequenceNumber: Rep[Long] = column[Long](snapshotTableCfg.columnNames.sequenceNumber) val created: Rep[Long] = column[Long](snapshotTableCfg.columnNames.created) val snapshot: Rep[Array[Byte]] = column[Array[Byte]](snapshotTableCfg.columnNames.snapshot) val pk = primaryKey(s"${tableName}_pk", (persistenceId, sequenceNumber)) } case class OracleSnapshot(_tableTag: Tag) extends Snapshot(_tableTag) { import java.sql.Blob import javax.sql.rowset.serial.SerialBlob private val columnType = MappedColumnType.base[Array[Byte], Blob](bytes => new SerialBlob(bytes), blob => blob.getBinaryStream.toArray) override val snapshot: Rep[Array[Byte]] = column[Array[Byte]](snapshotTableCfg.columnNames.snapshot)(columnType) } lazy val SnapshotTable = new TableQuery(tag => if (isOracleDriver(profile)) OracleSnapshot(tag) else new Snapshot(tag)) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/state/DurableStateQueries.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.state import akka.annotation.InternalApi import slick.jdbc.{ JdbcProfile, SetParameter } import slick.jdbc.H2Profile import slick.jdbc.MySQLProfile import slick.jdbc.OracleProfile import slick.jdbc.PostgresProfile import slick.jdbc.SQLServerProfile import akka.persistence.jdbc.config.DurableStateTableConfiguration /** * INTERNAL API */ @InternalApi private[akka] class DurableStateQueries( val profile: JdbcProfile, override val durableStateTableCfg: DurableStateTableConfiguration) extends DurableStateTables { import profile.api._ // Identifiers must be quoted via the profile so the raw-SQL INSERT/UPDATE paths use the // same quoting as Slick's typed-query SELECT path. Without this, e.g. H2 in default mode // uppercases unquoted identifiers, which doesn't match the lowercase quoted identifiers // used by the schema and Slick's typed queries. 
val tableAndSchema = durableStateTableCfg.schemaName.fold(profile.quoteIdentifier(durableStateTableCfg.tableName))(schema => s"${profile.quoteIdentifier(schema)}.${profile.quoteIdentifier(durableStateTableCfg.tableName)}") private val persistenceIdColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.persistenceId) private val globalOffsetColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.globalOffset) private val revisionColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.revision) private val statePayloadColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.statePayload) private val stateSerIdColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.stateSerId) private val stateSerManifestColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.stateSerManifest) private val tagColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.tag) private val stateTimestampColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.stateTimestamp) private def slickProfileToSchemaType(profile: JdbcProfile): String = profile match { case PostgresProfile => "Postgres" case MySQLProfile => "MySQL" case OracleProfile => "Oracle" case SQLServerProfile => "SqlServer" case H2Profile => "H2" case _ => throw new IllegalArgumentException(s"Unknown JdbcProfile $profile encountered") } lazy val sequenceNextValUpdater = slickProfileToSchemaType(profile) match { case "H2" => new H2SequenceNextValUpdater(profile, durableStateTableCfg) case "Postgres" => new PostgresSequenceNextValUpdater(profile, durableStateTableCfg) case _ => ??? } implicit val uuidSetter: SetParameter[Array[Byte]] = SetParameter[Array[Byte]] { case (bytes, params) => params.setBytes(bytes) } private[jdbc] def selectFromDbByPersistenceId(persistenceId: Rep[String]) = durableStateTable.filter(_.persistenceId === persistenceId) private[jdbc] def insertDbWithDurableState(row: DurableStateTables.DurableStateRow, seqNextValue: String) = { sqlu"""INSERT INTO #$tableAndSchema ( #$persistenceIdColumn, #$globalOffsetColumn, #$revisionColumn, #$statePayloadColumn, #$stateSerIdColumn, #$stateSerManifestColumn, #$tagColumn, #$stateTimestampColumn ) VALUES ( ${row.persistenceId}, #${seqNextValue}, ${row.revision}, ${row.statePayload}, ${row.stateSerId}, ${row.stateSerManifest}, ${row.tag}, #${System.currentTimeMillis()} ) """ } private[jdbc] def updateDbWithDurableState(row: DurableStateTables.DurableStateRow, seqNextValue: String) = { sqlu"""UPDATE #$tableAndSchema SET #$globalOffsetColumn = #${seqNextValue}, #$revisionColumn = ${row.revision}, #$statePayloadColumn = ${row.statePayload}, #$stateSerIdColumn = ${row.stateSerId}, #$stateSerManifestColumn = ${row.stateSerManifest}, #$tagColumn = ${row.tag}, #$stateTimestampColumn = ${System.currentTimeMillis} WHERE #$persistenceIdColumn = ${row.persistenceId} AND #$revisionColumn = ${row.revision} - 1 """ } private[jdbc] def getSequenceNextValueExpr() = sequenceNextValUpdater.getSequenceNextValueExpr() def deleteFromDb(persistenceId: String) = { durableStateTable.filter(_.persistenceId === persistenceId).delete } def deleteAllFromDb() = { durableStateTable.delete } private[jdbc] val maxOffsetQuery = Compiled { durableStateTable.map(_.globalOffset).max.getOrElse(0L) } private def _changesByTag( tag: Rep[String], offset: ConstColumn[Long], maxOffset: ConstColumn[Long], max: ConstColumn[Long]) = { durableStateTable .filter(_.tag === tag) .sortBy(_.globalOffset.asc) .filter(row => row.globalOffset > offset && row.globalOffset <= 
maxOffset) .take(max) } private[jdbc] val changesByTag = Compiled(_changesByTag _) private def _stateStoreStateQuery(from: ConstColumn[Long], limit: ConstColumn[Long]) = durableStateTable // FIXME change this to a specialized query to only retrieve the 3 columns of interest .filter(_.globalOffset > from) .sortBy(_.globalOffset.asc) .take(limit) .map(s => (s.persistenceId, s.globalOffset, s.revision)) val stateStoreStateQuery = Compiled(_stateStoreStateQuery _) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/state/DurableStateTables.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.state import akka.annotation.InternalApi import akka.persistence.jdbc.config.DurableStateTableConfiguration /** * INTERNAL API */ @InternalApi private[akka] object DurableStateTables { case class DurableStateRow( globalOffset: Long, persistenceId: String, revision: Long, statePayload: Array[Byte], tag: Option[String], stateSerId: Int, stateSerManifest: Option[String], stateTimestamp: Long) } /** * INTERNAL API */ @InternalApi private[akka] trait DurableStateTables { val profile: slick.jdbc.JdbcProfile import profile.api._ def durableStateTableCfg: DurableStateTableConfiguration import DurableStateTables._ class DurableState(_tableTag: Tag) extends Table[DurableStateRow]( _tableTag, _schemaName = durableStateTableCfg.schemaName, _tableName = durableStateTableCfg.tableName) { def * = (globalOffset, persistenceId, revision, statePayload, tag, stateSerId, stateSerManifest, stateTimestamp) .<>((DurableStateRow.apply _).tupled, DurableStateRow.unapply) val globalOffset: Rep[Long] = column[Long](durableStateTableCfg.columnNames.globalOffset, O.AutoInc) val persistenceId: Rep[String] = column[String](durableStateTableCfg.columnNames.persistenceId, O.PrimaryKey, O.Length(255, varying = true)) val revision: Rep[Long] = column[Long](durableStateTableCfg.columnNames.revision) val statePayload: Rep[Array[Byte]] = column[Array[Byte]](durableStateTableCfg.columnNames.statePayload) val tag: Rep[Option[String]] = column[Option[String]](durableStateTableCfg.columnNames.tag) val stateSerId: Rep[Int] = column[Int](durableStateTableCfg.columnNames.stateSerId) val stateSerManifest: Rep[Option[String]] = column[Option[String]](durableStateTableCfg.columnNames.stateSerManifest) val stateTimestamp: Rep[Long] = column[Long](durableStateTableCfg.columnNames.stateTimestamp) val globalOffsetIdx = index(s"${tableName}_globalOffset_idx", globalOffset, unique = true) } lazy val durableStateTable = new TableQuery(new DurableState(_)) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/state/JdbcDurableStateStoreProvider.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.state import scala.concurrent.ExecutionContext import slick.jdbc.JdbcProfile import slick.jdbc.JdbcBackend._ import akka.actor.ExtendedActorSystem import akka.persistence.jdbc.config.DurableStateTableConfiguration import akka.persistence.state.scaladsl.DurableStateStore import akka.persistence.state.javadsl.{ DurableStateStore => JDurableStateStore } import akka.persistence.state.DurableStateStoreProvider import akka.persistence.jdbc.db.{ SlickDatabase, SlickExtension } import akka.serialization.SerializationExtension import akka.stream.{ Materializer, SystemMaterializer } import com.typesafe.config.Config class JdbcDurableStateStoreProvider[A](system: ExtendedActorSystem, cfg: Config, cfgPath: String) extends DurableStateStoreProvider { implicit val ec: ExecutionContext = system.dispatcher implicit val mat: Materializer = SystemMaterializer(system).materializer val config = system.settings.config val slickDb: SlickDatabase = SlickExtension(system).database(config.getConfig(cfgPath)) def db: Database = slickDb.database lazy val durableStateConfig = new DurableStateTableConfiguration(config.getConfig(cfgPath)) lazy val serialization = SerializationExtension(system) val profile: JdbcProfile = slickDb.profile private lazy val _scaladslDurableStateStore: DurableStateStore[Any] = new scaladsl.JdbcDurableStateStore[Any](cfgPath, db, profile, durableStateConfig, serialization)(system) override def scaladslDurableStateStore(): DurableStateStore[Any] = _scaladslDurableStateStore override def javadslDurableStateStore(): JDurableStateStore[AnyRef] = new javadsl.JdbcDurableStateStore[AnyRef]( profile, durableStateConfig, _scaladslDurableStateStore.asInstanceOf[scaladsl.JdbcDurableStateStore[AnyRef]]) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/state/OffsetOps.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.state import akka.persistence.query._ object OffsetSyntax { implicit class OffsetOps(val that: Offset) extends AnyVal { def value = that match { case Sequence(offsetValue) => offsetValue case NoOffset => 0L case _ => throw new IllegalArgumentException( "akka-persistence-jdbc does not support " + that.getClass.getName + " offsets") } } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/state/SequenceNextValUpdater.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.state import akka.annotation.InternalApi import akka.persistence.jdbc.config.DurableStateTableConfiguration import slick.jdbc.JdbcProfile import slick.dbio.Effect import slick.sql.SqlStreamingAction /** * INTERNAL API */ @InternalApi private[jdbc] trait SequenceNextValUpdater { def getSequenceNextValueExpr(): SqlStreamingAction[Vector[String], String, Effect] } /** * INTERNAL API */ @InternalApi private[jdbc] class H2SequenceNextValUpdater( profile: JdbcProfile, val durableStateTableCfg: DurableStateTableConfiguration) extends SequenceNextValUpdater { import profile.api._ // H2 dependent (https://stackoverflow.com/questions/36244641/h2-equivalent-of-postgres-serial-or-bigserial-column) def getSequenceNextValueExpr() = { sql"""SELECT COLUMN_DEFAULT FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = '#${durableStateTableCfg.tableName}' AND COLUMN_NAME = '#${durableStateTableCfg.columnNames.globalOffset}' AND TABLE_SCHEMA = '#${durableStateTableCfg.schemaName.getOrElse("PUBLIC")}'""".as[String] } } /** * INTERNAL API */ @InternalApi private[jdbc] class PostgresSequenceNextValUpdater( profile: JdbcProfile, val durableStateTableCfg: DurableStateTableConfiguration) extends SequenceNextValUpdater { import profile.api._ private val schemaPrefix = durableStateTableCfg.schemaName.map(n => s"$n.").getOrElse("") final val nextValFetcher = s"""(SELECT nextval(pg_get_serial_sequence('$schemaPrefix${durableStateTableCfg.tableName}', '${durableStateTableCfg.columnNames.globalOffset}')))""" def getSequenceNextValueExpr() = sql"""#$nextValFetcher""".as[String] } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/state/javadsl/JdbcDurableStateStore.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.state.javadsl import java.util.Optional import java.util.concurrent.CompletionStage import scala.jdk.FutureConverters._ import scala.concurrent.ExecutionContext import akka.annotation.ApiMayChange import slick.jdbc.JdbcProfile import akka.{ Done, NotUsed } import akka.persistence.state.javadsl.{ DurableStateUpdateStore, GetObjectResult } import akka.persistence.jdbc.state.DurableStateQueries import akka.persistence.jdbc.config.DurableStateTableConfiguration import akka.persistence.jdbc.state.scaladsl.{ JdbcDurableStateStore => ScalaJdbcDurableStateStore } import akka.persistence.query.{ DurableStateChange, Offset } import akka.persistence.query.javadsl.DurableStateStoreQuery import akka.stream.javadsl.Source import scala.annotation.nowarn object JdbcDurableStateStore { val Identifier = ScalaJdbcDurableStateStore.Identifier } /** * API may change */ @ApiMayChange class JdbcDurableStateStore[A]( profile: JdbcProfile, durableStateConfig: DurableStateTableConfiguration, scalaStore: akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore[A])(implicit ec: ExecutionContext) extends DurableStateUpdateStore[A] with DurableStateStoreQuery[A] { val queries = new DurableStateQueries(profile, durableStateConfig) def getObject(persistenceId: String): CompletionStage[GetObjectResult[A]] = scalaStore .getObject(persistenceId) .map(x => GetObjectResult(Optional.ofNullable(x.value.getOrElse(null.asInstanceOf[A])), x.revision)) .asJava def upsertObject(persistenceId: String, revision: Long, value: A, tag: String): CompletionStage[Done] = scalaStore.upsertObject(persistenceId, revision, value, tag).asJava @deprecated(message = "Use the deleteObject overload with revision instead.", since = "1.0.0") override def deleteObject(persistenceId: String): CompletionStage[Done] = deleteObject(persistenceId, revision = 0) @nowarn("msg=deprecated") override def deleteObject(persistenceId: String, revision: Long): CompletionStage[Done] = scalaStore.deleteObject(persistenceId).asJava def currentChanges(tag: String, offset: Offset): Source[DurableStateChange[A], NotUsed] = scalaStore.currentChanges(tag, offset).asJava def changes(tag: String, offset: Offset): Source[DurableStateChange[A], NotUsed] = scalaStore.changes(tag, offset).asJava } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/state/scaladsl/DurableStateSequenceActor.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
 */

package akka.persistence.jdbc.state.scaladsl

import scala.collection.immutable.NumericRange
import akka.actor.{ Actor, ActorLogging, Props, Status, Timers }
import akka.pattern.pipe
import akka.persistence.jdbc.config.DurableStateSequenceRetrievalConfig
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import scala.concurrent.duration.FiniteDuration
import akka.annotation.InternalApi

/**
 * INTERNAL API
 */
@InternalApi private[akka] object DurableStateSequenceActor {

  def props[A](stateStore: JdbcDurableStateStore[A], config: DurableStateSequenceRetrievalConfig)(
      implicit materializer: Materializer): Props = Props(new DurableStateSequenceActor(stateStore, config))

  case class VisitedElement(pid: PersistenceId, offset: GlobalOffset, revision: Revision) {
    override def toString = s"($pid, $offset, $revision)"
  }

  private case object QueryState
  private case class NewStateInfo(originalOffset: Long, elements: List[VisitedElement])
  private case class ScheduleAssumeMaxGlobalOffset(max: GlobalOffset)
  private case class AssumeMaxGlobalOffset(max: GlobalOffset)

  case object GetMaxGlobalOffset
  case class MaxGlobalOffset(maxOffset: GlobalOffset)

  private case object QueryGlobalOffsetsTimerKey
  private case object AssumeMaxGlobalOffsetTimerKey

  private type GlobalOffset = Long
  private type PersistenceId = String
  private type Revision = Long

  /**
   * Efficient representation of missing elements using NumericRanges.
   * It can be seen as a collection of GlobalOffset
   */
  case class MissingElements(elements: Seq[NumericRange[GlobalOffset]]) {
    def addRange(from: GlobalOffset, until: GlobalOffset): MissingElements = {
      val newRange = from.until(until)
      MissingElements(elements :+ newRange)
    }

    def contains(id: GlobalOffset): Boolean = elements.exists(_.containsTyped(id))

    def isEmpty: Boolean = elements.forall(_.isEmpty)

    def size: Int = elements.map(_.size).sum

    override def toString: String = {
      elements
        .collect {
          case range if range.nonEmpty =>
            if (range.size == 1) range.start.toString
            else s"${range.start}-${range.end}"
        }
        .mkString(", ")
    }
  }

  private object MissingElements {
    def empty: MissingElements = MissingElements(Vector.empty)
  }
}

/**
 * This actor supports the `changesByTag` query to ensure that we don't miss any offsets in the result.
 * In case some offsets are missing we need to re-query (with a delay) and try to fetch the
 * missing offsets. The offsets may really be missing, in which case we identify them
 * as genuine gaps and continue after `config.maxTries`.
 *
 * There can be three reasons for gaps:
 *
 * 1. The transaction was rolled back. The increment of the global offset sequence is not part of the transaction.
 * 2. The global offset is assigned by incrementing a database sequence. The sequence is not part of the
 *    transaction and values may be assigned in a different order than the commit order, meaning that in the
 *    queries we may see a later offset before seeing an earlier offset. Those missing offsets will be seen when
 *    we re-query. See further explanation in, for example,
 *    https://espadrine.github.io/blog/posts/two-postgresql-sequence-misconceptions.html
 * 3. There are multiple updates (revisions) to the same persistence id and the queries may only see the
 *    latest revision, meaning that the additional earlier revisions will be seen as offset gaps.
 *
 * If offset gaps have been detected we try to confirm the gaps by looking at revision changes of
 * individual persistence ids. We keep a cache of the previously known revision per persistence id.
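 *
 * For example (illustrative numbers, not from the original comment): if the cache holds
 * (p6, offset 17, revision 10) and a later query returns (p6, offset 20, revision 13), the two unseen
 * offsets 18 and 19 can be attributed to the intermediate revisions 11 and 12 of p6, so that gap is
 * considered confirmed and the maximum offset can advance past it without extra retries.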
 * If the total number of revision changes corresponds to the number of missing offsets, they are
 * considered confirmed to be from case 3 and we can continue without re-query delays.
 *
 * Note that if we have seen revision 10 of p6 and we retrieve revision 13 of p6, we also know that there have
 * been revisions 11 and 12 of p6. We are using READ COMMITTED transaction isolation level and we have a check
 * of sequentiality of revisions in the `upsert` implementation.
 *
 * We have to delay and re-query for new persistence ids with revision > 1 for which we don't know the previous
 * revision, because those could be gaps from case 1 or 2.
 *
 * If gaps cannot be confirmed, it will re-query up to `config.maxTries` times before giving up and continuing
 * with the highest offset, as in case 1 for example.
 *
 * INTERNAL API
 */
@InternalApi private[akka] class DurableStateSequenceActor[A](
    stateStore: JdbcDurableStateStore[A],
    config: DurableStateSequenceRetrievalConfig)(implicit materializer: Materializer)
    extends Actor
    with ActorLogging
    with Timers {
  import DurableStateSequenceActor._
  import context.dispatcher
  import config.{ batchSize, maxBackoffQueryDelay, maxTries, queryDelay, revisionCacheCapacity }

  private val revisionCache = collection.mutable.Map.empty[PersistenceId, VisitedElement]

  override def receive: Receive = receive(0L, Map.empty, 0)

  override def preStart(): Unit = {
    self ! QueryState
    stateStore.maxStateStoreOffset().mapTo[Long].onComplete {
      case scala.util.Success(maxInDatabase) =>
        self ! ScheduleAssumeMaxGlobalOffset(maxInDatabase)
      case scala.util.Failure(t) =>
        log.info("Failed to recover fast, using state-by-state recovery instead. Cause: {}", t)
    }
  }

  /**
   * @param currentMaxGlobalOffset The highest offset value for which it is known that no missing elements exist
   * @param missingByCounter A map with missing offsets. The key of the map is the count at which the missing elements
   *                         can be assumed to be "skipped ids" (they are no longer assumed missing). Used together
   *                         with the `moduloCounter` to implement a "sliding window" where missing offsets are
   *                         re-tried up to `maxTries` before being assumed ok.
   * @param moduloCounter A counter which is incremented every time a new query has been executed, modulo `maxTries`
   * @param previousDelay The last used delay (may change in case failures occur)
   */
  final def receive(
      currentMaxGlobalOffset: GlobalOffset,
      missingByCounter: Map[Int, MissingElements],
      moduloCounter: Int,
      previousDelay: FiniteDuration = queryDelay): Receive = {
    case ScheduleAssumeMaxGlobalOffset(max) =>
      // All elements smaller than max can be assumed missing after this delay
      val delay = queryDelay * maxTries
      timers.startSingleTimer(key = AssumeMaxGlobalOffsetTimerKey, AssumeMaxGlobalOffset(max), delay)

    case AssumeMaxGlobalOffset(max) =>
      if (currentMaxGlobalOffset < max) {
        context.become(receive(max, missingByCounter, moduloCounter, previousDelay))
      }

    case GetMaxGlobalOffset =>
      sender() ! MaxGlobalOffset(currentMaxGlobalOffset)

    case QueryState =>
      stateStore
        .stateStoreStateInfo(currentMaxGlobalOffset, batchSize)
        .runWith(Sink.seq)
        .map(result =>
          NewStateInfo(
            currentMaxGlobalOffset,
            result.map { case (pid, offset, rev) => VisitedElement(pid, offset, rev) }.toList))
        .pipeTo(self)

    case NewStateInfo(originalOffset, _) if originalOffset < currentMaxGlobalOffset =>
      // search was done using an offset that became obsolete in the meantime
      // therefore we start a new query
      self ! QueryState

    case NewStateInfo(_, elements) =>
      findGaps(elements, currentMaxGlobalOffset, missingByCounter, moduloCounter)

    case Status.Failure(t) =>
      val newDelay = maxBackoffQueryDelay.min(previousDelay * 2)
      if (newDelay == maxBackoffQueryDelay) {
        log.warning("Failed to query max global offset because of {}, retrying in [{}]", t, newDelay.toCoarsest)
      }
      scheduleQuery(newDelay)
      context.become(receive(currentMaxGlobalOffset, missingByCounter, moduloCounter, newDelay))
  }

  /**
   * This method implements the "find gaps" algorithm. It is the meat and main purpose of this actor.
   */
  final def findGaps(
      elements: List[VisitedElement],
      currentMaxOffset: GlobalOffset,
      missingByCounter: Map[Int, MissingElements],
      moduloCounter: Int): Unit = {
    // list of elements that will be considered as genuine gaps.
    // `givenUp` is either empty or was filled on a previous iteration
    val givenUp = missingByCounter.getOrElse(moduloCounter, MissingElements.empty)

    val (nextMax, _, missingElems) =
      // using the global offset of the elements that were fetched, we verify if there are any gaps
      elements.foldLeft[(GlobalOffset, GlobalOffset, MissingElements)](
        (currentMaxOffset, currentMaxOffset, MissingElements.empty)) {
        case ((currentMax, previousOffset, missing), currentElement) =>
          // we must decide if we move the cursor forward
          val newMax =
            if ((currentMax + 1).until(currentElement.offset).forall(givenUp.contains)) {
              // we move the cursor forward when:
              // 1) they have been detected as missing on a previous iteration and it is now time to give up
              // 2) current + 1 == currentElement (meaning no gap). Note that `forall` on an empty range always returns true
              currentElement.offset
            } else currentMax

          // we accumulate in newMissing the gaps we detect on each iteration
          val newMissing =
            if (previousOffset + 1 == currentElement.offset || newMax == currentElement.offset) missing
            else missing.addRange(previousOffset + 1, currentElement.offset)

          (newMax, currentElement.offset, newMissing)
      }

    // these offsets will be used as givenUp after one round when back to the same moduloCounter
    val newMissingByCounter = missingByCounter + (moduloCounter -> missingElems)

    // did we detect gaps in the current batch?
    val noGapsFound = missingElems.isEmpty

    // a full batch means that we retrieved as many elements as the batchSize
    // that happens when we are not yet at the end of the stream
    val isFullBatch = elements.size == batchSize

    if (noGapsFound) {
      addToRevisionCache(elements, nextMax)
      if (isFullBatch) {
        // We can query again immediately, as this allows the actor to rapidly retrieve the real max offset.
        // Using same moduloCounter.
        self ! QueryState
        context.become(receive(nextMax, newMissingByCounter, moduloCounter))
      } else {
        // keep querying but not immediately
        scheduleQuery(queryDelay)
        context.become(receive(nextMax, newMissingByCounter, (moduloCounter + 1) % maxTries))
      }
    } else {
      // We detected gaps. When there are updates to the same persistence id we might not see all subsequent
      // changes but only the latest. Those changes will be seen as gaps. By looking at the difference in revisions
      // for persistence ids that we have seen before (included in the revisionCache) we try to confirm if
      // the offset gaps can be filled by the revision changes.
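      // Worked example of the confirmation arithmetic below (illustrative values, not from the original source):
      // given a cached entry (p1, offset 5, revision 2) and a fetched element (p1, offset 9, revision 4),
      // revDiff is 2 and the candidate offsets between them are 6, 7 and 8; at most revDiff - 1 = 1 of those
      // offsets that are currently flagged as missing is counted towards the confirmed revision changes.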
val missingOffsetCount = missingElems.size val (inBetweenRevisionChanges, newMaxOffset, cacheMissed) = // in this fold we find the possibly new max offset and the total revision difference for all persistence ids elements.foldLeft((0L, nextMax, false)) { case ((revChg, currMaxOffset, cacheMiss), elem) => revisionCache.get(elem.pid) match { case Some(e) => // cache hit: find the revision difference val maxOffset = math.max(currMaxOffset, elem.offset) val revDiff = elem.revision - e.revision if (revDiff <= 1) { (revChg, maxOffset, cacheMiss) } else { val pidOffsets = (e.offset until elem.offset).tail // e.offset and elem.offset are known to not be missing val missingCount = math.min(pidOffsets.count(missingElems.contains), revDiff - 1) (revChg + missingCount, maxOffset, cacheMiss) } case None => // this persistence id was not present in the cache (revChg, math.max(elem.offset, currMaxOffset), cacheMiss || elem.revision != 1L) } } // in this case we want to keep querying but not immediately scheduleQuery(queryDelay) if (cacheMissed || missingOffsetCount != inBetweenRevisionChanges) { // gaps could not be confirmed if (log.isDebugEnabled) { log.debug( "Offset gaps detected [{}]. Current max offset [{}]. [{}] gaps could not be confirmed by revision changes.{}", missingElems, nextMax, missingOffsetCount - inBetweenRevisionChanges, if (cacheMissed) " Some new persistence ids without previously known revision." else "") } addToRevisionCache(elements, nextMax) context.become(receive(nextMax, newMissingByCounter, (moduloCounter + 1) % maxTries)) } else { addToRevisionCache(elements, newMaxOffset) context.become(receive(newMaxOffset, newMissingByCounter, (moduloCounter + 1) % maxTries)) } } } private def addToRevisionCache(elements: List[VisitedElement], upToOffset: GlobalOffset): Unit = { revisionCache ++= elements.iterator.collect { case e if e.offset <= upToOffset => e.pid -> e } evictRevisionCacheIfNeeded() } private def evictRevisionCacheIfNeeded(): Unit = { def divRoundUp(num: Int, divisor: Int): Int = (num + divisor - 1) / divisor if (revisionCache.size > revisionCacheCapacity) { val sortedEntries = revisionCache.toVector.sortBy { case (_, elem) => elem.offset } // keep 90% of capacity val numberOfEntriesToRemove = (sortedEntries.size - revisionCacheCapacity) + divRoundUp(revisionCacheCapacity, 10) revisionCache --= sortedEntries.iterator.take(numberOfEntriesToRemove).map(_._1) } } def scheduleQuery(delay: FiniteDuration): Unit = { timers.startSingleTimer(key = QueryGlobalOffsetsTimerKey, QueryState, delay) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/state/scaladsl/JdbcDurableStateStore.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.state.scaladsl import scala.concurrent.{ ExecutionContext, Future } import scala.concurrent.duration._ import scala.util.Try import slick.jdbc.{ JdbcBackend, JdbcProfile } import akka.{ Done, NotUsed } import akka.actor.ExtendedActorSystem import akka.pattern.ask import akka.persistence.state.scaladsl.{ DurableStateUpdateStore, GetObjectResult } import akka.persistence.jdbc.AkkaSerialization import akka.persistence.jdbc.state.DurableStateQueries import akka.persistence.jdbc.config.DurableStateTableConfiguration import akka.persistence.jdbc.state.{ DurableStateTables, OffsetSyntax } import akka.persistence.query.{ DurableStateChange, Offset } import akka.persistence.query.scaladsl.DurableStateStoreQuery import akka.persistence.jdbc.journal.dao.FlowControl import akka.serialization.Serialization import akka.stream.scaladsl.{ Sink, Source } import akka.stream.{ Materializer, SystemMaterializer } import akka.util.Timeout import OffsetSyntax._ import akka.annotation.ApiMayChange import akka.persistence.query.UpdatedDurableState object JdbcDurableStateStore { val Identifier = "jdbc-durable-state-store" } /** * API may change */ @ApiMayChange class JdbcDurableStateStore[A]( val configPath: String, db: JdbcBackend#Database, val profile: JdbcProfile, durableStateConfig: DurableStateTableConfiguration, serialization: Serialization)(implicit val system: ExtendedActorSystem) extends DurableStateUpdateStore[A] with DurableStateStoreQuery[A] { import DurableStateSequenceActor._ import FlowControl._ import profile.api._ implicit val ec: ExecutionContext = system.dispatcher implicit val mat: Materializer = SystemMaterializer(system).materializer lazy val queries = new DurableStateQueries(profile, durableStateConfig) // Started lazily to prevent the actor for querying the db if no changesByTag queries are used private[jdbc] lazy val stateSequenceActor = system.systemActorOf( DurableStateSequenceActor.props(this, durableStateConfig.stateSequenceConfig), s"$configPath.akka-persistence-jdbc-durable-state-sequence-actor") def getObject(persistenceId: String): Future[GetObjectResult[A]] = { db.run(queries.selectFromDbByPersistenceId(persistenceId).result).flatMap { rows => rows.headOption match { case None => Future.successful(GetObjectResult(None, 0)) case Some(row) => Future.fromTry(AkkaSerialization.fromDurableStateRow(serialization)(row).map { anyRef => GetObjectResult(Some(anyRef.asInstanceOf[A]), row.revision) }) } } } def upsertObject(persistenceId: String, revision: Long, value: A, tag: String): Future[Done] = { require(revision > 0) val row = AkkaSerialization.serialize(serialization, value).map { serialized => DurableStateTables.DurableStateRow( 0, // insert 0 for autoinc columns persistenceId, revision, serialized.payload, Option(tag).filter(_.trim.nonEmpty), serialized.serId, Option(serialized.serManifest).filter(_.trim.nonEmpty), System.currentTimeMillis) } Future .fromTry(row) .flatMap { r => val action = if (revision == 1) insertDurableState(r) else updateDurableState(r) db.run(action) } .map { rowsAffected => if (rowsAffected == 0) throw new IllegalStateException( s"Incorrect revision number [$revision] provided: It has to be 1 more than the value existing in the database for persistenceId [$persistenceId]") else Done } } @deprecated(message = "Use the deleteObject overload with revision instead.", since = "5.2.0") override def deleteObject(persistenceId: String): Future[Done] = deleteObject(persistenceId, revision = 0) override def deleteObject(persistenceId: 
String, revision: Long): Future[Done] = { // FIXME #686 use revision db.run(queries.deleteFromDb(persistenceId).map(_ => Done)) } def currentChanges(tag: String, offset: Offset): Source[DurableStateChange[A], NotUsed] = { Source .futureSource(maxStateStoreOffset().map { maxOrderingInDb => changesByTag(tag, offset.value, terminateAfterOffset = Some(maxOrderingInDb)) }) .mapMaterializedValue(_ => NotUsed) } def changes(tag: String, offset: Offset): Source[DurableStateChange[A], NotUsed] = changesByTag(tag, offset.value, terminateAfterOffset = None) private def currentChangesByTag( tag: String, from: Long, batchSize: Long, queryUntil: MaxGlobalOffset): Source[DurableStateChange[A], NotUsed] = { if (queryUntil.maxOffset < from) Source.empty else changesByTagFromDb(tag, from, queryUntil.maxOffset, batchSize).mapAsync(1)(Future.fromTry) } private def changesByTagFromDb( tag: String, offset: Long, maxOffset: Long, batchSize: Long): Source[Try[DurableStateChange[A]], NotUsed] = { Source .fromPublisher(db.stream(queries.changesByTag((tag, offset, maxOffset, batchSize)).result)) .map(toDurableStateChange) } private[jdbc] def changesByTag( tag: String, offset: Long, terminateAfterOffset: Option[Long]): Source[DurableStateChange[A], NotUsed] = { val batchSize = durableStateConfig.batchSize implicit val askTimeout: Timeout = Timeout(durableStateConfig.stateSequenceConfig.askTimeout) Source .unfoldAsync[(Long, FlowControl), Seq[DurableStateChange[A]]]((offset, Continue)) { case (from, control) => def retrieveNextBatch() = { for { queryUntil <- stateSequenceActor.ask(GetMaxGlobalOffset).mapTo[MaxGlobalOffset] xs <- currentChangesByTag(tag, from, batchSize, queryUntil).runWith(Sink.seq) } yield { val hasMoreEvents = xs.size == batchSize val nextControl: FlowControl = terminateAfterOffset match { // we may stop if target is behind queryUntil and we don't have more events to fetch case Some(target) if !hasMoreEvents && target <= queryUntil.maxOffset => Stop // We may also stop if we have found an event with an offset >= target case Some(target) if xs.exists(_.offset.value >= target) => Stop // otherwise, disregarding if Some or None, we must decide how to continue case _ => if (hasMoreEvents) Continue else ContinueDelayed } val nextStartingOffset = if (xs.isEmpty) { math.max(from.value, queryUntil.maxOffset) } else { // Continue querying from the largest offset xs.map(_.offset.value).max } Some(((nextStartingOffset, nextControl), xs)) } } control match { case Stop => Future.successful(None) case Continue => retrieveNextBatch() case ContinueDelayed => akka.pattern.after(durableStateConfig.refreshInterval, system.scheduler)(retrieveNextBatch()) } } .mapConcat(identity) } private[jdbc] def maxStateStoreOffset(): Future[Long] = db.run(queries.maxOffsetQuery.result) private[jdbc] def stateStoreStateInfo(offset: Long, limit: Long): Source[(String, Long, Long), NotUsed] = Source.fromPublisher(db.stream(queries.stateStoreStateQuery((offset, limit)).result)) private def toDurableStateChange(row: DurableStateTables.DurableStateRow): Try[DurableStateChange[A]] = { AkkaSerialization .fromDurableStateRow(serialization)(row) .map(payload => new UpdatedDurableState( row.persistenceId, row.revision, payload.asInstanceOf[A], Offset.sequence(row.globalOffset), row.stateTimestamp)) } private def updateDurableState(row: DurableStateTables.DurableStateRow) = { for { s <- queries.getSequenceNextValueExpr() u <- queries.updateDbWithDurableState(row, s.head) } yield u } private def insertDurableState(row: 
DurableStateTables.DurableStateRow) = { for { s <- queries.getSequenceNextValueExpr() u <- queries.insertDbWithDurableState(row, s.head) } yield u } def deleteAllFromDb() = db.run(queries.deleteAllFromDb()) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/testkit/internal/SchemaType.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.testkit.internal import akka.annotation.InternalApi /** * INTERNAL API */ @InternalApi private[jdbc] sealed trait SchemaType /** * INTERNAL API */ @InternalApi private[jdbc] case object Postgres extends SchemaType /** * INTERNAL API */ @InternalApi private[jdbc] case object H2 extends SchemaType /** * INTERNAL API */ @InternalApi private[jdbc] case object MySQL extends SchemaType /** * INTERNAL API */ @InternalApi private[jdbc] case object Oracle extends SchemaType /** * INTERNAL API */ @InternalApi private[jdbc] case object SqlServer extends SchemaType ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/testkit/internal/SchemaUtilsImpl.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.testkit.internal import java.sql.Statement import scala.concurrent.Future import akka.Done import akka.actor.ClassicActorSystemProvider import akka.annotation.InternalApi import akka.dispatch.Dispatchers import akka.persistence.jdbc.db.SlickDatabase import akka.persistence.jdbc.db.SlickExtension import com.typesafe.config.Config import org.slf4j.Logger import slick.jdbc.H2Profile import slick.jdbc.JdbcBackend.Database import slick.jdbc.JdbcProfile import slick.jdbc.MySQLProfile import slick.jdbc.OracleProfile import slick.jdbc.PostgresProfile import slick.jdbc.SQLServerProfile /** * INTERNAL API */ @InternalApi private[jdbc] object SchemaUtilsImpl { def legacy(configKey: String, config: Config): Boolean = config.getConfig(configKey).getString("dao") != "akka.persistence.jdbc.journal.dao.DefaultJournalDao" /** * INTERNAL API */ @InternalApi private[jdbc] def dropIfExists(configKey: String, logger: Logger)( implicit actorSystem: ClassicActorSystemProvider): Future[Done] = { val slickDb: SlickDatabase = loadSlickDatabase(configKey) val (fileToLoad, separator) = dropScriptFor( slickProfileToSchemaType(slickDb.profile), legacy(configKey, actorSystem.classicSystem.settings.config)) val blockingEC = actorSystem.classicSystem.dispatchers.lookup(Dispatchers.DefaultBlockingDispatcherId) Future(applyScriptWithSlick(fromClasspathAsString(fileToLoad), separator, logger, slickDb.database))(blockingEC) } /** * INTERNAL API */ @InternalApi private[jdbc] def createIfNotExists(configKey: String, logger: Logger)( implicit actorSystem: ClassicActorSystemProvider): Future[Done] = { val slickDb: SlickDatabase = loadSlickDatabase(configKey) val (fileToLoad, separator) = createScriptFor( slickProfileToSchemaType(slickDb.profile), legacy(configKey, actorSystem.classicSystem.settings.config)) val blockingEC = actorSystem.classicSystem.dispatchers.lookup(Dispatchers.DefaultBlockingDispatcherId) Future(applyScriptWithSlick(fromClasspathAsString(fileToLoad), separator, logger, slickDb.database))(blockingEC) } /** * INTERNAL API */ @InternalApi private[jdbc] def applyScript(script: String, separator: String, configKey: String, logger: Logger)( implicit actorSystem: 
ClassicActorSystemProvider): Future[Done] = { val blockingEC = actorSystem.classicSystem.dispatchers.lookup(Dispatchers.DefaultBlockingDispatcherId) Future(applyScriptWithSlick(script, separator, logger, loadSlickDatabase(configKey).database))(blockingEC) } /** * INTERNAL API */ @InternalApi private[jdbc] def dropWithSlick(schemaType: SchemaType, logger: Logger, db: Database, legacy: Boolean): Done = { val (fileToLoad, separator) = dropScriptFor(schemaType, legacy) SchemaUtilsImpl.applyScriptWithSlick(SchemaUtilsImpl.fromClasspathAsString(fileToLoad), separator, logger, db) } /** * INTERNAL API */ @InternalApi private[jdbc] def createWithSlick(schemaType: SchemaType, logger: Logger, db: Database, legacy: Boolean): Done = { val (fileToLoad, separator) = createScriptFor(schemaType, legacy) SchemaUtilsImpl.applyScriptWithSlick(SchemaUtilsImpl.fromClasspathAsString(fileToLoad), separator, logger, db) } private def applyScriptWithSlick(script: String, separator: String, logger: Logger, database: Database): Done = { def withStatement(f: Statement => Unit): Done = { val session = database.createSession() try session.withStatement()(f) finally session.close() Done } withStatement { stmt => val lines = script.split(separator).map(_.trim) for { line <- lines if line.nonEmpty } yield { logger.debug(s"applying DDL: $line") try stmt.executeUpdate(line) catch { case t: java.sql.SQLException => logger.debug(s"Exception while applying SQL script", t) } } } } private def dropScriptFor(schemaType: SchemaType, legacy: Boolean): (String, String) = { val suffix = if (legacy) "-legacy" else "" schemaType match { case Postgres => (s"schema/postgres/postgres-drop-schema$suffix.sql", ";") case MySQL => (s"schema/mysql/mysql-drop-schema$suffix.sql", ";") case Oracle => (s"schema/oracle/oracle-drop-schema$suffix.sql", "/") case SqlServer => (s"schema/sqlserver/sqlserver-drop-schema$suffix.sql", ";") case H2 => (s"schema/h2/h2-drop-schema$suffix.sql", ";") } } private def createScriptFor(schemaType: SchemaType, legacy: Boolean): (String, String) = { val suffix = if (legacy) "-legacy" else "" schemaType match { case Postgres => (s"schema/postgres/postgres-create-schema$suffix.sql", ";") case MySQL => (s"schema/mysql/mysql-create-schema$suffix.sql", ";") case Oracle => (s"schema/oracle/oracle-create-schema$suffix.sql", "/") case SqlServer => (s"schema/sqlserver/sqlserver-create-schema$suffix.sql", ";") case H2 => (s"schema/h2/h2-create-schema$suffix.sql", ";") } } private def slickProfileToSchemaType(profile: JdbcProfile): SchemaType = profile match { case PostgresProfile => Postgres case MySQLProfile => MySQL case OracleProfile => Oracle case SQLServerProfile => SqlServer case H2Profile => H2 case _ => throw new IllegalArgumentException(s"Invalid profile $profile encountered") } /** * INTERNAL API */ @InternalApi private[jdbc] def fromClasspathAsString(fileName: String): String = { val is = getClass.getClassLoader.getResourceAsStream(fileName) io.Source.fromInputStream(is).mkString } private def loadSlickDatabase(configKey: String)(implicit actorSystem: ClassicActorSystemProvider) = { val journalConfig = actorSystem.classicSystem.settings.config.getConfig(configKey) SlickExtension(actorSystem).database(journalConfig) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/testkit/javadsl/SchemaUtils.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.testkit.javadsl import java.util.concurrent.CompletionStage import scala.jdk.FutureConverters._ import akka.Done import akka.actor.ClassicActorSystemProvider import akka.annotation.ApiMayChange import akka.persistence.jdbc.testkit.internal.SchemaUtilsImpl import org.slf4j.LoggerFactory object SchemaUtils { private val logger = LoggerFactory.getLogger("akka.persistence.jdbc.testkit.javadsl.SchemaUtils") /** * Drops the schema for both the journal and the snapshot table using the default schema definition. * * For information about the different schemas and supported databases consult * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema * * This utility method is intended to be used for testing only. * For production, it's recommended to run any DDL statements before the system is started. * * This method will automatically detect the configured database using the settings from `jdbc-journal` config. * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead. * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details. */ @ApiMayChange def dropIfExists(actorSystem: ClassicActorSystemProvider): CompletionStage[Done] = dropIfExists(configKey = "jdbc-journal", actorSystem) /** * Drops the schema for both the journal and the snapshot table using the default schema definition. * * For information about the different schemas and supported databases consult * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema * * This utility method is intended to be used for testing only. * For production, it's recommended to run any DDL statements before the system is started. * * This method will automatically detect the configured database using the settings from `configKey` config. * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead. * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details. * * @param configKey the database journal configuration key to use. */ @ApiMayChange def dropIfExists(configKey: String, actorSystem: ClassicActorSystemProvider): CompletionStage[Done] = SchemaUtilsImpl.dropIfExists(configKey, logger)(actorSystem).asJava /** * Creates the schema for both the journal and the snapshot table using the default schema definition. * * For information about the different schemas and supported databases consult * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema * * This utility method is intended to be used for testing only. * For production, it's recommended to run any DDL statements before the system is started. * * This method will automatically detect the configured database using the settings from `jdbc-journal` config. * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead. * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details. */ @ApiMayChange def createIfNotExists(actorSystem: ClassicActorSystemProvider): CompletionStage[Done] = createIfNotExists("jdbc-journal", actorSystem) /** * Creates the schema for both the journal and the snapshot table using the default schema definition. 
* * For information about the different schemas and supported databases consult * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema * * This utility method is intended to be used for testing only. * For production, it's recommended to run any DDL statements before the system is started. * * This method will automatically detect the configured database using the settings from `configKey` config. * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead. * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details. * * @param configKey the database journal configuration key to use. */ @ApiMayChange def createIfNotExists(configKey: String, actorSystem: ClassicActorSystemProvider): CompletionStage[Done] = SchemaUtilsImpl.createIfNotExists(configKey, logger)(actorSystem).asJava /** * This method can be used to load alternative DDL scripts. * * This utility method is intended to be used for testing only. * For production, it's recommended to run any DDL statements before the system is started. * * It will use the database settings found under `jdbc-journal`, or `akka-persistence-jdbc.shared-databases` if configured so. * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details. * * @param script the DDL script. The passed script can contain more than one SQL statement, separated by a ; (semi-colon). */ @ApiMayChange def applyScript(script: String, actorSystem: ClassicActorSystemProvider): CompletionStage[Done] = applyScript(script, separator = ";", configKey = "jdbc-journal", actorSystem) /** * This method can be used to load alternative DDL scripts. * * This utility method is intended to be used for testing only. * For production, it's recommended to run any DDL statements before the system is started. * * It will use the database settings found under `configKey`, or `akka-persistence-jdbc.shared-databases` if configured so. * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details. * * @param script the DDL script. The passed `script` can contain more than one SQL statement. * @param separator used to separate the different DDL statements. * @param configKey the database configuration key to use. Can be `jdbc-journal` or `jdbc-snapshot-store`. */ @ApiMayChange def applyScript( script: String, separator: String, configKey: String, actorSystem: ClassicActorSystemProvider): CompletionStage[Done] = SchemaUtilsImpl.applyScript(script, separator, configKey, logger)(actorSystem).asJava } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/testkit/scaladsl/SchemaUtils.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.testkit.scaladsl import scala.concurrent.Future import akka.Done import akka.actor.ClassicActorSystemProvider import akka.annotation.ApiMayChange import akka.persistence.jdbc.testkit.internal.SchemaUtilsImpl import org.slf4j.LoggerFactory object SchemaUtils { private val logger = LoggerFactory.getLogger("akka.persistence.jdbc.testkit.scaladsl.SchemaUtils") /** * Drops the schema for both the journal and the snapshot table using the default schema definition. 
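 * Example (editor's sketch, not part of the original source): dropping and re-creating the default
 * schema from a test, assuming an in-memory H2 database is configured under `jdbc-journal`:
 * {{{
 * import akka.actor.ActorSystem
 * import akka.persistence.jdbc.testkit.scaladsl.SchemaUtils
 *
 * // editor's illustration: assumes a `jdbc-journal` configuration (e.g. H2) is on the classpath
 * implicit val system: ActorSystem = ActorSystem("example")
 * import system.dispatcher
 *
 * val schemaReady =
 *   for {
 *     _ <- SchemaUtils.dropIfExists()
 *     _ <- SchemaUtils.createIfNotExists()
 *   } yield ()
 * }}}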
* * For information about the different schemas and supported databases consult * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema * * This utility method is intended to be used for testing only. * For production, it's recommended to run any DDL statements before the system is started. * * This method will automatically detect the configured database using the settings from `jdbc-journal` config. * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead. * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details. */ @ApiMayChange def dropIfExists()(implicit actorSystem: ClassicActorSystemProvider): Future[Done] = dropIfExists(configKey = "jdbc-journal") /** * Drops the schema for both the journal and the snapshot table using the default schema definition. * * For information about the different schemas and supported databases consult * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema * * This utility method is intended to be used for testing only. * For production, it's recommended to run any DDL statements before the system is started. * * This method will automatically detect the configured database using the settings from `configKey` config. * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead. * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details. * * @param configKey the database journal configuration key to use. */ @ApiMayChange def dropIfExists(configKey: String)(implicit actorSystem: ClassicActorSystemProvider): Future[Done] = SchemaUtilsImpl.dropIfExists(configKey, logger) /** * Creates the schema for both the journal and the snapshot table using the default schema definition. * * For information about the different schemas and supported databases consult * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema * * This utility method is intended to be used for testing only. * For production, it's recommended to run any DDL statements before the system is started. * * This method will automatically detect the configured database using the settings from `jdbc-journal` config. * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead. * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details. */ @ApiMayChange def createIfNotExists()(implicit actorSystem: ClassicActorSystemProvider): Future[Done] = createIfNotExists(configKey = "jdbc-journal") /** * Creates the schema for both the journal and the snapshot table using the default schema definition. * * For information about the different schemas and supported databases consult * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema * * This utility method is intended to be used for testing only. * For production, it's recommended to run any DDL statements before the system is started. * * This method will automatically detect the configured database using the settings from `configKey` config. * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead. 
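 * Example (editor's sketch, not part of the original source): initialising the schema for a journal
 * plugin configured under a custom key; the key name `"my-jdbc-journal"` below is purely illustrative:
 * {{{
 * import akka.actor.ActorSystem
 * import akka.persistence.jdbc.testkit.scaladsl.SchemaUtils
 *
 * implicit val system: ActorSystem = ActorSystem("example")
 * // editor's illustration: "my-jdbc-journal" must point at a journal config with database settings
 * val done = SchemaUtils.createIfNotExists(configKey = "my-jdbc-journal")
 * }}}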
* See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details. * * @param configKey the database journal configuration key to use. */ @ApiMayChange def createIfNotExists(configKey: String)(implicit actorSystem: ClassicActorSystemProvider): Future[Done] = SchemaUtilsImpl.createIfNotExists(configKey, logger) /** * This method can be used to load alternative DDL scripts. * * This utility method is intended to be used for testing only. * For production, it's recommended to run any DDL statements before the system is started. * * It will use the database settings found under `jdbc-journal`, or `akka-persistence-jdbc.shared-databases` if configured so. * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details. * * @param script the DDL script. The passed script can contain more than one SQL statement, separated by a ; (semi-colon). */ @ApiMayChange def applyScript(script: String)(implicit actorSystem: ClassicActorSystemProvider): Future[Done] = applyScript(script, separator = ";", configKey = "jdbc-journal") /** * This method can be used to load alternative DDL scripts. * * This utility method is intended to be used for testing only. * For production, it's recommended to create the table with DDL statements before the system is started. * * It will use the database settings found under `configKey`, or `akka-persistence-jdbc.shared-databases` if configured so. * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details. * * @param script the DDL script. The passed `script` can contain more than one SQL statement. * @param separator used to separate the different DDL statements. * @param configKey the database configuration key to use. Can be `jdbc-journal` or `jdbc-snapshot-store`. */ @ApiMayChange def applyScript(script: String, separator: String, configKey: String)( implicit actorSystem: ClassicActorSystemProvider): Future[Done] = SchemaUtilsImpl.applyScript(script, separator, configKey, logger) } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/util/BlockingOps.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.util import scala.concurrent.duration.{ FiniteDuration, _ } import scala.concurrent.{ Await, Future } object BlockingOps { implicit class BlockingFutureImplicits[T](val that: Future[T]) extends AnyVal { def futureValue(implicit awaitDuration: FiniteDuration = 24.hour): T = Await.result(that, awaitDuration) def printFutureValue(implicit awaitDuration: FiniteDuration = 24.hour): Unit = println(that.futureValue) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/util/ByteArrayOps.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.util import java.io.{ ByteArrayInputStream, InputStream } import java.util.Base64 object ByteArrayOps { implicit class ByteArrayImplicits(val that: Array[Byte]) extends AnyVal { def encodeBase64: String = Base64.getEncoder.encodeToString(that) def toInputStream: InputStream = new ByteArrayInputStream(that) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/util/ConfigOps.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.util import com.typesafe.config.Config import java.util.concurrent.TimeUnit import scala.concurrent.duration.FiniteDuration object ConfigOps { implicit class ConfigOperations(val config: Config) extends AnyVal { def asStringOption(key: String): Option[String] = if (config.hasPath(key)) { val value = config.getString(key).trim if (value.isEmpty) None else Some(value) } else None def asFiniteDuration(key: String): FiniteDuration = FiniteDuration(config.getDuration(key).toMillis, TimeUnit.MILLISECONDS) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/util/InputStreamOps.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.util import java.io.{ ByteArrayOutputStream, InputStream } import scala.concurrent.blocking object InputStreamOps { implicit class InputStreamImplicits(val is: InputStream) extends AnyVal { def toArray: Array[Byte] = blocking { /* based on https://stackoverflow.com/a/17861016/865265 * Thanks to * - https://stackoverflow.com/users/1435969/ivan-gammel * - https://stackoverflow.com/users/2619133/oliverkn */ val bos: ByteArrayOutputStream = new ByteArrayOutputStream val buffer: Array[Byte] = new Array[Byte](0xffff) var len: Int = is.read(buffer) while (len != -1) { bos.write(buffer, 0, len) len = is.read(buffer) } bos.toByteArray } } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/util/PluginVersionChecker.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.util import akka.annotation.InternalApi @InternalApi private[jdbc] object PluginVersionChecker { def check(): Unit = try { Class.forName("akka.persistence.jdbc.util.DefaultSlickDatabaseProvider") throw new RuntimeException( "Old version of Akka Persistence JDBC found on the classpath. Remove `com.github.dnvriend:akka-persistence-jdbc` from the classpath..") } catch { case _: ClassNotFoundException => // All good! That's intentional. // It's good if we don't have akka.persistence.jdbc.util.DefaultSlickDatabaseProvider around } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/util/StringOps.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.util import java.util.Base64 object StringOps { implicit class StringImplicits(val that: String) extends AnyVal { def toByteArray: Array[Byte] = Base64.getDecoder.decode(that) } } ================================================ FILE: core/src/main/scala/akka/persistence/jdbc/util/TrySeq.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.util import scala.annotation.nowarn import scala.collection.immutable._ import scala.util.{ Failure, Success, Try } object TrySeq { def sequence[A](seq: Seq[Try[A]]): Try[Seq[A]] = { @nowarn("msg=exhaustive") def recurse(remaining: Seq[Try[A]], processed: Seq[A]): Try[Seq[A]] = { remaining match { case Seq() => Success(processed) case Success(head) +: tail => recurse(remaining = tail, processed :+ head) case Failure(t) +: _ => Failure(t) } } recurse(seq, Vector.empty) } } ================================================ FILE: core/src/test/LICENSE ================================================ LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT THIS LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT (THIS "AGREEMENT") IS A LEGAL AGREEMENT BETWEEN YOU ("USER") AND LIGHTBEND, INC. ("LICENSOR"). BY CLICKING THE "I ACCEPT" BUTTON, OR INSTALLING, COPYING OR OTHERWISE USING LIGHTBEND’S SOFTWARE (THE “SOFTWARE”) AND ITS ASSOCIATED USER DOCUMENTATION, USER ACKNOWLEDGES THAT USER HAS REVIEWED AND ACCEPTS THIS AGREEMENT AND AGREES TO BE BOUND BY ALL OF ITS TERMS. IF YOU ARE AGREEING TO THIS AGREEMENT AS AN INDIVIDUAL, “USER” REFERS TO YOU INDIVIDUALLY. IF YOU ARE AGREEING TO THIS AGREEMENT AS A REPRESENTATIVE OF AN ENTITY, YOU REPRESENT THAT YOU HAVE THE AUTHORITY TO BIND THAT ENTITY AND “USER” REFERS TO THAT ENTITY AND ALL THE USERS ACCESSING THE SOFTWARE BY, THROUGH OR ON BEHALF OF THAT ENTITY. IF USER DOES NOT AGREE WITH ALL OF THE TERMS OF THIS AGREEMENT, DO NOT INSTALL, COPY OR OTHERWISE USE THE SOFTWARE OR ITS DOCUMENTATION. 1. DEFINITIONS. 1. “User System” means User’s website(s), computers, servers and other equipment and software upon and with which the Software is run. 2. “Documentation” means the user instructions and help files made available by Licensor for use with the Software, as may be updated from time to time by Licensor. 3. “Intellectual Property Rights” means all intellectual property rights or similar proprietary rights, including (a) patent rights and utility models, (b) copyrights and database rights, (c) trademarks, trade names, domain names and trade dress and the goodwill associated therewith, (d) trade secrets, (e) mask works, and (f) industrial design rights; in each case, including any registrations of, applications to register, and renewals and extensions of, any of the foregoing in any jurisdiction in the world. 4. “Open Source Software” means all software that is available under the GNU Affero General Public License (AGPL), GNU General Public License (GPL), GNU Lesser General Public License (LGPL), Mozilla Public License (MPL), Apache License, BSD licenses, or any other license that is approved by or similar to those approved by the Open Source Initiative (www.opensource.org). 2. LICENSES AND RESTRICTIONS. 1. License. 
Subject to User’s compliance with the terms and conditions of this Agreement, Licensor hereby grants to User, during the term of this Agreement, a limited, non-exclusive, non-transferable and non-sublicensable right to (i) install and execute one (1) copy of the Software in accordance with the Documentation, solely in binary form, and not for the benefit of any other person or entity, and (ii) access and use the Documentation, solely for User’s own internally purposes in support of End User’s permitted use of the Software. 2. Restrictions. User shall not, directly or indirectly, or permit any User or third party to: (a) reverse engineer, decompile, disassemble or otherwise attempt to discover the source code or underlying ideas or algorithms of the Software; (b) modify, translate, or create derivative works based on any element of the Software or any related Documentation (except to the extent applicable laws specifically prohibit such restriction for interoperability purposes, in which case you agree to first contact Licensor and provide Licensor an opportunity to create such changes as are needed for interoperability purposes); (c) use, rent, lease, distribute, sell, resell, assign, or otherwise transfer the Software or any copy thereof; (d) use the Software for timesharing purposes or otherwise for the benefit of any person or entity other than for the benefit of User and Users; (e) remove any proprietary notices from the Software or the Documentation or attempt to defeat any copy protection device included with the Software; or (f) use the Software for any purpose other than its intended purpose. 3. Reservation of Rights. Nothing in this Agreement shall be construed to give User a right to use or otherwise obtain access to any source code from which the Software is compiled or interpreted. Except as expressly granted in this Agreement, there are no other licenses granted to User, express, implied or by way of estoppel. All rights not granted in this Agreement are reserved by Licensor. 4. Open Source Software. Notwithstanding the foregoing, certain items of software included with the Software are Open Source Software and remains subject Open Source Software licenses. Such Open Source Software is not subject to the terms and conditions of this Agreement. Instead, each such item of Open Source Software is licensed under the terms of the end user license that accompanies such Open Source Software and nothing in this Agreement limits your rights under, or grants you rights that supersede, the terms and conditions of any applicable end user license for such Open Source Software. If required by any license for particular Open Source Software, Licensor makes such Open Source Software, and any Licensor modifications to that Open Source Software, available as further described in the Documentation. USE OF THE SOFTWARE IN ANY MANNER OTHER THAN AS PROVIDED IN THIS AGREEMENT IS STRICTLY PROHIBITED AND MAY INFRINGE ON THE INTELLECTUAL PROPERTY RIGHTS OF LICENSOR AND/OR ITS LICENSOR(S), SUBJECTING USER TO CIVIL AND CRIMINAL PENALTIES, INCLUDING WITHOUT LIMITATION MONETARY DAMAGES AND IMPRISONMENT FOR COPYRIGHT INFRINGEMENT. 3. USER OBLIGATIONS. 1. User System. User is responsible for (a) obtaining, deploying and maintaining the User System, and all computer hardware, software, modems, routers and other communications equipment necessary for User and its Users to install and use the Software; and (b) paying all third party fees and access charges incurred in connection with the foregoing. 
Licensor shall not be responsible for supplying any hardware, software or other equipment to User under this Agreement. 2. Compliance with Laws. User agrees to use the Software in compliance with all applicable laws, including local laws of the country or region in which User resides, and in compliance with all United States export laws and regulations. User shall not use the Software for any purpose prohibited by applicable law. 3. Trademarks and Tradenames. With regard to all copies of the Software permitted herein, User shall reproduce on such copies all Licensor copyright notices, and other proprietary notices appearing on and in the original copy of the software received from Licensor. Except as set forth in the foregoing sentence, User will not, during the term of this Agreement or thereafter, use any trademark of Licensor, or any word and/or symbol likely to be confused with any Licensor trademark, either alone or in any combination with other words and/or symbols. 4. SUPPORT AND MAINTENANCE. 1. Support. Licensor is not responsible for maintenance or support of the Software, or the equipment on which the Software resides or is used, under this Agreement. By accepting the license granted under this Agreement, User agrees that Licensor will be under no obligation to provide any support, maintenance or service in connection with the Software or such equipment. 2. Upgrades and Updates. Licensor may from time to time in its sole discretion develop and provide updates for the Software, which may include upgrades, bug fixes, patches, other error corrections, and/or new features (collectively, including related documentation, “Updates”). Updates may also modify or delete in their entirety certain features and functionality. You agree that Licensor has no obligation to provide any Updates or to continue to provide or enable any particular features or functionality. 5. REPRESENTATIONS AND WARRANTIES; DISCLAIMER. 1. Mutual Representations and Warranties. Each party represents, warrants and covenants that: (a) it has the full power and authority to enter into this Agreement and to perform its obligations hereunder, without the need for any consents, approvals or immunities not yet obtained; and (b) its acceptance of and performance under this Agreement shall not breach any oral or written agreement with any third party or any obligation owed by it to any third party to keep any information or materials in confidence or in trust. 2. Disclaimer. EXCEPT FOR THE WARRANTIES SET FORTH IN THIS SECTION 5, THE SOFTWARE AND THE DOCUMENTATION ARE PROVIDED ON AN AS-IS BASIS. USER’S USE OF THE SOFTWARE AND THE DOCUMENTATION IS AT USER’S OWN RISK. LICENSOR DOES NOT MAKE, AND HEREBY DISCLAIMS, ANY AND ALL OTHER EXPRESS, STATUTORY AND IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT AND TITLE, QUALITY, SUITABILITY, OPERABILITY, CONDITION, SYSTEM INTEGRATION, NON-INTERFERENCE, WORKMANSHIP, TRUTH, ACCURACY (OF DATA OR ANY OTHER INFORMATION OR CONTENT), ABSENCE OF DEFECTS, WHETHER LATENT OR PATENT, AND ANY WARRANTIES ARISING FROM A COURSE OF DEALING, USAGE, OR TRADE PRACTICE. LICENSOR ALSO DOES NOT WARRANT THAT THE FUNCTIONS CONTAINED IN, PERFORMED AND/OR PROVIDED BY THE SOFTWARE WILL MEET USER’S REQUIREMENTS, THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR ERROR-FREE, THAT THE SOFTWARE WILL BE COMPATIBLE OR WORK WITH ANY THIRD-PARTY SOFTWARE, APPLICATIONS OR DEVICES, OR THAT DEFECTS IN THE SOFTWARE WILL BE CORRECTED. 
USER EXPRESSLY ACKNOWLEDGES AND AGREES THAT, TO THE EXTENT PERMITTED BY APPLICABLE LAW, ITS USE OF THE SOFTWARE IS AT ITS SOLE RISK AND THAT THE ENTIRE RISK AS TO SATISFACTORY QUALITY, PERFORMANCE, ACCURACY AND EFFORT IS WITH USER. USER FURTHER ACKNOWLEDGES AND AGREES THAT THE SOFTWARE IS NOT INTENDED OR SUITABLE FOR USE IN SITUATIONS OR ENVIRONMENTS WHERE THE FAILURE OR TIME DELAYS OF, OR ERRORS OR INACCURACIES IN THE CONTENT, DATA OR INFORMATION PROVIDED BY THE SOFTWARE COULD LEAD TO DEATH, PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE. ANY WARRANTIES MADE BY LICENSOR ARE FOR THE BENEFIT OF USER ONLY AND NOT FOR THE BENEFIT OF ANY THIRD PARTY. THE SOFTWARE AND THE DOCUMENTATION ARE LICENSED AND NOT SOLD. NO AGENT OF LICENSOR IS AUTHORIZED TO ALTER OR EXPAND THE WARRANTIES OF LICENSOR AS SET FORTH HEREIN. 6. INDEMNIFICATION. User shall defend Licensor and its licensors and their respective officers, directors and employees (“Licensor Indemnified Parties”) from and against any and all Third-Party Claims which arise out of or relate to: (a) User’s use or alleged use of the Software other than as permitted under this Agreement; or (b) arising out of or relating to any violation of Section 2.2, or any violation of applicable laws. User shall pay all damages, costs and expenses, including attorneys’ fees and costs (whether by settlement or award of by a final judicial judgment) incurred by the Licensor Indemnified Parties from any such Third-Party Claim. In no event shall Licensor settle any claim without User’s prior written approval. Licensor may, at its own expense, engage separate counsel to advise Licensor regarding a Third-Party Claim and to participate in the defense of the claim, subject to User’s right to control the defense and settlement. If you are a New Jersey resident, this indemnification clause is to be only as broad and inclusive as is permitted by the law of the state of New Jersey. 7. CONFIDENTIALITY. 1. Confidential Information. User acknowledges that the Software contains valuable proprietary information and trade secrets and that unauthorized or improper use of the Software will result in irreparable harm to Licensor for which monetary damages would be inadequate and for which Licensor may be entitled to immediate injunctive relief. Accordingly, you will maintain the confidentiality of the proprietary information and not sell, license, publish, display, distribute, disclose or otherwise make available such proprietary information to any third party, nor use such information except as authorized by this Agreement. 2. Injunctive Relief. User agrees that any unauthorized disclosure of confidential information may cause immediate and irreparable injury to Licensor and that, in the event of such breach, Licensor will be entitled, in addition to any other available remedies, to seek immediate injunctive and other equitable relief, without bond and without the necessity of showing actual monetary damages. 8. PROPRIETARY RIGHTS. 1. Licensor. As between Licensor and User, all right, title and interest in the Software, the Documentation, and any other Licensor materials furnished or made available hereunder, and all modifications and enhancements thereof, and all suggestions, ideas and feedback proposed by User regarding the Software, including all copyright rights, patent rights and other Intellectual Property Rights in each of the foregoing, belong to and are retained solely by Licensor or Licensor’s licensors and providers, as applicable. 
User hereby does and will irrevocably assign to Licensor all evaluations, ideas, feedback and suggestions made by User to Licensor regarding the Software or the Documentation (collectively, “Feedback”) and all Intellectual Property Rights in the Feedback. 9. LIMITATION OF LIABILITY. 1. No Consequential Damages. NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR ANY INDIRECT, INCIDENTAL, SPECIAL, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR ANY DAMAGES FOR LOST DATA, BUSINESS INTERRUPTION, LOST PROFITS, LOST REVENUE OR LOST BUSINESS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT, EVEN IF LICENSOR OR ITS LICENSORS OR USER HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES, INCLUDING WITHOUT LIMITATION, ANY SUCH DAMAGES ARISING OUT OF THE LICENSING, PROVISION OR USE OF THE SOFTWARE OR THE RESULTS OF THE USE OF THE SOFTWARE. LICENSOR WILL NOT BE LIABLE FOR THE COST OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES. 2. LIMITS ON LIABILITY. NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR CUMULATIVE, AGGREGATE DAMAGES GREATER THAN FIVE HUNDRED DOLLARS (US $500). 3. ESSENTIAL PURPOSE. USER ACKNOWLEDGES THAT THE TERMS IN THIS SECTION 9 (LIMITATION OF LIABILITY) SHALL APPLY TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW AND SHALL APPLY EVEN IF AN EXCLUSIVE OR LIMITED REMEDY STATED HEREIN FAILS OF ITS ESSENTIAL PURPOSE. SOME JURISDICTIONS DO NOT ALLOW CERTAIN LIMITATIONS OF LIABILITY, SO SOME OR ALL OF THE ABOVE LIMITATIONS OF LIABILITY MAY NOT APPLY TO YOU. IF YOU ARE A NEW JERSEY RESIDENT, THIS LIMITATION OF LIABILITY SECTION IS TO BE ONLY AS BROAD AND INCLUSIVE AS IS PERMITTED BY THE LAW OF THE STATE OF NEW JERSEY. 10. TERM AND TERMINATION. 1. Term. This Agreement and User’s right to use the Software commences on earlier of the date that User: (a) installs the Software, (b) begins using the Software or (c) otherwise demonstrates assent to this Agreement. User’s right to use the Software shall continue until such time as this Agreement is terminated (the “Term”). 2. Termination for Cause. A party may terminate this Agreement, upon written notice to the other party in the event the other party files a petition for bankruptcy or has a petition for bankruptcy filed against it that is not dismissed within sixty (60) calendar days after filing or admits its inability to pay its debts as they mature, makes an assignment for the benefit of its creditors or ceases to function as a going concern or to conduct its operations in the normal course of business and such termination shall occur immediately upon notice. Licensor may terminate this Agreement at any time without notice if it ceases to support the Software, which Licensor may do in its sole discretion. In addition, this Agreement will terminate immediately and automatically without any notice if User breaches any of its terms and conditions. 3. Termination for Convenience. Either party may terminate this Agreement for convenience on at least thirty (30) calendar days prior written notice to the other party. User may also terminate this Agreement by ceasing all use of the Software. 4. Effects of Termination. Upon expiration or termination of this Agreement, User’s shall cease all use of the Software and the Documentation and shall destroy all copies of the Software in User’s possession or control. 5. Survival. 
This Section and Sections 1, 2.2 (Restrictions), 2.3 (Reservation of Rights), 2.4 (Open Source Software), 5.2 (Disclaimer), 6 (Indemnification), 7 (Confidentiality), 8 (Proprietary Rights), 9 (Limitation of Liability), 10.4 (Effects of Termination) and 11 (Miscellaneous) shall survive any termination or expiration of this Agreement. 11. MISCELLANEOUS. 1. Notices. Licensor may give notice to User by means of a general notice through electronic mail to User’s e-mail address, or by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service to User’s address on record with Licensor. User may give notice to Licensor by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service addressed to Licensor, Lightbend Inc., 580 California, #1231, San Francisco, CA 94104, Attention: User Support. Notice shall be deemed to have been given upon receipt or, if earlier, two (2) business days after mailing, as applicable. All communications and notices to be made or given pursuant to this Agreement shall be in the English language. 2. Governing Law. This Agreement and the rights and obligations of the parties to and under this agreement shall be governed by and construed under the laws of the United States and the State of California as applied to agreements entered into and to be performed in such State without giving effect to conflicts of laws rules or principles. The parties agree that the United Nations Convention on Contracts for the International Sale of Goods is specifically excluded from application to this Agreement and that the application of the Uniform Computer Information Transactions Act (UCITA) is specifically disclaimed. Any dispute arising out of or in connection with this Agreement, including but not limited to any question regarding its existence, interpretation, validity, performance, or termination, or any dispute between the parties arising from the parties' relationship created by this Agreement, shall be referred to and finally resolved by arbitration administered by the American Arbitration Association under its rules. The number of arbitrators shall be one (1). The parties shall endeavor to agree upon the sole arbitrator and jointly nominate the arbitrator. If the parties cannot agree upon the sole arbitrator within a time prescribed by AAA, the parties shall request the AAA to propose five (5) arbitrators and each party shall rank the proposed arbitrators. The AAA shall appoint an arbitrator from the list of five (5), based upon the parties' rankings. 3. U.S. Government Users. If User is a Federal Government entity, Licensor provides the Software and the Documentation, including related software and technology, for ultimate Federal Government end use solely in accordance with the following: Government technical data rights include only those rights customarily provided to the public with a commercial item or process and Government software rights related to the Software and the Documentation include only those rights customarily provided to the public, as defined in this Agreement. The technical data rights and customary commercial software license is provided in accordance with FAR 12.211 (Technical Data) and FAR 12.212 (Software) and, for Department of Defense transactions, DFAR 252.227-7015 (Technical Data – Commercial Items) and DFAR 227.7202-3 (Rights in Commercial Computer Software or Computer Software Documentation). 
If greater rights are needed, a mutually acceptable written addendum specifically conveying such rights must be included in this Agreement. 4. Export. The Software utilizes software and technology that may be subject to United States and foreign export controls. User acknowledges and agrees that the Software shall not be used, and none of the underlying information, software, or technology may be transferred or otherwise exported or re-exported to countries as to which the United States maintains an embargo (collectively, “Embargoed Countries”), or to or by a national or resident thereof, or any person or entity on the U.S. Department of Treasury’s List of Specially Designated Nationals or the U.S. Department of Commerce’s Table of Denial Orders (collectively, “Designated Nationals”). The lists of Embargoed Countries and Designated Nationals are subject to change without notice. By using the Software, User represents and warrants that it is not located in, under the control of, or a national or resident of an Embargoed Country or Designated National. The Software may use encryption technology that is subject to licensing requirements under the U.S. Export Administration Regulations, 15 C.F.R. Parts 730-774 and Council Regulation (EC) No. 1334/2000. User agrees to comply strictly with all applicable export laws and assume sole responsibility for obtaining licenses to export or re-export as may be required. Licensor and its licensors make no representation that the Software is appropriate or available for use in other locations. By using the Software, User represents and warrants that it is not located in any such country or on any such list. 5. General. User shall not assign its rights hereunder, or delegate the performance of any of its duties or obligations hereunder, whether by merger, acquisition, sale of assets, operation of law, or otherwise, without the prior written consent of Licensor. Any purported assignment in violation of the preceding sentence is null and void. Subject to the foregoing, this Agreement shall be binding upon, and inure to the benefit of, the successors and assigns of the parties thereto. Except as otherwise specified in this Agreement, this Agreement may be amended or supplemented only by a writing that refers explicitly to this Agreement and that is signed on behalf of both parties. No waiver will be implied from conduct or failure to enforce rights. No waiver will be effective unless in a writing signed on behalf of the party against whom the waiver is asserted. If any of this Agreement is found invalid or unenforceable that term will be enforced to the maximum extent permitted by law and the remainder of this Agreement will remain in full force. Nothing contained herein shall be construed as creating an agency, partnership, or other form of joint enterprise between the parties. This Agreement constitutes the entire agreement between the parties relating to this subject matter and supersedes all prior or simultaneous understandings, representations, discussions, negotiations, and agreements, whether written or oral. 
Neither party shall be liable to the other party or any third party for failure or delay in performing its obligations under this Agreement when such failure or delay is due to any cause beyond the control of the party concerned, including, without limitation, force majeure, governmental orders or restrictions, fire, or flood, provided that upon cessation of such events such party shall thereupon promptly perform or complete the performance of its obligations hereunder. ================================================ FILE: core/src/test/java/akka/persistence/jdbc/JavadslSnippets.java ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc; import akka.Done; import akka.NotUsed; import akka.actor.ActorSystem; // #create import akka.persistence.jdbc.testkit.javadsl.SchemaUtils; // #create // #read-journal import akka.persistence.query.*; import akka.persistence.jdbc.query.javadsl.JdbcReadJournal; // #read-journal // #persistence-ids import akka.stream.javadsl.Source; import akka.persistence.query.PersistenceQuery; import akka.persistence.jdbc.query.javadsl.JdbcReadJournal; // #persistence-ids // #events-by-persistence-id import akka.stream.javadsl.Source; import akka.persistence.query.PersistenceQuery; import akka.persistence.query.EventEnvelope; import akka.persistence.jdbc.query.javadsl.JdbcReadJournal; // #events-by-persistence-id // #events-by-tag import akka.stream.javadsl.Source; import akka.persistence.query.PersistenceQuery; import akka.persistence.query.EventEnvelope; import akka.persistence.jdbc.query.javadsl.JdbcReadJournal; // #events-by-tag import java.util.concurrent.CompletionStage; final class JavadslSnippets { void create() { // #create ActorSystem actorSystem = ActorSystem.create("example"); CompletionStage<Done> done = SchemaUtils.createIfNotExists(actorSystem); // #create } void readJournal() { ActorSystem system = ActorSystem.create("example"); // #read-journal final JdbcReadJournal readJournal = PersistenceQuery.get(system) .getReadJournalFor(JdbcReadJournal.class, JdbcReadJournal.Identifier()); // #read-journal } void persistenceIds() { ActorSystem system = ActorSystem.create(); // #persistence-ids JdbcReadJournal readJournal = PersistenceQuery.get(system) .getReadJournalFor(JdbcReadJournal.class, JdbcReadJournal.Identifier()); Source<String, NotUsed> willNotCompleteTheStream = readJournal.persistenceIds(); Source<String, NotUsed> willCompleteTheStream = readJournal.currentPersistenceIds(); // #persistence-ids } void eventsByPersistenceIds() { ActorSystem system = ActorSystem.create(); // #events-by-persistence-id JdbcReadJournal readJournal = PersistenceQuery.get(system) .getReadJournalFor(JdbcReadJournal.class, JdbcReadJournal.Identifier()); Source<EventEnvelope, NotUsed> willNotCompleteTheStream = readJournal.eventsByPersistenceId("some-persistence-id", 0L, Long.MAX_VALUE); Source<EventEnvelope, NotUsed> willCompleteTheStream = readJournal.currentEventsByPersistenceId("some-persistence-id", 0L, Long.MAX_VALUE); // #events-by-persistence-id } void eventsByTag() { ActorSystem system = ActorSystem.create(); // #events-by-tag JdbcReadJournal readJournal = PersistenceQuery.get(system) .getReadJournalFor(JdbcReadJournal.class, JdbcReadJournal.Identifier()); Source<EventEnvelope, NotUsed> willNotCompleteTheStream = readJournal.eventsByTag("apple", Offset.sequence(0L)); Source<EventEnvelope, NotUsed> willCompleteTheStream = readJournal.currentEventsByTag("apple", Offset.sequence(0L)); // #events-by-tag } } ================================================ FILE: 
core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.state; import java.util.concurrent.CompletionStage; import akka.actor.ActorSystem; import akka.Done; import akka.NotUsed; // #create import akka.persistence.jdbc.testkit.javadsl.SchemaUtils; // #create // #jdbc-durable-state-store import akka.persistence.state.DurableStateStoreRegistry; import akka.persistence.jdbc.state.javadsl.JdbcDurableStateStore; // #jdbc-durable-state-store // #get-object import akka.persistence.state.DurableStateStoreRegistry; import akka.persistence.jdbc.state.javadsl.JdbcDurableStateStore; import akka.persistence.state.javadsl.GetObjectResult; // #get-object // #upsert-get-object import akka.persistence.state.DurableStateStoreRegistry; import akka.persistence.jdbc.state.javadsl.JdbcDurableStateStore; import akka.persistence.state.javadsl.GetObjectResult; // #upsert-get-object // #delete-object import akka.persistence.state.DurableStateStoreRegistry; import akka.persistence.jdbc.state.javadsl.JdbcDurableStateStore; // #delete-object // #current-changes import akka.NotUsed; import akka.stream.javadsl.Source; import akka.persistence.state.DurableStateStoreRegistry; import akka.persistence.jdbc.state.javadsl.JdbcDurableStateStore; import akka.persistence.query.DurableStateChange; import akka.persistence.query.NoOffset; // #current-changes // #changes import akka.NotUsed; import akka.stream.javadsl.Source; import akka.persistence.state.DurableStateStoreRegistry; import akka.persistence.jdbc.state.javadsl.JdbcDurableStateStore; import akka.persistence.query.DurableStateChange; import akka.persistence.query.NoOffset; // #changes final class JavadslSnippets { void create() { // #create ActorSystem system = ActorSystem.create("example"); CompletionStage<Done> done = SchemaUtils.createIfNotExists(system); // #create } void durableStatePlugin() { ActorSystem system = ActorSystem.create("example"); // #jdbc-durable-state-store @SuppressWarnings("unchecked") JdbcDurableStateStore<String> store = DurableStateStoreRegistry.get(system) .getDurableStateStoreFor( JdbcDurableStateStore.class, JdbcDurableStateStore.Identifier()); // #jdbc-durable-state-store } void getObject() { ActorSystem system = ActorSystem.create("example"); // #get-object @SuppressWarnings("unchecked") JdbcDurableStateStore<String> store = DurableStateStoreRegistry.get(system) .getDurableStateStoreFor( JdbcDurableStateStore.class, JdbcDurableStateStore.Identifier()); CompletionStage<GetObjectResult<String>> futureResult = store.getObject("InvalidPersistenceId"); try { GetObjectResult<String> result = futureResult.toCompletableFuture().get(); assert !result.value().isPresent(); } catch (Exception e) { // handle exceptions } // #get-object } void upsertAndGetObject() { ActorSystem system = ActorSystem.create("example"); // #upsert-get-object @SuppressWarnings("unchecked") JdbcDurableStateStore<String> store = DurableStateStoreRegistry.get(system) .getDurableStateStoreFor( JdbcDurableStateStore.class, JdbcDurableStateStore.Identifier()); CompletionStage<GetObjectResult<String>> r = store .upsertObject("p234", 1, "a valid string", "t123") .thenCompose(d -> store.getObject("p234")) .thenCompose(o -> store.upsertObject("p234", 2, "updated valid string", "t123")) .thenCompose(d -> store.getObject("p234")); try { assert r.toCompletableFuture().get().value().get().equals("updated valid string"); } catch (Exception e) { // handle exceptions } // #upsert-get-object } void 
deleteObject() { ActorSystem system = ActorSystem.create("example"); // #delete-object @SuppressWarnings("unchecked") JdbcDurableStateStore<String> store = DurableStateStoreRegistry.get(system) .getDurableStateStoreFor( JdbcDurableStateStore.class, JdbcDurableStateStore.Identifier()); CompletionStage<Done> futureResult = store.deleteObject("p123"); try { assert futureResult.toCompletableFuture().get().equals(Done.getInstance()); } catch (Exception e) { // handle exceptions } // #delete-object } void currentChanges() { ActorSystem system = ActorSystem.create("example"); // #current-changes @SuppressWarnings("unchecked") JdbcDurableStateStore<String> store = DurableStateStoreRegistry.get(system) .getDurableStateStoreFor( JdbcDurableStateStore.class, JdbcDurableStateStore.Identifier()); Source<DurableStateChange<String>, NotUsed> willCompleteTheStream = store.currentChanges("tag-1", NoOffset.getInstance()); // #current-changes } void changes() { ActorSystem system = ActorSystem.create("example"); // #changes @SuppressWarnings("unchecked") JdbcDurableStateStore<String> store = DurableStateStoreRegistry.get(system) .getDurableStateStoreFor( JdbcDurableStateStore.class, JdbcDurableStateStore.Identifier()); Source<DurableStateChange<String>, NotUsed> willNotCompleteTheStream = store.changes("tag-1", NoOffset.getInstance()); // #changes } } ================================================ FILE: core/src/test/resources/general.conf ================================================ # Copyright 2016 Dennis Vriend # Copyright (C) 2019 - 2022 Lightbend Inc. // This file contains the general settings which are shared in all akka-persistence-jdbc tests akka { stdout-loglevel = off // defaults to WARNING, can be disabled with off. The stdout-loglevel is only in effect during system startup and shutdown log-dead-letters-during-shutdown = on loglevel = debug log-dead-letters = on log-config-on-start = off // Log the complete configuration at INFO level when the actor system is started loggers = ["akka.event.slf4j.Slf4jLogger"] logging-filter = "akka.event.slf4j.Slf4jLoggingFilter" actor { // Required until https://github.com/akka/akka/pull/28333 is available allow-java-serialization = on debug { receive = on // log all messages sent to an actor if that actor's receive method is a LoggingReceive autoreceive = off // log all special messages like Kill, PoisonPill etc sent to all actors lifecycle = off // log all actor lifecycle events of all actors fsm = off // enable logging of all events, transitions and timers of FSM Actors that extend LoggingFSM event-stream = off // enable logging of subscriptions (subscribe/unsubscribe) on the ActorSystem.eventStream } } } docker { host = "localhost" host = ${?VM_HOST} } jdbc-journal { event-adapters { test-write-event-adapter = "akka.persistence.jdbc.query.EventAdapterTest$TestWriteEventAdapter" test-read-event-adapter = "akka.persistence.jdbc.query.EventAdapterTest$TestReadEventAdapter" } event-adapter-bindings { "akka.persistence.jdbc.query.EventAdapterTest$Event" = test-write-event-adapter "akka.persistence.jdbc.query.EventAdapterTest$TaggedEvent" = test-write-event-adapter "akka.persistence.jdbc.query.EventAdapterTest$TaggedAsyncEvent" = test-write-event-adapter "akka.persistence.jdbc.query.EventAdapterTest$EventAdapted" = test-read-event-adapter } } jdbc-read-journal { refresh-interval = "10ms" max-buffer-size = "500" } slick.db.idleTimeout = 10000 // 10 seconds ================================================ FILE: core/src/test/resources/h2-application.conf ================================================ # Copyright 2016 Dennis Vriend # 
Copyright (C) 2019 - 2022 Lightbend Inc. // general.conf is included only for shared settings used for the akka-persistence-jdbc tests include "general.conf" akka { persistence { journal { plugin = "jdbc-journal" // Enable the line below to automatically start the journal when the actorsystem is started // auto-start-journals = ["jdbc-journal"] } snapshot-store { plugin = "jdbc-snapshot-store" // Enable the line below to automatically start the snapshot-store when the actorsystem is started // auto-start-snapshot-stores = ["jdbc-snapshot-store"] } } } jdbc-journal { slick = ${slick} } # the akka-persistence-snapshot-store in use jdbc-snapshot-store { slick = ${slick} } # the akka-persistence-query provider in use jdbc-read-journal { slick = ${slick} } # the akka-persistence-jdbc provider in use for durable state store jdbc-durable-state-store { slick = ${slick} } another-jdbc-durable-state-store = ${jdbc-durable-state-store} slick { profile = "slick.jdbc.H2Profile$" db { url = "jdbc:h2:mem:test-database;DATABASE_TO_UPPER=false;" user = "root" password = "root" driver = "org.h2.Driver" numThreads = 5 maxConnections = 5 minConnections = 1 } } ================================================ FILE: core/src/test/resources/h2-default-mode-application.conf ================================================ # Copyright (C) 2019 - 2025 Lightbend Inc. // general.conf is included only for shared settings used for the akka-persistence-jdbc tests include "general.conf" # Same as h2-application.conf but without DATABASE_TO_UPPER=false. This exercises H2 in its # default mode, where unquoted identifiers are uppercased — the failure mode that the durable # state identifier-quoting fix addresses. akka { persistence { journal { plugin = "jdbc-journal" } snapshot-store { plugin = "jdbc-snapshot-store" } } } jdbc-journal { slick = ${slick} } jdbc-snapshot-store { slick = ${slick} } jdbc-read-journal { slick = ${slick} } jdbc-durable-state-store { slick = ${slick} } another-jdbc-durable-state-store = ${jdbc-durable-state-store} slick { profile = "slick.jdbc.H2Profile$" db { url = "jdbc:h2:mem:test-database" user = "root" password = "root" driver = "org.h2.Driver" numThreads = 5 maxConnections = 5 minConnections = 1 } } ================================================ FILE: core/src/test/resources/h2-shared-db-application.conf ================================================ # Copyright 2016 Dennis Vriend # Copyright (C) 2019 - 2022 Lightbend Inc. 
include "general.conf" akka { persistence { journal { plugin = "jdbc-journal" // Enable the line below to automatically start the journal when the actorsystem is started // auto-start-journals = ["jdbc-journal"] } snapshot-store { plugin = "jdbc-snapshot-store" // Enable the line below to automatically start the snapshot-store when the actorsystem is started // auto-start-snapshot-stores = ["jdbc-snapshot-store"] } } } akka-persistence-jdbc { shared-databases { slick { profile = "slick.jdbc.H2Profile$" db { url = "jdbc:h2:mem:test-database;DATABASE_TO_UPPER=false;" user = "root" password = "root" driver = "org.h2.Driver" numThreads = 5 maxConnections = 5 minConnections = 1 } } } } jdbc-journal { use-shared-db = "slick" } # the akka-persistence-snapshot-store in use jdbc-snapshot-store { use-shared-db = "slick" } # the akka-persistence-query provider in use jdbc-read-journal { use-shared-db = "slick" } ================================================ FILE: core/src/test/resources/h2-two-read-journals-application.conf ================================================ # Copyright 2016 Dennis Vriend # Copyright (C) 2019 - 2022 Lightbend Inc. include "h2-application.conf" // In this case we use exactly the same config for the second journal // (this includes the defaults form reference.conf) jdbc-read-journal-number-two = ${jdbc-read-journal} ================================================ FILE: core/src/test/resources/jndi-application.conf ================================================ # Copyright 2016 Dennis Vriend # Copyright (C) 2019 - 2022 Lightbend Inc. // general.conf is included only for shared settings used for the akka-persistence-jdbc tests include "general.conf" akka { persistence { journal { plugin = "jdbc-journal" // Enable the line below to automatically start the journal when the actorsystem is started // auto-start-journals = ["jdbc-journal"] } snapshot-store { plugin = "jdbc-snapshot-store" // Enable the line below to automatically start the snapshot-store when the actorsystem is started // auto-start-snapshot-stores = ["jdbc-snapshot-store"] } } } jdbc-journal { slick = ${slick} } # the akka-persistence-snapshot-store in use jdbc-snapshot-store { slick = ${slick} } # the akka-persistence-query provider in use jdbc-read-journal { slick = ${slick} } slick { profile = "slick.jdbc.PostgresProfile$" jndiName = "java:/jboss/datasources/bla" } ================================================ FILE: core/src/test/resources/jndi-shared-db-application.conf ================================================ # Copyright 2016 Dennis Vriend # Copyright (C) 2019 - 2022 Lightbend Inc. 
include "general.conf" akka { persistence { journal { plugin = "jdbc-journal" // Enable the line below to automatically start the journal when the actorsystem is started // auto-start-journals = ["jdbc-journal"] } snapshot-store { plugin = "jdbc-snapshot-store" // Enable the line below to automatically start the snapshot-store when the actorsystem is started // auto-start-snapshot-stores = ["jdbc-snapshot-store"] } } } akka-persistence-jdbc { shared-databases { slick { profile = "slick.jdbc.PostgresProfile$" jndiName = "java:/jboss/datasources/bla" } } } jdbc-journal { use-shared-db = "slick" } # the akka-persistence-snapshot-store in use jdbc-snapshot-store { use-shared-db = "slick" } # the akka-persistence-query provider in use jdbc-read-journal { use-shared-db = "slick" } ================================================ FILE: core/src/test/resources/logback-test.xml ================================================ debug %date{ISO8601} - %logger -> %-5level[%thread] [%X{akkaSource} %X{sourceActorSystem} %logger{0} - %msg%n ================================================ FILE: core/src/test/resources/mysql-application.conf ================================================ # Copyright 2016 Dennis Vriend # Copyright (C) 2019 - 2022 Lightbend Inc. // general.conf is included only for shared settings used for the akka-persistence-jdbc tests include "general.conf" akka { persistence { journal { plugin = "jdbc-journal" // Enable the line below to automatically start the journal when the actorsystem is started // auto-start-journals = ["jdbc-journal"] } snapshot-store { plugin = "jdbc-snapshot-store" // Enable the line below to automatically start the snapshot-store when the actorsystem is started // auto-start-snapshot-stores = ["jdbc-snapshot-store"] } } } jdbc-journal { slick = ${slick} } # the akka-persistence-snapshot-store in use jdbc-snapshot-store { slick = ${slick} } # the akka-persistence-query provider in use jdbc-read-journal { slick = ${slick} } # the akka-persistence-jdbc provider in use for durable state store jdbc-durable-state-store { slick = ${slick} } slick { profile = "slick.jdbc.MySQLProfile$" db { host = ${docker.host} host = ${?DB_HOST} url = "jdbc:mysql://"${slick.db.host}":3306/docker?cachePrepStmts=true&cacheCallableStmts=true&cacheServerConfiguration=true&useLocalSessionState=true&elideSetAutoCommits=true&alwaysSendSetIsolation=false&enableQueryTimeouts=false&connectionAttributes=none&verifyServerCertificate=false&useSSL=false&allowPublicKeyRetrieval=true&useUnicode=true&useLegacyDatetimeCode=false&serverTimezone=UTC&rewriteBatchedStatements=true" user = "root" password = "root" driver = "com.mysql.cj.jdbc.Driver" numThreads = 5 maxConnections = 5 minConnections = 1 } } ================================================ FILE: core/src/test/resources/mysql-shared-db-application.conf ================================================ # Copyright 2016 Dennis Vriend # Copyright (C) 2019 - 2022 Lightbend Inc. 
include "general.conf" akka { persistence { journal { plugin = "jdbc-journal" // Enable the line below to automatically start the journal when the actorsystem is started // auto-start-journals = ["jdbc-journal"] } snapshot-store { plugin = "jdbc-snapshot-store" // Enable the line below to automatically start the snapshot-store when the actorsystem is started // auto-start-snapshot-stores = ["jdbc-snapshot-store"] } } } akka-persistence-jdbc { shared-databases { slick { profile = "slick.jdbc.MySQLProfile$" db { host = ${docker.host} host = ${?DB_HOST} url = "jdbc:mysql://"${akka-persistence-jdbc.shared-databases.slick.db.host}":3306/docker?cachePrepStmts=true&cacheCallableStmts=true&cacheServerConfiguration=true&useLocalSessionState=true&elideSetAutoCommits=true&alwaysSendSetIsolation=false&enableQueryTimeouts=false&connectionAttributes=none&verifyServerCertificate=false&useSSL=false&allowPublicKeyRetrieval=true&useUnicode=true&useLegacyDatetimeCode=false&serverTimezone=UTC&rewriteBatchedStatements=true" user = "root" password = "root" driver = "com.mysql.cj.jdbc.Driver" numThreads = 5 maxConnections = 5 minConnections = 1 } } } } jdbc-journal { use-shared-db = "slick" } # the akka-persistence-snapshot-store in use jdbc-snapshot-store { use-shared-db = "slick" } # the akka-persistence-query provider in use jdbc-read-journal { use-shared-db = "slick" } # the akka-persistence-jdbc provider in use for durable state store jdbc-durable-state-store { use-shared-db = "slick" } ================================================ FILE: core/src/test/resources/oracle-application.conf ================================================ # Copyright 2016 Dennis Vriend # Copyright (C) 2019 - 2022 Lightbend Inc. // general.conf is included only for shared settings used for the akka-persistence-jdbc tests include "general.conf" include "oracle-schema-overrides.conf" akka { persistence { journal { plugin = "jdbc-journal" // Enable the line below to automatically start the journal when the actorsystem is started // auto-start-journals = ["jdbc-journal"] } snapshot-store { plugin = "jdbc-snapshot-store" // Enable the line below to automatically start the snapshot-store when the actorsystem is started // auto-start-snapshot-stores = ["jdbc-snapshot-store"] } } } jdbc-journal { slick = ${slick} } # the akka-persistence-snapshot-store in use jdbc-snapshot-store { slick = ${slick} } # the akka-persistence-query provider in use jdbc-read-journal { slick = ${slick} } slick { profile = "slick.jdbc.OracleProfile$" db { host = ${docker.host} host = ${?DB_HOST} url = "jdbc:oracle:thin:@//"${slick.db.host}":1521/FREEPDB1" user = "system" password = "oracle" driver = "oracle.jdbc.OracleDriver" numThreads = 5 maxConnections = 5 minConnections = 1 } } ================================================ FILE: core/src/test/resources/oracle-schema-overrides.conf ================================================ # Oracle does not support returning a column with a case senstive name so all the column names and table names need # to be caps. 
See: # https://github.com/slick/slick/issues/47 # https://groups.google.com/g/scalaquery/c/U431n-Z2cwM jdbc-snapshot-store { tables { snapshot { tableName = "SNAPSHOT" schemaName = "SYSTEM" columnNames { persistenceId = "PERSISTENCE_ID" sequenceNumber = "SEQUENCE_NUMBER" created = "CREATED" snapshotPayload = "SNAPSHOT_PAYLOAD" snapshotSerId = "SNAPSHOT_SER_ID" snapshotSerManifest = "SNAPSHOT_SER_MANIFEST" metaPayload = "META_PAYLOAD" metaSerId = "META_SER_ID" metaSerManifest = "META_SER_MANIFEST" } } } } jdbc-read-journal { tables { event_journal { tableName = "EVENT_JOURNAL" schemaName = "SYSTEM" } } } jdbc-journal { tables { event_journal { tableName = "EVENT_JOURNAL" schemaName = "SYSTEM" columnNames { ordering = "ORDERING" deleted = "DELETED" persistenceId = "PERSISTENCE_ID" sequenceNumber = "SEQUENCE_NUMBER" writer = "WRITER", writeTimestamp = "WRITE_TIMESTAMP" adapterManifest = "ADAPTER_MANIFEST" eventPayload = "EVENT_PAYLOAD" eventSerId = "EVENT_SER_ID" eventSerManifest = "EVENT_SER_MANIFEST" metaPayload = "META_PAYLOAD" metaSerId = "META_SER_ID" metaSerManifest = "META_SER_MANIFEST" } } event_tag { tableName = "EVENT_TAG" schemaName = "SYSTEM" columnNames { eventId = "EVENT_ID" persistenceId = "PERSISTENCE_ID" sequenceNumber = "SEQUENCE_NUMBER" tag = "TAG" } } } } ================================================ FILE: core/src/test/resources/oracle-shared-db-application.conf ================================================ # Copyright 2016 Dennis Vriend # Copyright (C) 2019 - 2022 Lightbend Inc. include "general.conf" include "oracle-schema-overrides.conf" akka { persistence { journal { plugin = "jdbc-journal" // Enable the line below to automatically start the journal when the actorsystem is started // auto-start-journals = ["jdbc-journal"] } snapshot-store { plugin = "jdbc-snapshot-store" // Enable the line below to automatically start the snapshot-store when the actorsystem is started // auto-start-snapshot-stores = ["jdbc-snapshot-store"] } } } akka-persistence-jdbc { shared-databases { slick { profile = "slick.jdbc.OracleProfile$" db { host = ${docker.host} host = ${?DB_HOST} url = "jdbc:oracle:thin:@//"${akka-persistence-jdbc.shared-databases.slick.db.host}":1521/FREEPDB1" user = "system" password = "oracle" driver = "oracle.jdbc.OracleDriver" numThreads = 5 maxConnections = 5 minConnections = 1 } } } } jdbc-journal { use-shared-db = "slick" } # the akka-persistence-snapshot-store in use jdbc-snapshot-store { use-shared-db = "slick" } # the akka-persistence-query provider in use jdbc-read-journal { use-shared-db = "slick" } ================================================ FILE: core/src/test/resources/postgres-application.conf ================================================ # Copyright 2016 Dennis Vriend # Copyright (C) 2019 - 2022 Lightbend Inc. 
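The Oracle schema overrides above are needed because Oracle upper-cases unquoted identifiers, so the plugin must address the tables and columns by their upper-case names. A minimal Scala sketch (illustrative only, not part of the repository sources; it assumes the test resources above are on the classpath) showing how the overrides surface in the parsed journal configuration:

object OracleOverridesSketch extends App {
  import akka.persistence.jdbc.config.JournalConfig
  import com.typesafe.config.ConfigFactory

  val oracleConfig = ConfigFactory.load("oracle-application.conf")
  val journalConfig = new JournalConfig(oracleConfig.getConfig("jdbc-journal"))

  // With oracle-schema-overrides.conf included, the event journal table is expected to be
  // reported as EVENT_JOURNAL (in the SYSTEM schema) instead of the lower-case defaults.
  println(journalConfig.eventJournalTableConfiguration.tableName)
}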
// general.conf is included only for shared settings used for the akka-persistence-jdbc tests include "general.conf" akka { persistence { journal { plugin = "jdbc-journal" // Enable the line below to automatically start the journal when the actorsystem is started // auto-start-journals = ["jdbc-journal"] } snapshot-store { plugin = "jdbc-snapshot-store" // Enable the line below to automatically start the snapshot-store when the actorsystem is started // auto-start-snapshot-stores = ["jdbc-snapshot-store"] } } } jdbc-journal { slick = ${slick} } # the akka-persistence-snapshot-store in use jdbc-snapshot-store { slick = ${slick} } # the akka-persistence-query provider in use jdbc-read-journal { slick = ${slick} } # the akka-persistence-jdbc provider in use for durable state store jdbc-durable-state-store { slick = ${slick} } slick { profile = "slick.jdbc.PostgresProfile$" db { host = "localhost" host = ${?DB_HOST} url = "jdbc:postgresql://"${slick.db.host}":5432/docker?reWriteBatchedInserts=true" user = "docker" password = "docker" driver = "org.postgresql.Driver" numThreads = 5 maxConnections = 5 minConnections = 1 } } ================================================ FILE: core/src/test/resources/postgres-shared-db-application.conf ================================================ # Copyright 2016 Dennis Vriend # Copyright (C) 2019 - 2022 Lightbend Inc. include "general.conf" akka { persistence { journal { plugin = "jdbc-journal" // Enable the line below to automatically start the journal when the actorsystem is started // auto-start-journals = ["jdbc-journal"] } snapshot-store { plugin = "jdbc-snapshot-store" // Enable the line below to automatically start the snapshot-store when the actorsystem is started // auto-start-snapshot-stores = ["jdbc-snapshot-store"] } } } akka-persistence-jdbc { shared-databases { slick { profile = "slick.jdbc.PostgresProfile$" db { host = "localhost" host = ${?DB_HOST} url = "jdbc:postgresql://"${akka-persistence-jdbc.shared-databases.slick.db.host}":5432/docker?reWriteBatchedInserts=true" user = "docker" password = "docker" driver = "org.postgresql.Driver" numThreads = 5 maxConnections = 5 minConnections = 1 } } } } jdbc-journal { use-shared-db = "slick" } # the akka-persistence-snapshot-store in use jdbc-snapshot-store { use-shared-db = "slick" } # the akka-persistence-query provider in use jdbc-read-journal { use-shared-db = "slick" } # the akka-persistence-jdbc provider in use for durable state store jdbc-durable-state-store { use-shared-db = "slick" } another-jdbc-durable-state-store = ${jdbc-durable-state-store} ================================================ FILE: core/src/test/resources/sqlserver-application.conf ================================================ # Copyright 2016 Dennis Vriend # Copyright (C) 2019 - 2022 Lightbend Inc. 
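postgres-shared-db-application.conf above shows the shared-database style: the plugin sections carry no slick block of their own and instead reference one pool defined under akka-persistence-jdbc.shared-databases.slick via use-shared-db. A minimal Scala sketch (illustrative only) contrasting it with the dedicated style of postgres-application.conf:

object SharedDatabaseSketch extends App {
  import com.typesafe.config.ConfigFactory

  // Dedicated style: every plugin section gets its own copy of the slick settings.
  val dedicated = ConfigFactory.load("postgres-application.conf")
  println(dedicated.getString("jdbc-journal.slick.db.url"))

  // Shared style: the plugins only name the shared pool; the connection settings live in one place.
  val shared = ConfigFactory.load("postgres-shared-db-application.conf")
  println(shared.getString("jdbc-journal.use-shared-db")) // slick
  println(shared.getString("akka-persistence-jdbc.shared-databases.slick.db.url"))
}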
include "general.conf" akka { persistence { journal { plugin = "jdbc-journal" // Enable the line below to automatically start the journal when the actorsystem is started // auto-start-journals = ["jdbc-journal"] } snapshot-store { plugin = "jdbc-snapshot-store" // Enable the line below to automatically start the snapshot-store when the actorsystem is started // auto-start-snapshot-stores = ["jdbc-snapshot-store"] } } } jdbc-journal { tables { journal { schemaName = "dbo" } } slick = ${slick} } # the akka-persistence-snapshot-store in use jdbc-snapshot-store { tables { snapshot { schemaName = "dbo" } } slick = ${slick} } # the akka-persistence-query provider in use jdbc-read-journal { tables { journal { schemaName = "dbo" } } slick = ${slick} } slick { profile = "slick.jdbc.SQLServerProfile$" db { host = ${docker.host} host = ${?DB_HOST} url = "jdbc:sqlserver://"${slick.db.host}":1433;databaseName=docker;integratedSecurity=false" user = "sa" password = "docker123abc#" driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver" numThreads = 5 maxConnections = 5 minConnections = 1 } } ================================================ FILE: core/src/test/resources/sqlserver-shared-db-application.conf ================================================ # Copyright 2016 Dennis Vriend # Copyright (C) 2019 - 2022 Lightbend Inc. include "general.conf" akka { persistence { journal { plugin = "jdbc-journal" // Enable the line below to automatically start the journal when the actorsystem is started // auto-start-journals = ["jdbc-journal"] } snapshot-store { plugin = "jdbc-snapshot-store" // Enable the line below to automatically start the snapshot-store when the actorsystem is started // auto-start-snapshot-stores = ["jdbc-snapshot-store"] } } } akka-persistence-jdbc { shared-databases { slick { profile = "slick.jdbc.SQLServerProfile$" db { host = ${docker.host} host = ${?DB_HOST} url = "jdbc:sqlserver://"${akka-persistence-jdbc.shared-databases.slick.db.host}":1433;databaseName=docker;integratedSecurity=false;" user = "sa" password = "docker123abc#" driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver" numThreads = 5 maxConnections = 5 minConnections = 1 } } } } jdbc-journal { use-shared-db = "slick" } # the akka-persistence-snapshot-store in use jdbc-snapshot-store { use-shared-db = "slick" } # the akka-persistence-query provider in use jdbc-read-journal { use-shared-db = "slick" } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/ScaladslSnippets.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc import akka.{ Done, NotUsed } import akka.actor.ActorSystem import scala.annotation.nowarn import scala.concurrent.Future @nowarn("msg=never used") object ScaladslSnippets { def create(): Unit = { // #create import akka.persistence.jdbc.testkit.scaladsl.SchemaUtils implicit val system: ActorSystem = ActorSystem("example") val done: Future[Done] = SchemaUtils.createIfNotExists() // #create } def readJournal(): Unit = { implicit val system: ActorSystem = ActorSystem() // #read-journal import akka.persistence.query.PersistenceQuery import akka.persistence.jdbc.query.scaladsl.JdbcReadJournal val readJournal: JdbcReadJournal = PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier) // #read-journal } def persistenceIds(): Unit = { implicit val system: ActorSystem = ActorSystem() // #persistence-ids import akka.stream.scaladsl.Source import akka.persistence.query.PersistenceQuery import akka.persistence.jdbc.query.scaladsl.JdbcReadJournal val readJournal: JdbcReadJournal = PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier) val willNotCompleteTheStream: Source[String, NotUsed] = readJournal.persistenceIds() val willCompleteTheStream: Source[String, NotUsed] = readJournal.currentPersistenceIds() // #persistence-ids } def eventsByPersistenceId(): Unit = { implicit val system: ActorSystem = ActorSystem() // #events-by-persistence-id import akka.stream.scaladsl.Source import akka.persistence.query.{ EventEnvelope, PersistenceQuery } import akka.persistence.jdbc.query.scaladsl.JdbcReadJournal val readJournal: JdbcReadJournal = PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier) val willNotCompleteTheStream: Source[EventEnvelope, NotUsed] = readJournal.eventsByPersistenceId("some-persistence-id", 0L, Long.MaxValue) val willCompleteTheStream: Source[EventEnvelope, NotUsed] = readJournal.currentEventsByPersistenceId("some-persistence-id", 0L, Long.MaxValue) // #events-by-persistence-id } def eventsByTag(): Unit = { implicit val system: ActorSystem = ActorSystem() // #events-by-tag import akka.stream.scaladsl.Source import akka.persistence.query.{ EventEnvelope, PersistenceQuery } import akka.persistence.jdbc.query.scaladsl.JdbcReadJournal val readJournal: JdbcReadJournal = PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier) val willNotCompleteTheStream: Source[EventEnvelope, NotUsed] = readJournal.eventsByTag("apple", 0L) val willCompleteTheStream: Source[EventEnvelope, NotUsed] = readJournal.currentEventsByTag("apple", 0L) // #events-by-tag } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/SharedActorSystemTestSpec.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc import akka.actor.ActorSystem import akka.persistence.jdbc.config.{ JournalConfig, ReadJournalConfig } import akka.persistence.jdbc.query.javadsl.JdbcReadJournal import akka.persistence.jdbc.util.DropCreate import akka.persistence.jdbc.db.SlickExtension import akka.serialization.SerializationExtension import akka.util.Timeout import com.typesafe.config.{ Config, ConfigFactory, ConfigValue } import org.scalatest.BeforeAndAfterAll import scala.concurrent.ExecutionContext import scala.concurrent.duration._ abstract class SharedActorSystemTestSpec(val config: Config) extends SimpleSpec with DropCreate with BeforeAndAfterAll { def this(config: String = "postgres-application.conf", configOverrides: Map[String, ConfigValue] = Map.empty) = this(configOverrides.foldLeft(ConfigFactory.load(config)) { case (conf, (path, configValue)) => conf.withValue(path, configValue) }) implicit lazy val system: ActorSystem = ActorSystem("test", config) implicit lazy val ec: ExecutionContext = system.dispatcher implicit val pc: PatienceConfig = PatienceConfig(timeout = 1.minute) implicit val timeout: Timeout = Timeout(1.minute) lazy val serialization = SerializationExtension(system) val cfg = config.getConfig("jdbc-journal") val journalConfig = new JournalConfig(cfg) lazy val db = SlickExtension(system).database(cfg).database val readJournalConfig = new ReadJournalConfig(config.getConfig(JdbcReadJournal.Identifier)) override protected def afterAll(): Unit = { db.close() system.terminate().futureValue } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/SimpleSpec.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc import akka.actor.{ ActorRef, ActorSystem } import akka.persistence.jdbc.util.ClasspathResources import akka.testkit.TestProbe import org.scalatest._ import org.scalatest.concurrent.{ Eventually, ScalaFutures } import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers trait SimpleSpec extends AnyFlatSpec with Matchers with ScalaFutures with TryValues with OptionValues with Eventually with ClasspathResources with BeforeAndAfterAll with BeforeAndAfterEach with GivenWhenThen { /** * Stops the given actors and waits for each of them to terminate */ def killActors(actors: ActorRef*)(implicit system: ActorSystem): Unit = { val tp = TestProbe() actors.foreach { (actor: ActorRef) => tp.watch(actor) system.stop(actor) tp.expectTerminated(actor) } } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/SingleActorSystemPerTestSpec.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc.
*/ package akka.persistence.jdbc import akka.actor.ActorSystem import akka.persistence.jdbc.config.{ JournalConfig, ReadJournalConfig, SlickConfiguration } import akka.persistence.jdbc.query.javadsl.JdbcReadJournal import akka.persistence.jdbc.util.DropCreate import akka.persistence.jdbc.db.SlickDatabase import akka.util.Timeout import com.typesafe.config.{ Config, ConfigFactory, ConfigValue } import org.scalatest.BeforeAndAfterEach import slick.jdbc.JdbcBackend.Database import scala.concurrent.duration._ abstract class SingleActorSystemPerTestSpec(val config: Config) extends SimpleSpec with DropCreate with BeforeAndAfterEach { def this(config: String = "postgres-application.conf", configOverrides: Map[String, ConfigValue] = Map.empty) = this(configOverrides.foldLeft(ConfigFactory.load(config)) { case (conf, (path, configValue)) => conf.withValue(path, configValue) }) override implicit val patienceConfig: PatienceConfig = PatienceConfig(timeout = 1.minute) implicit val timeout: Timeout = Timeout(1.minute) val cfg = config.getConfig("jdbc-journal") val journalConfig = new JournalConfig(cfg) val journalTableName = if (newDao) journalConfig.eventJournalTableConfiguration.tableName else journalConfig.journalTableConfiguration.tableName val tables = if (newDao) List(journalConfig.eventTagTableConfiguration.tableName, journalConfig.eventJournalTableConfiguration.tableName) else List(journalConfig.journalTableConfiguration.tableName) val profile = if (cfg.hasPath("slick.profile")) { SlickDatabase.profile(cfg, "slick") } else SlickDatabase.profile(config, "akka-persistence-jdbc.shared-databases.slick") val readJournalConfig = new ReadJournalConfig(config.getConfig(JdbcReadJournal.Identifier)) // The db is initialized lazily on first use and closed in the afterEach and afterAll blocks var dbOpt: Option[Database] = None def db: Database = { dbOpt.getOrElse { val newDb = if (cfg.hasPath("slick.profile")) { SlickDatabase.database(cfg, new SlickConfiguration(cfg.getConfig("slick")), "slick.db") } else SlickDatabase.database( config, new SlickConfiguration(config.getConfig("akka-persistence-jdbc.shared-databases.slick")), "akka-persistence-jdbc.shared-databases.slick.db") dbOpt = Some(newDb) newDb } } def closeDb(): Unit = { dbOpt.foreach(_.close()) dbOpt = None } override protected def afterEach(): Unit = { super.afterEach() closeDb() } override protected def afterAll(): Unit = { super.afterAll() closeDb() } def withActorSystem(f: ActorSystem => Unit): Unit = { implicit val system: ActorSystem = ActorSystem("test", config) f(system) system.terminate().futureValue } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/TablesTestSpec.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc.
*/ package akka.persistence.jdbc import akka.persistence.jdbc.config._ import com.typesafe.config.ConfigFactory import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers import scala.annotation.nowarn abstract class TablesTestSpec extends AnyFlatSpec with Matchers { def toColumnName[A](tableName: String)(columnName: String): String = s"$tableName.$columnName" @nowarn("msg=possible missing interpolator") val config = ConfigFactory .parseString(""" |akka-persistence-jdbc.slick.db { | host = | port = | name = |} | |jdbc-journal { | class = "akka.persistence.jdbc.journal.JdbcAsyncWriteJournal" | | tables { | journal { | tableName = "journal" | schemaName = "" | columnNames { | persistenceId = "persistence_id" | sequenceNumber = "sequence_number" | created = "created" | tags = "tags" | message = "message" | } | } | | deletedTo { | tableName = "deleted_to" | schemaName = "" | columnNames = { | persistenceId = "persistence_id" | deletedTo = "deleted_to" | } | } | } | | tagSeparator = "," | | serialization = on // alter only when using a custom dao | | dao = "akka.persistence.jdbc.dao.bytea.ByteArrayJournalDao" | | slick { | profile = "slick.jdbc.PostgresProfile" | db { | host = "localhost" | host = ${?POSTGRES_HOST} | port = "5432" | port = ${?POSTGRES_PORT} | name = "docker" | | url = "jdbc:postgresql://"${akka-persistence-jdbc.slick.db.host}":"${akka-persistence-jdbc.slick.db.port}"/"${akka-persistence-jdbc.slick.db.name} | user = "docker" | password = "docker" | driver = "org.postgresql.Driver" | | // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP | | // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing | // slick will use an async executor with a fixed size queue of 10.000 objects | // The async executor is a connection pool for asynchronous execution of blocking I/O actions. | // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. | queueSize = 10000 // number of objects that can be queued by the async exector | | connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds) | validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 | idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes) | maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. 
A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) | leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 | | initializationFailFast = true // This property controls whether the pool will "fail fast" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. Default: true | | keepAliveConnection = on // ensures that the database does not get dropped while we are using it | | numThreads = 4 // number of cores | maxConnections = 4 // same as numThreads | minConnections = 4 // same as numThreads | } | } |} | |# the akka-persistence-snapshot-store in use |jdbc-snapshot-store { | class = "akka.persistence.jdbc.snapshot.JdbcSnapshotStore" | | tables { | snapshot { | tableName = "snapshot" | schemaName = "" | columnNames { | persistenceId = "persistence_id" | sequenceNumber = "sequence_number" | created = "created" | snapshot = "snapshot" | } | } | } | | serialization = on // alter only when using a custom dao | | dao = "akka.persistence.jdbc.dao.bytea.ByteArraySnapshotDao" | | slick { | profile = "slick.jdbc.PostgresProfile" | db { | host = "localhost" | host = ${?POSTGRES_HOST} | port = "5432" | port = ${?POSTGRES_PORT} | name = "docker" | | url = "jdbc:postgresql://"${akka-persistence-jdbc.slick.db.host}":"${akka-persistence-jdbc.slick.db.port}"/"${akka-persistence-jdbc.slick.db.name} | user = "docker" | password = "docker" | driver = "org.postgresql.Driver" | | // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP | | // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing | // slick will use an async executor with a fixed size queue of 10.000 objects | // The async executor is a connection pool for asynchronous execution of blocking I/O actions. | // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. | queueSize = 10000 // number of objects that can be queued by the async exector | | connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds) | validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 | idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes) | maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. 
When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) | leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 | | initializationFailFast = true // This property controls whether the pool will "fail fast" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. Default: true | | keepAliveConnection = on // ensures that the database does not get dropped while we are using it | | numThreads = 4 // number of cores | maxConnections = 4 // same as numThreads | minConnections = 4 // same as numThreads | } | } |} | |# the akka-persistence-query provider in use |jdbc-read-journal { | class = "akka.persistence.jdbc.query.JdbcReadJournalProvider" | | # New events are retrieved (polled) with this interval. | refresh-interval = "1s" | | # How many events to fetch in one query (replay) and keep buffered until they | # are delivered downstreams. | max-buffer-size = "500" | | serialization = on // alter only when using a custom dao | | dao = "akka.persistence.jdbc.dao.bytea.ByteArrayJournalDao" | | tables { | journal { | tableName = "journal" | schemaName = "" | columnNames { | persistenceId = "persistence_id" | sequenceNumber = "sequence_number" | created = "created" | tags = "tags" | message = "message" | } | } | } | | tagSeparator = "," | | slick { | profile = "slick.jdbc.PostgresProfile" | db { | host = "localhost" | host = ${?POSTGRES_HOST} | port = "5432" | port = ${?POSTGRES_PORT} | name = "docker" | | url = "jdbc:postgresql://"${akka-persistence-jdbc.slick.db.host}":"${akka-persistence-jdbc.slick.db.port}"/"${akka-persistence-jdbc.slick.db.name} | user = "docker" | password = "docker" | driver = "org.postgresql.Driver" | | // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP | | // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing | // slick will use an async executor with a fixed size queue of 10.000 objects | // The async executor is a connection pool for asynchronous execution of blocking I/O actions. | // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. | queueSize = 10000 // number of objects that can be queued by the async exector | | connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds) | validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). 
Default: 5000 | idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes) | maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) | leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 | | initializationFailFast = true // This property controls whether the pool will "fail fast" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. Default: true | | keepAliveConnection = on // ensures that the database does not get dropped while we are using it | | numThreads = 4 // number of cores | maxConnections = 4 // same as numThreads | minConnections = 4 // same as numThreads | } | } |} """.stripMargin) .withFallback(ConfigFactory.load("reference")) .resolve() val journalConfig = new JournalConfig(config.getConfig("jdbc-journal")) val snapshotConfig = new SnapshotConfig(config.getConfig("jdbc-snapshot-store")) val readJournalConfig = new ReadJournalConfig(config.getConfig("jdbc-read-journal")) } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/cleanup/scaladsl/EventSourcedCleanupTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.cleanup.scaladsl import akka.persistence.jdbc.query.{ H2Cleaner, QueryTestSpec } import org.scalatest.matchers.should.Matchers import scala.concurrent.duration._ import akka.pattern.ask import akka.persistence.jdbc.query.EventAdapterTest.Snapshot abstract class EventSourcedCleanupTest(config: String) extends QueryTestSpec(config) with Matchers { implicit val askTimeout: FiniteDuration = 500.millis it should "delete all events and reset sequence number" in withActorSystem { implicit system => withTestActors(replyToMessages = true) { (actor1, _, _) => (actor1 ? 1).futureValue (actor1 ? 2).futureValue (actor1 ? 3).futureValue } new EventSourcedCleanup(system).deleteAllEvents("my-1", true).futureValue withTestActors(replyToMessages = true) { (actor1, _, _) => (actor1 ? "state").futureValue.asInstanceOf[Int] shouldBe 0 } } it should "delete snapshots as well as events" in withActorSystem { implicit system => withTestActors(replyToMessages = true) { (actor1, _, _) => (actor1 ? 1).futureValue (actor1 ? 2).futureValue (actor1 ? 
Snapshot).futureValue } new EventSourcedCleanup(system).deleteAll("my-1", true).futureValue withTestActors(replyToMessages = true) { (actor1, _, _) => (actor1 ? "state").futureValue.asInstanceOf[Int] shouldBe 0 } } } class H2EventSourcedCleanupTest extends EventSourcedCleanupTest("h2-application.conf") with H2Cleaner ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/configuration/AkkaPersistenceConfigTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.configuration import akka.persistence.jdbc.config._ import com.typesafe.config.{ Config, ConfigFactory } import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers import scala.annotation.nowarn import scala.concurrent.duration._ class AkkaPersistenceConfigTest extends AnyFlatSpec with Matchers { private val referenceConfig: Config = ConfigFactory.load("reference") @nowarn("msg=possible missing interpolator") val config: Config = ConfigFactory .parseString(""" |akka-persistence-jdbc.slick.db { | host = | port = | name = |} | |jdbc-journal { | class = "akka.persistence.jdbc.journal.JdbcAsyncWriteJournal" | | tables { | journal { | tableName = "journal" | schemaName = "" | columnNames { | ordering = "ordering" | persistenceId = "persistence_id" | sequenceNumber = "sequence_number" | deleted = "deleted" | tags = "tags" | message = "message" | } | } | } | | tagSeparator = "," | | dao = "akka.persistence.jdbc.dao.bytea.journal.ByteArrayJournalDao" | | slick { | profile = "slick.jdbc.PostgresProfile$" | db { | host = "localhost" | host = ${?POSTGRES_HOST} | port = "5432" | port = ${?POSTGRES_PORT} | name = "docker" | | url = "jdbc:postgresql://"${akka-persistence-jdbc.slick.db.host}":"${akka-persistence-jdbc.slick.db.port}"/"${akka-persistence-jdbc.slick.db.name} | user = "docker" | password = "docker" | driver = "org.postgresql.Driver$" | | // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP | | // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing | // slick will use an async executor with a fixed size queue of 10.000 objects | // The async executor is a connection pool for asynchronous execution of blocking I/O actions. | // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. | queueSize = 10000 // number of objects that can be queued by the async exector | | connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds) | validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 | idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. 
Default: 600000 (10 minutes) | maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) | leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 | | initializationFailFast = true // This property controls whether the pool will "fail fast" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. Default: true | | keepAliveConnection = on // ensures that the database does not get dropped while we are using it | | numThreads = 4 // number of cores | maxConnections = 4 // same as numThreads | minConnections = 4 // same as numThreads | } | } |} | |# the akka-persistence-snapshot-store in use |jdbc-snapshot-store { | class = "akka.persistence.jdbc.snapshot.JdbcSnapshotStore" | | tables { | snapshot { | tableName = "snapshot" | schemaName = "" | columnNames { | persistenceId = "persistence_id" | sequenceNumber = "sequence_number" | created = "created" | snapshot = "snapshot" | } | } | } | | dao = "akka.persistence.jdbc.dao.bytea.snapshot.ByteArraySnapshotDao" | | slick { | profile = "slick.jdbc.MySQLProfile$" | db { | host = "localhost" | host = ${?POSTGRES_HOST} | port = "5432" | port = ${?POSTGRES_PORT} | name = "docker" | | url = "jdbc:postgresql://"${akka-persistence-jdbc.slick.db.host}":"${akka-persistence-jdbc.slick.db.port}"/"${akka-persistence-jdbc.slick.db.name} | user = "docker" | password = "docker" | driver = "org.postgresql.Driver" | | // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP | | // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing | // slick will use an async executor with a fixed size queue of 10.000 objects | // The async executor is a connection pool for asynchronous execution of blocking I/O actions. | // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. | queueSize = 10000 // number of objects that can be queued by the async exector | | connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds) | validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 | idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. 
Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes) | maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) | leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 | | initializationFailFast = true // This property controls whether the pool will "fail fast" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. Default: true | | keepAliveConnection = on // ensures that the database does not get dropped while we are using it | | numThreads = 4 // number of cores | maxConnections = 4 // same as numThreads | minConnections = 4 // same as numThreads | } | } |} | |# the akka-persistence-query provider in use |jdbc-read-journal { | class = "akka.persistence.jdbc.query.JdbcReadJournalProvider" | | # New events are retrieved (polled) with this interval. | refresh-interval = "300ms" | | # How many events to fetch in one query (replay) and keep buffered until they | # are delivered downstreams. | max-buffer-size = "10" | | dao = "akka.persistence.jdbc.dao.bytea.readjournal.ByteArrayReadJournalDao" | | tables { | journal { | tableName = "journal" | schemaName = "" | columnNames { | ordering = "ordering" | persistenceId = "persistence_id" | sequenceNumber = "sequence_number" | created = "created" | tags = "tags" | message = "message" | } | } | } | | tagSeparator = "," | | slick { | profile = "slick.jdbc.OracleProfile$" | db { | host = "localhost" | host = ${?POSTGRES_HOST} | port = "5432" | port = ${?POSTGRES_PORT} | name = "docker" | | url = "jdbc:postgresql://"${akka-persistence-jdbc.slick.db.host}":"${akka-persistence-jdbc.slick.db.port}"/"${akka-persistence-jdbc.slick.db.name} | user = "docker" | password = "docker" | driver = "org.postgresql.Driver" | | // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP | | // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing | // slick will use an async executor with a fixed size queue of 10.000 objects | // The async executor is a connection pool for asynchronous execution of blocking I/O actions. | // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. | queueSize = 10000 // number of objects that can be queued by the async exector | | connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. 
If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds) | validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 | idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes) | maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) | leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 | | initializationFailFast = true // This property controls whether the pool will "fail fast" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. 
Default: true | | keepAliveConnection = on // ensures that the database does not get dropped while we are using it | | numThreads = 4 // number of cores | maxConnections = 4 // same as numThreads | minConnections = 4 // same as numThreads | } | } |} """.stripMargin) .withFallback(referenceConfig) .resolve() "reference config" should "parse JournalConfig" in { val cfg = new JournalConfig(referenceConfig.getConfig("jdbc-journal")) val slickConfiguration = new SlickConfiguration(referenceConfig.getConfig("jdbc-journal.slick")) slickConfiguration.jndiName shouldBe None slickConfiguration.jndiDbName shouldBe None cfg.pluginConfig.dao shouldBe "akka.persistence.jdbc.journal.dao.DefaultJournalDao" cfg.pluginConfig.tagSeparator shouldBe "," cfg.journalTableConfiguration.tableName shouldBe "journal" cfg.journalTableConfiguration.schemaName shouldBe None cfg.journalTableConfiguration.columnNames.ordering shouldBe "ordering" cfg.journalTableConfiguration.columnNames.created shouldBe "created" cfg.journalTableConfiguration.columnNames.message shouldBe "message" cfg.journalTableConfiguration.columnNames.persistenceId shouldBe "persistence_id" cfg.journalTableConfiguration.columnNames.sequenceNumber shouldBe "sequence_number" cfg.journalTableConfiguration.columnNames.tags shouldBe "tags" } it should "parse SnapshotConfig" in { val cfg = new SnapshotConfig(referenceConfig.getConfig("jdbc-snapshot-store")) val slickConfiguration = new SlickConfiguration(referenceConfig.getConfig("jdbc-journal.slick")) slickConfiguration.jndiName shouldBe None slickConfiguration.jndiDbName shouldBe None cfg.pluginConfig.dao shouldBe "akka.persistence.jdbc.snapshot.dao.DefaultSnapshotDao" cfg.legacySnapshotTableConfiguration.tableName shouldBe "snapshot" cfg.legacySnapshotTableConfiguration.schemaName shouldBe None cfg.legacySnapshotTableConfiguration.columnNames.persistenceId shouldBe "persistence_id" cfg.legacySnapshotTableConfiguration.columnNames.created shouldBe "created" cfg.legacySnapshotTableConfiguration.columnNames.sequenceNumber shouldBe "sequence_number" cfg.legacySnapshotTableConfiguration.columnNames.snapshot shouldBe "snapshot" } it should "parse ReadJournalConfig" in { val cfg = new ReadJournalConfig(referenceConfig.getConfig("jdbc-read-journal")) val slickConfiguration = new SlickConfiguration(referenceConfig.getConfig("jdbc-journal.slick")) slickConfiguration.jndiName shouldBe None slickConfiguration.jndiDbName shouldBe None cfg.pluginConfig.dao shouldBe "akka.persistence.jdbc.query.dao.DefaultReadJournalDao" cfg.pluginConfig.tagSeparator shouldBe "," cfg.refreshInterval shouldBe 1.second cfg.maxBufferSize shouldBe 500 cfg.journalTableConfiguration.tableName shouldBe "journal" cfg.journalTableConfiguration.schemaName shouldBe None cfg.journalTableConfiguration.columnNames.ordering shouldBe "ordering" cfg.journalTableConfiguration.columnNames.created shouldBe "created" cfg.journalTableConfiguration.columnNames.message shouldBe "message" cfg.journalTableConfiguration.columnNames.persistenceId shouldBe "persistence_id" cfg.journalTableConfiguration.columnNames.sequenceNumber shouldBe "sequence_number" cfg.journalTableConfiguration.columnNames.tags shouldBe "tags" } "full config" should "parse JournalConfig" in { val cfg = new JournalConfig(config.getConfig("jdbc-journal")) val slickConfiguration = new SlickConfiguration(config.getConfig("jdbc-journal.slick")) slickConfiguration.jndiName shouldBe None slickConfiguration.jndiDbName shouldBe None cfg.pluginConfig.dao shouldBe 
"akka.persistence.jdbc.dao.bytea.journal.ByteArrayJournalDao" cfg.pluginConfig.tagSeparator shouldBe "," cfg.journalTableConfiguration.tableName shouldBe "journal" cfg.journalTableConfiguration.schemaName shouldBe None cfg.journalTableConfiguration.columnNames.ordering shouldBe "ordering" cfg.journalTableConfiguration.columnNames.created shouldBe "created" cfg.journalTableConfiguration.columnNames.message shouldBe "message" cfg.journalTableConfiguration.columnNames.persistenceId shouldBe "persistence_id" cfg.journalTableConfiguration.columnNames.sequenceNumber shouldBe "sequence_number" cfg.journalTableConfiguration.columnNames.tags shouldBe "tags" } it should "parse SnapshotConfig" in { val cfg = new SnapshotConfig(config.getConfig("jdbc-snapshot-store")) val slickConfiguration = new SlickConfiguration(config.getConfig("jdbc-snapshot-store.slick")) slickConfiguration.jndiName shouldBe None slickConfiguration.jndiDbName shouldBe None cfg.pluginConfig.dao shouldBe "akka.persistence.jdbc.dao.bytea.snapshot.ByteArraySnapshotDao" cfg.legacySnapshotTableConfiguration.tableName shouldBe "snapshot" cfg.legacySnapshotTableConfiguration.schemaName shouldBe None cfg.legacySnapshotTableConfiguration.columnNames.persistenceId shouldBe "persistence_id" cfg.legacySnapshotTableConfiguration.columnNames.created shouldBe "created" cfg.legacySnapshotTableConfiguration.columnNames.sequenceNumber shouldBe "sequence_number" cfg.legacySnapshotTableConfiguration.columnNames.snapshot shouldBe "snapshot" } it should "parse ReadJournalConfig" in { val cfg = new ReadJournalConfig(config.getConfig("jdbc-read-journal")) val slickConfiguration = new SlickConfiguration(config.getConfig("jdbc-read-journal.slick")) slickConfiguration.jndiName shouldBe None slickConfiguration.jndiDbName shouldBe None cfg.pluginConfig.dao shouldBe "akka.persistence.jdbc.dao.bytea.readjournal.ByteArrayReadJournalDao" cfg.pluginConfig.tagSeparator shouldBe "," cfg.refreshInterval shouldBe 300.millis cfg.maxBufferSize shouldBe 10 cfg.journalTableConfiguration.tableName shouldBe "journal" cfg.journalTableConfiguration.schemaName shouldBe None cfg.journalTableConfiguration.columnNames.ordering shouldBe "ordering" cfg.journalTableConfiguration.columnNames.created shouldBe "created" cfg.journalTableConfiguration.columnNames.message shouldBe "message" cfg.journalTableConfiguration.columnNames.persistenceId shouldBe "persistence_id" cfg.journalTableConfiguration.columnNames.sequenceNumber shouldBe "sequence_number" cfg.journalTableConfiguration.columnNames.tags shouldBe "tags" } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/configuration/ConfigOpsTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.configuration import akka.persistence.jdbc.SimpleSpec import akka.persistence.jdbc.util.ConfigOps import ConfigOps._ import com.typesafe.config.ConfigFactory class ConfigOpsTest extends SimpleSpec { it should "parse field values to Options" in { val cfg = ConfigFactory.parseString(""" | person { | firstName = "foo" | lastName = "bar" | pet = "" | car = " " | } """.stripMargin) cfg.asStringOption("person.firstName").get shouldBe "foo" cfg.asStringOption("person.lastName").get shouldBe "bar" cfg.asStringOption("person.pet") shouldBe None cfg.asStringOption("person.car") shouldBe None cfg.asStringOption("person.bike") shouldBe None cfg.asStringOption("person.bike") shouldBe None } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/configuration/JNDIConfigTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.configuration import akka.actor.ActorSystem import akka.persistence.jdbc.SimpleSpec import akka.persistence.jdbc.db.SlickExtension import com.typesafe.config.ConfigFactory class JNDIConfigTest extends SimpleSpec { "JNDI config" should "read the config and throw NoInitialContextException in case the JNDI resource is not available" in { withActorSystem("jndi-application.conf") { system => val jdbcJournalConfig = system.settings.config.getConfig("jdbc-journal") val slickExtension = SlickExtension(system) intercept[javax.naming.NoInitialContextException] { // Since the JNDI resource is not actually available we expect a NoInitialContextException // This is an indication that the application actually attempts to load the configured JNDI resource slickExtension.database(jdbcJournalConfig).database } } } "JNDI config for shared databases" should "read the config and throw NoInitialContextException in case the JNDI resource is not available" in { withActorSystem("jndi-shared-db-application.conf") { system => val jdbcJournalConfig = system.settings.config.getConfig("jdbc-journal") val slickExtension = SlickExtension(system) intercept[javax.naming.NoInitialContextException] { // Since the JNDI resource is not actually available we expect a NoInitialContextException // This is an indication that the application actually attempts to load the configured JNDI resource slickExtension.database(jdbcJournalConfig).database } } } def withActorSystem(config: String)(f: ActorSystem => Unit): Unit = { val cfg = ConfigFactory.load(config) val system = ActorSystem("test", cfg) try { f(system) } finally { system.terminate().futureValue } } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/journal/JdbcJournalPerfSpec.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.journal import akka.actor.Props import akka.persistence.CapabilityFlag import akka.persistence.jdbc.config._ import akka.persistence.jdbc.db.SlickExtension import akka.persistence.jdbc.testkit.internal.{ H2, SchemaType } import akka.persistence.jdbc.util.{ ClasspathResources, DropCreate } import akka.persistence.journal.JournalPerfSpec import akka.persistence.journal.JournalPerfSpec.{ BenchActor, Cmd, ResetCounter } import akka.testkit.TestProbe import com.typesafe.config.{ Config, ConfigFactory } import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } import org.scalatest.concurrent.ScalaFutures import scala.concurrent.ExecutionContext import scala.concurrent.duration._ abstract class JdbcJournalPerfSpec(config: Config, schemaType: SchemaType) extends JournalPerfSpec(config) with BeforeAndAfterAll with BeforeAndAfterEach with ScalaFutures with ClasspathResources with DropCreate { override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = true implicit lazy val ec: ExecutionContext = system.dispatcher implicit def pc: PatienceConfig = PatienceConfig(timeout = 10.minutes) override def eventsCount: Int = 1000 override def awaitDurationMillis: Long = 10.minutes.toMillis override def measurementIterations: Int = 1 lazy val cfg = system.settings.config.getConfig("jdbc-journal") lazy val journalConfig = new JournalConfig(cfg) lazy val db = SlickExtension(system).database(cfg).database override def beforeAll(): Unit = { dropAndCreate(schemaType) super.beforeAll() } override def afterAll(): Unit = { db.close() super.afterAll() } def actorCount = 100 private val commands = Vector(1 to eventsCount: _*) "A PersistentActor's performance" must { s"measure: persist()-ing $eventsCount events for $actorCount actors" in { val testProbe = TestProbe() val replyAfter = eventsCount def createBenchActor(actorNumber: Int) = system.actorOf(Props(classOf[BenchActor], s"$pid--$actorNumber", testProbe.ref, replyAfter)) val actors = 1.to(actorCount).map(createBenchActor) measure(d => s"Persist()-ing $eventsCount * $actorCount took ${d.toMillis} ms") { for (cmd <- commands; actor <- actors) { actor ! Cmd("p", cmd) } for (_ <- actors) { testProbe.expectMsg(awaitDurationMillis.millis, commands.last) } for (actor <- actors) { actor ! ResetCounter } } } } "A PersistentActor's performance" must { s"measure: persistAsync()-ing $eventsCount events for $actorCount actors" in { val testProbe = TestProbe() val replyAfter = eventsCount def createBenchActor(actorNumber: Int) = system.actorOf(Props(classOf[BenchActor], s"$pid--$actorNumber", testProbe.ref, replyAfter)) val actors = 1.to(actorCount).map(createBenchActor) measure(d => s"persistAsync()-ing $eventsCount * $actorCount took ${d.toMillis} ms") { for (cmd <- commands; actor <- actors) { actor ! Cmd("pa", cmd) } for (_ <- actors) { testProbe.expectMsg(awaitDurationMillis.millis, commands.last) } for (actor <- actors) { actor ! ResetCounter } } } } } class H2JournalPerfSpec extends JdbcJournalPerfSpec(ConfigFactory.load("h2-application.conf"), H2) class H2JournalPerfSpecSharedDb extends JdbcJournalPerfSpec(ConfigFactory.load("h2-shared-db-application.conf"), H2) ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/journal/JdbcJournalSpec.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.journal import akka.persistence.CapabilityFlag import akka.persistence.jdbc.config._ import akka.persistence.jdbc.db.SlickExtension import akka.persistence.jdbc.testkit.internal.{ H2, SchemaType } import akka.persistence.jdbc.util.{ ClasspathResources, DropCreate } import akka.persistence.journal.JournalSpec import com.typesafe.config.{ Config, ConfigFactory } import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } import org.scalatest.concurrent.ScalaFutures import scala.concurrent.ExecutionContext import scala.concurrent.duration._ abstract class JdbcJournalSpec(config: Config, schemaType: SchemaType) extends JournalSpec(config) with BeforeAndAfterAll with BeforeAndAfterEach with ScalaFutures with ClasspathResources with DropCreate { override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = true implicit val pc: PatienceConfig = PatienceConfig(timeout = 10.seconds) implicit lazy val ec: ExecutionContext = system.dispatcher lazy val cfg = system.settings.config.getConfig("jdbc-journal") lazy val journalConfig = new JournalConfig(cfg) lazy val db = SlickExtension(system).database(cfg).database protected override def supportsSerialization: CapabilityFlag = newDao protected override def supportsMetadata: CapabilityFlag = newDao override def beforeAll(): Unit = { dropAndCreate(schemaType) super.beforeAll() } override def afterAll(): Unit = { db.close() super.afterAll() } } class H2JournalSpec extends JdbcJournalSpec(ConfigFactory.load("h2-application.conf"), H2) class H2JournalSpecSharedDb extends JdbcJournalSpec(ConfigFactory.load("h2-shared-db-application.conf"), H2) ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/journal/dao/ByteArrayJournalSerializerTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc package journal.dao.legacy import akka.persistence.{ AtomicWrite, PersistentRepr } import scala.collection.immutable._ class ByteArrayJournalSerializerTest extends SharedActorSystemTestSpec() { it should "serialize a serializable message and indicate whether or not the serialization succeeded" in { val serializer = new ByteArrayJournalSerializer(serialization, ",") val result = serializer.serialize(Seq(AtomicWrite(PersistentRepr("foo")))) result should have size 1 (result.head should be).a(Symbol("success")) } it should "not serialize a non-serializable message and indicate whether or not the serialization succeeded" in { class Test val serializer = new ByteArrayJournalSerializer(serialization, ",") val result = serializer.serialize(Seq(AtomicWrite(PersistentRepr(new Test)))) result should have size 1 (result.head should be).a(Symbol("failure")) } it should "serialize non-serializable and serializable messages and indicate whether or not the serialization succeeded" in { class Test val serializer = new ByteArrayJournalSerializer(serialization, ",") val result = serializer.serialize(List(AtomicWrite(PersistentRepr(new Test)), AtomicWrite(PersistentRepr("foo")))) result should have size 2 (result.head should be).a(Symbol("failure")) (result.last should be).a(Symbol("success")) } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/journal/dao/JournalTablesTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.journal.dao.legacy import akka.persistence.jdbc.TablesTestSpec import slick.jdbc.JdbcProfile class JournalTablesTest extends TablesTestSpec { val journalTableConfiguration = journalConfig.journalTableConfiguration object TestByteAJournalTables extends JournalTables { override val profile: JdbcProfile = slick.jdbc.PostgresProfile override val journalTableCfg = journalTableConfiguration } "JournalTable" should "be configured with a schema name" in { TestByteAJournalTables.JournalTable.baseTableRow.schemaName shouldBe journalTableConfiguration.schemaName } it should "be configured with a table name" in { TestByteAJournalTables.JournalTable.baseTableRow.tableName shouldBe journalTableConfiguration.tableName } it should "be configured with column names" in { val colName = toColumnName(journalTableConfiguration.tableName)(_) TestByteAJournalTables.JournalTable.baseTableRow.persistenceId.toString shouldBe colName( journalTableConfiguration.columnNames.persistenceId) TestByteAJournalTables.JournalTable.baseTableRow.deleted.toString shouldBe colName( journalTableConfiguration.columnNames.deleted) TestByteAJournalTables.JournalTable.baseTableRow.sequenceNumber.toString shouldBe colName( journalTableConfiguration.columnNames.sequenceNumber) // TestByteAJournalTables.JournalTable.baseTableRow.tags.toString() shouldBe colName(journalTableConfiguration.columnNames.tags) } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/journal/dao/TagsSerializationTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.journal.dao.legacy import akka.persistence.jdbc.SharedActorSystemTestSpec class TagsSerializationTest extends SharedActorSystemTestSpec { "Encode" should "no tags" in { encodeTags(Set.empty[String], ",") shouldBe None } it should "one tag" in { encodeTags(Set("foo"), ",").value shouldBe "foo" } it should "two tags" in { encodeTags(Set("foo", "bar"), ",").value shouldBe "foo,bar" } it should "three tags" in { encodeTags(Set("foo", "bar", "baz"), ",").value shouldBe "foo,bar,baz" } "decode" should "no tags" in { decodeTags(None, ",") shouldBe Set() } it should "one tag with separator" in { decodeTags(Some("foo"), ",") shouldBe Set("foo") } it should "two tags with separator" in { decodeTags(Some("foo,bar"), ",") shouldBe Set("foo", "bar") } it should "three tags with separator" in { decodeTags(Some("foo,bar,baz"), ",") shouldBe Set("foo", "bar", "baz") } "TagsSerialization" should "be bijective" in { val tags: Set[String] = Set("foo", "bar", "baz") decodeTags(encodeTags(tags, ","), ",") shouldBe tags } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/journal/dao/TrySeqTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.journal.dao import akka.persistence.jdbc.util.TrySeq import akka.persistence.jdbc.SimpleSpec import scala.collection.immutable._ import scala.util.{ Failure, Success } class TrySeqTest extends SimpleSpec { def failure(text: String) = Failure(new RuntimeException(text)) it should "sequence an empty immutable.Seq" in { TrySeq.sequence(Seq.empty) shouldBe Success(Seq.empty) } it should "sequence an empty immutable.Vector" in { TrySeq.sequence(Vector.empty) shouldBe Success(Seq.empty) } it should "sequence an immutable.Seq of success/success" in { TrySeq.sequence(Seq(Success("a"), Success("b"))) shouldBe Success(Seq("a", "b")) } it should "sequence an immutable Seq of success/failure" in { val result = TrySeq.sequence(List(Success("a"), failure("b"))) result should matchPattern { case Failure(cause) if cause.getMessage.contains("b") => } } it should "sequence an immutable Seq of failure/success" in { val result = TrySeq.sequence(List(failure("a"), Success("b"))) result should matchPattern { case Failure(cause) if cause.getMessage.contains("a") => } } it should "sequence an immutable.Seq of failure/failure" in { val result = TrySeq.sequence(Seq(failure("a"), failure("b"))) result should matchPattern { case Failure(cause) if cause.getMessage.contains("a") => } } it should "sequence an immutable.Vector of failure/failure" in { val result = TrySeq.sequence(Vector(failure("a"), failure("b"))) result should matchPattern { case Failure(cause) if cause.getMessage.contains("a") => } } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/AllPersistenceIdsTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query import scala.concurrent.duration._ abstract class AllPersistenceIdsTest(config: String) extends QueryTestSpec(config) { it should "not terminate the stream when there are no pids" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) journalOps.withPersistenceIds() { tp => tp.request(1) tp.expectNoMessage(100.millis) tp.cancel() tp.expectNoMessage(100.millis) } } it should "find persistenceIds for actors" in withActorSystem { implicit system => val journalOps = new JavaDslJdbcReadJournalOperations(system) withTestActors() { (actor1, actor2, actor3) => journalOps.withPersistenceIds() { tp => tp.request(10) tp.expectNoMessage(100.millis) actor1 ! 1 tp.expectNext(ExpectNextTimeout, "my-1") tp.expectNoMessage(100.millis) actor2 ! 1 tp.expectNext(ExpectNextTimeout, "my-2") tp.expectNoMessage(100.millis) actor3 ! 1 tp.expectNext(ExpectNextTimeout, "my-3") tp.expectNoMessage(100.millis) actor1 ! 1 tp.expectNoMessage(100.millis) actor2 ! 1 tp.expectNoMessage(100.millis) actor3 ! 1 tp.expectNoMessage(100.millis) tp.cancel() tp.expectNoMessage(100.millis) } } } } class H2ScalaAllPersistenceIdsTest extends AllPersistenceIdsTest("h2-application.conf") with H2Cleaner ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/CurrentEventsByPersistenceIdTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc.
*/ package akka.persistence.jdbc.query import akka.Done import akka.persistence.Persistence import akka.persistence.jdbc.journal.JdbcAsyncWriteJournal import akka.persistence.query.Offset import akka.persistence.query.{ EventEnvelope, Sequence } import akka.testkit.TestProbe abstract class CurrentEventsByPersistenceIdTest(config: String) extends QueryTestSpec(config) { import QueryTestSpec.EventEnvelopeProbeOps it should "find events from sequenceNr" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors() { (actor1, _, _) => actor1 ! 1 actor1 ! 2 actor1 ! 3 actor1 ! 4 eventually { journalOps.countJournal.futureValue shouldBe 4 } journalOps.withCurrentEventsByPersistenceId()("my-1", 0, 1) { tp => tp.request(Int.MaxValue) tp.expectNextEventEnvelope("my-1", 1, 1) tp.expectComplete() } journalOps.withCurrentEventsByPersistenceId()("my-1", 1, 1) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectComplete() } journalOps.withCurrentEventsByPersistenceId()("my-1", 1, 2) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(2), "my-1", 2, 2, timestamp = 0L)) tp.expectComplete() } journalOps.withCurrentEventsByPersistenceId()("my-1", 2, 2) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(2), "my-1", 2, 2, timestamp = 0L)) tp.expectComplete() } journalOps.withCurrentEventsByPersistenceId()("my-1", 2, 3) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(2), "my-1", 2, 2, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(3), "my-1", 3, 3, timestamp = 0L)) tp.expectComplete() } journalOps.withCurrentEventsByPersistenceId()("my-1", 3, 3) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(3), "my-1", 3, 3, timestamp = 0L)) tp.expectComplete() } journalOps.withCurrentEventsByPersistenceId()("my-1", 0, 3) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(2), "my-1", 2, 2, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(3), "my-1", 3, 3, timestamp = 0L)) tp.expectComplete() } journalOps.withCurrentEventsByPersistenceId()("my-1", 1, 3) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(2), "my-1", 2, 2, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(3), "my-1", 3, 3, timestamp = 0L)) tp.expectComplete() } } } it should "not find any events for unknown pid" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) journalOps.withCurrentEventsByPersistenceId()("unkown-pid", 0L, Long.MaxValue) { tp => tp.request(Int.MaxValue) tp.expectComplete() } } it should "include ordering Offset in EventEnvelope" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors() { (actor1, actor2, actor3) => actor1 ! 1 actor1 ! 2 actor1 ! 3 eventually { journalOps.countJournal.futureValue shouldBe 3 } actor2 ! 4 eventually { journalOps.countJournal.futureValue shouldBe 4 } actor3 ! 5 eventually { journalOps.countJournal.futureValue shouldBe 5 } actor1 ! 
6 eventually { journalOps.countJournal.futureValue shouldBe 6 } journalOps.withCurrentEventsByPersistenceId()("my-1", 0, Long.MaxValue) { tp => tp.request(Int.MaxValue) tp.expectNextEventEnvelope("my-1", 1, 1) tp.expectNextEventEnvelope("my-1", 2, 2) val env3 = tp.expectNext(ExpectNextTimeout) val ordering3 = env3.offset match { case Sequence(value) => value case _ => fail() } val env6 = tp.expectNext(ExpectNextTimeout) env6.persistenceId shouldBe "my-1" env6.sequenceNr shouldBe 4 env6.event shouldBe 6 // event 4 and 5 persisted before 6 by different actors, increasing the ordering env6.offset shouldBe Offset.sequence(ordering3 + 3) tp.expectComplete() } } } it should "find events for actors" in withActorSystem { implicit system => val journalOps = new JavaDslJdbcReadJournalOperations(system) withTestActors() { (actor1, _, _) => actor1 ! 1 actor1 ! 2 actor1 ! 3 eventually { journalOps.countJournal.futureValue shouldBe 3 } journalOps.withCurrentEventsByPersistenceId()("my-1", 1, 1) { tp => tp.request(Int.MaxValue).expectNextEventEnvelope("my-1", 1, 1).expectComplete() } journalOps.withCurrentEventsByPersistenceId()("my-1", 2, 2) { tp => tp.request(Int.MaxValue).expectNextEventEnvelope("my-1", 2, 2).expectComplete() } journalOps.withCurrentEventsByPersistenceId()("my-1", 3, 3) { tp => tp.request(Int.MaxValue).expectNextEventEnvelope("my-1", 3, 3).expectComplete() } journalOps.withCurrentEventsByPersistenceId()("my-1", 2, 3) { tp => tp.request(Int.MaxValue) .expectNextEventEnvelope("my-1", 2, 2) .expectNextEventEnvelope("my-1", 3, 3) .expectComplete() } } } it should "allow updating events (for data migrations)" in withActorSystem { implicit system => if (newDao) pending // https://github.com/akka/akka-persistence-jdbc/issues/469 val journalOps = new JavaDslJdbcReadJournalOperations(system) val journal = Persistence(system).journalFor("") withTestActors() { (actor1, _, _) => actor1 ! 1 actor1 ! 2 actor1 ! 3 eventually { journalOps.countJournal.futureValue shouldBe 3 } val pid = "my-1" journalOps.withCurrentEventsByPersistenceId()(pid, 1, 3) { tp => tp.request(Int.MaxValue) .expectNextEventEnvelope(pid, 1, 1) .expectNextEventEnvelope(pid, 2, 2) .expectNextEventEnvelope(pid, 3, 3) .expectComplete() } // perform in-place update val journalP = TestProbe() journal.tell(JdbcAsyncWriteJournal.InPlaceUpdateEvent(pid, 1, Integer.valueOf(111)), journalP.ref) journalP.expectMsg(Done) journalOps.withCurrentEventsByPersistenceId()(pid, 1, 3) { tp => tp.request(Int.MaxValue) .expectNextEventEnvelope(pid, 1, Integer.valueOf(111)) .expectNextEventEnvelope(pid, 2, 2) .expectNextEventEnvelope(pid, 3, 3) .expectComplete() } } } } // Note: these tests use the shared-db configs, the test for all (so not only current) events use the regular db config class H2ScalaCurrentEventsByPersistenceIdTest extends CurrentEventsByPersistenceIdTest("h2-shared-db-application.conf") with H2Cleaner ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/CurrentEventsByTagTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.query import akka.persistence.query.{ EventEnvelope, NoOffset, Sequence } import akka.pattern.ask import com.typesafe.config.{ ConfigValue, ConfigValueFactory } import scala.concurrent.duration._ import akka.Done import akka.persistence.jdbc.query.EventAdapterTest.{ Event, TaggedAsyncEvent } import scala.concurrent.Future import CurrentEventsByTagTest._ object CurrentEventsByTagTest { val maxBufferSize = 20 val refreshInterval = 500.milliseconds val configOverrides: Map[String, ConfigValue] = Map( "jdbc-read-journal.max-buffer-size" -> ConfigValueFactory.fromAnyRef(maxBufferSize.toString), "jdbc-read-journal.refresh-interval" -> ConfigValueFactory.fromAnyRef(refreshInterval.toString())) } abstract class CurrentEventsByTagTest(config: String) extends QueryTestSpec(config, configOverrides) { it should "not find an event by tag for unknown tag" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => (actor1 ? withTags(1, "one")).futureValue (actor2 ? withTags(2, "two")).futureValue (actor3 ? withTags(3, "three")).futureValue eventually { journalOps.countJournal.futureValue shouldBe 3 } journalOps.withCurrentEventsByTag()("unknown", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectComplete() } } } it should "find all events by tag" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => (actor1 ? withTags(1, "number")).futureValue (actor2 ? withTags(2, "number")).futureValue (actor3 ? withTags(3, "number")).futureValue eventually { journalOps.countJournal.futureValue shouldBe 3 } journalOps.withCurrentEventsByTag()("number", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => } tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => } tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => } tp.expectComplete() } journalOps.withCurrentEventsByTag()("number", Sequence(0)) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => } tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => } tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => } tp.expectComplete() } journalOps.withCurrentEventsByTag()("number", Sequence(1)) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => } tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => } tp.expectComplete() } journalOps.withCurrentEventsByTag()("number", Sequence(2)) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => } tp.expectComplete() } journalOps.withCurrentEventsByTag()("number", Sequence(3)) { tp => tp.request(Int.MaxValue) tp.expectComplete() } } } it should "persist and find a tagged event with multiple tags" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => withClue("Persisting multiple tagged events") { (actor1 ? withTags(1, "one", "1", "prime")).futureValue (actor1 ? withTags(2, "two", "2", "prime")).futureValue (actor1 ? withTags(3, "three", "3", "prime")).futureValue (actor1 ? withTags(4, "four", "4")).futureValue (actor1 ? withTags(5, "five", "5", "prime")).futureValue (actor2 ? withTags(3, "three", "3", "prime")).futureValue (actor3 ? 
withTags(3, "three", "3", "prime")).futureValue (actor1 ? 1).futureValue (actor1 ? 1).futureValue eventually { journalOps.countJournal.futureValue shouldBe 9 } } journalOps.withCurrentEventsByTag()("one", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => } tp.expectComplete() } journalOps.withCurrentEventsByTag()("prime", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => } tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => } tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => } tp.expectNextPF { case EventEnvelope(Sequence(5), _, _, _) => } tp.expectNextPF { case EventEnvelope(Sequence(6), _, _, _) => } tp.expectNextPF { case EventEnvelope(Sequence(7), _, _, _) => } tp.expectComplete() } journalOps.withCurrentEventsByTag()("3", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => } tp.expectNextPF { case EventEnvelope(Sequence(6), _, _, _) => } tp.expectNextPF { case EventEnvelope(Sequence(7), _, _, _) => } tp.expectComplete() } journalOps.withCurrentEventsByTag()("4", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(4), _, _, _) => } tp.expectComplete() } journalOps.withCurrentEventsByTag()("four", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(4), _, _, _) => } tp.expectComplete() } journalOps.withCurrentEventsByTag()("5", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(5), _, _, _) => } tp.expectComplete() } journalOps.withCurrentEventsByTag()("five", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(5), _, _, _) => } tp.expectComplete() } } } it should "complete without any gaps in case events are being persisted when the query is executed" in withActorSystem { implicit system => val journalOps = new JavaDslJdbcReadJournalOperations(system) import system.dispatcher withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => def sendMessagesWithTag(tag: String, numberOfMessagesPerActor: Int): Future[Done] = { val futures = for (actor <- Seq(actor1, actor2, actor3); i <- 1 to numberOfMessagesPerActor) yield { actor ? TaggedAsyncEvent(Event(i.toString), tag) } Future.sequence(futures).map(_ => Done) } val tag = "someTag" // send a batch of 3 * 200 val batch1 = sendMessagesWithTag(tag, 200) // wait for acknowledgement of the first batch only batch1.futureValue // Sanity check, all events in the first batch must be in the journal journalOps.countJournal.futureValue should be >= 600L // Try to persist a large batch of events per actor. Some of these may be returned, but not all! 
val batch2 = sendMessagesWithTag(tag, 5000) // start the query before the last batch completes journalOps.withCurrentEventsByTag()(tag, NoOffset) { tp => // The stream must complete within the given amount of time // This may take a while in case the journal sequence actor detects gaps val allEvents = tp.toStrict(atMost = 40.seconds) allEvents.size should be >= 600 val expectedOffsets = 1L.to(allEvents.size).map(Sequence.apply) allEvents.map(_.offset) shouldBe expectedOffsets } batch2.futureValue } } } // Note: these tests use the shared-db configs, the test for all (so not only current) events uses the regular db config class H2ScalaCurrentEventsByTagTest extends CurrentEventsByTagTest("h2-shared-db-application.conf") with H2Cleaner ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/CurrentPersistenceIdsTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query abstract class CurrentPersistenceIdsTest(config: String) extends QueryTestSpec(config) { it should "not find any persistenceIds for empty journal" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) journalOps.withCurrentPersistenceIds() { tp => tp.request(1) tp.expectComplete() } } it should "find persistenceIds for actors" in withActorSystem { implicit system => val journalOps = new JavaDslJdbcReadJournalOperations(system) withTestActors() { (actor1, actor2, actor3) => actor1 ! 1 actor2 ! 1 actor3 ! 1 eventually { journalOps.withCurrentPersistenceIds() { tp => tp.request(3) tp.expectNextUnordered("my-1", "my-2", "my-3") tp.expectComplete() } } } } } // Note: these tests use the shared-db configs, the test for all persistence ids uses the regular db config class H2ScalaCurrentPersistenceIdsTest extends CurrentPersistenceIdsTest("h2-shared-db-application.conf") with H2Cleaner ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/EventAdapterTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query import akka.persistence.query.{ EventEnvelope, NoOffset, Sequence } import scala.concurrent.duration._ import akka.pattern.ask import akka.persistence.journal.{ EventSeq, ReadEventAdapter, Tagged, WriteEventAdapter } import org.scalatest.Assertions.fail object EventAdapterTest { case class Event(value: String) { def adapted = EventAdapted(value) } case class TaggedEvent(event: Event, tag: String) case class TaggedAsyncEvent(event: Event, tag: String) case class EventAdapted(value: String) { def restored = EventRestored(value) } case class EventRestored(value: String) case object Snapshot class TestReadEventAdapter extends ReadEventAdapter { override def fromJournal(event: Any, manifest: String): EventSeq = event match { case e: EventAdapted => EventSeq.single(e.restored) case _ => fail() } } class TestWriteEventAdapter extends WriteEventAdapter { override def manifest(event: Any): String = "" override def toJournal(event: Any): Any = event match { case e: Event => e.adapted case TaggedEvent(e: Event, tag) => Tagged(e.adapted, Set(tag)) case TaggedAsyncEvent(e: Event, tag) => Tagged(e.adapted, Set(tag)) case _ => event } } } /** * Tests that check persistence queries when an event adapter is configured for persisted events.
*/ abstract class EventAdapterTest(config: String) extends QueryTestSpec(config) { import EventAdapterTest._ final val NoMsgTime: FiniteDuration = 100.millis it should "apply event adapter when querying events for actor with pid 'my-1'" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors() { (actor1, _, _) => journalOps.withEventsByPersistenceId()("my-1", 0) { tp => tp.request(10) tp.expectNoMessage(100.millis) actor1 ! Event("1") tp.expectNext(ExpectNextTimeout, EventEnvelope(Sequence(1), "my-1", 1, EventRestored("1"), timestamp = 0L)) tp.expectNoMessage(100.millis) actor1 ! Event("2") tp.expectNext(ExpectNextTimeout, EventEnvelope(Sequence(2), "my-1", 2, EventRestored("2"), timestamp = 0L)) tp.expectNoMessage(100.millis) tp.cancel() } } } it should "apply event adapters when querying events by tag from an offset" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => (actor1 ? TaggedEvent(Event("1"), "event")).futureValue (actor2 ? TaggedEvent(Event("2"), "event")).futureValue (actor3 ? TaggedEvent(Event("3"), "event")).futureValue eventually { journalOps.countJournal.futureValue shouldBe 3 } journalOps.withEventsByTag(10.seconds)("event", Sequence(1)) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(2), "my-2", 1, EventRestored("2"), timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(3), "my-3", 1, EventRestored("3"), timestamp = 0L)) tp.expectNoMessage(NoMsgTime) actor1 ? TaggedEvent(Event("1"), "event") tp.expectNext(EventEnvelope(Sequence(4), "my-1", 2, EventRestored("1"), timestamp = 0L)) tp.cancel() tp.expectNoMessage(NoMsgTime) } } } it should "apply event adapters when querying current events for actors" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors() { (actor1, _, _) => actor1 ! Event("1") actor1 ! Event("2") actor1 ! Event("3") eventually { journalOps.countJournal.futureValue shouldBe 3 } journalOps.withCurrentEventsByPersistenceId()("my-1", 1, 1) { tp => tp.request(Int.MaxValue) .expectNext(EventEnvelope(Sequence(1), "my-1", 1, EventRestored("1"), timestamp = 0L)) .expectComplete() } journalOps.withCurrentEventsByPersistenceId()("my-1", 2, 2) { tp => tp.request(Int.MaxValue) .expectNext(EventEnvelope(Sequence(2), "my-1", 2, EventRestored("2"), timestamp = 0L)) .expectComplete() } journalOps.withCurrentEventsByPersistenceId()("my-1", 3, 3) { tp => tp.request(Int.MaxValue) .expectNext(EventEnvelope(Sequence(3), "my-1", 3, EventRestored("3"), timestamp = 0L)) .expectComplete() } journalOps.withCurrentEventsByPersistenceId()("my-1", 2, 3) { tp => tp.request(Int.MaxValue) .expectNext(EventEnvelope(Sequence(2), "my-1", 2, EventRestored("2"), timestamp = 0L)) .expectNext(EventEnvelope(Sequence(3), "my-1", 3, EventRestored("3"), timestamp = 0L)) .expectComplete() } } } it should "apply event adapters when querying all current events by tag" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => (actor1 ? TaggedEvent(Event("1"), "event")).futureValue (actor2 ? TaggedEvent(Event("2"), "event")).futureValue (actor3 ? 
TaggedEvent(Event("3"), "event")).futureValue eventually { journalOps.countJournal.futureValue shouldBe 3 } journalOps.withCurrentEventsByTag()("event", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, EventRestored("1")) => } tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, EventRestored("2")) => } tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, EventRestored("3")) => } tp.expectComplete() } journalOps.withCurrentEventsByTag()("event", Sequence(0)) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, EventRestored("1")) => } tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, EventRestored("2")) => } tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, EventRestored("3")) => } tp.expectComplete() } journalOps.withCurrentEventsByTag()("event", Sequence(1)) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, EventRestored("2")) => } tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, EventRestored("3")) => } tp.expectComplete() } journalOps.withCurrentEventsByTag()("event", Sequence(2)) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, EventRestored("3")) => } tp.expectComplete() } journalOps.withCurrentEventsByTag()("event", Sequence(3)) { tp => tp.request(Int.MaxValue) tp.expectComplete() } } } } class H2ScalaEventAdapterTest extends EventAdapterTest("h2-application.conf") with H2Cleaner ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/EventsByPersistenceIdTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query import akka.Done import akka.persistence.jdbc.query.EventAdapterTest.{ Event, TaggedAsyncEvent } import akka.persistence.query.{ EventEnvelope, Sequence } import scala.concurrent.Future import scala.concurrent.duration._ import akka.pattern.ask import akka.persistence.query.Offset abstract class EventsByPersistenceIdTest(config: String) extends QueryTestSpec(config) { import QueryTestSpec.EventEnvelopeProbeOps it should "not find any events for unknown pid" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) journalOps.withEventsByPersistenceId()("unkown-pid", 0L, Long.MaxValue) { tp => tp.request(1) tp.expectNoMessage(100.millis) tp.cancel() tp.expectNoMessage(100.millis) } } it should "find events from sequenceNr" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors() { (actor1, _, _) => actor1 ! withTags(1, "number") actor1 ! withTags(2, "number") actor1 ! withTags(3, "number") actor1 ! 
withTags(4, "number") eventually { journalOps.countJournal.futureValue shouldBe 4 } journalOps.withEventsByPersistenceId()("my-1", 0, 0) { tp => tp.request(1) tp.expectComplete() tp.cancel() } journalOps.withEventsByPersistenceId()("my-1", 0, 1) { tp => tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 1, 1) tp.request(1) tp.expectComplete() tp.cancel() } journalOps.withEventsByPersistenceId()("my-1", 1, 1) { tp => tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 1, 1) tp.request(1) tp.expectComplete() tp.cancel() } journalOps.withEventsByPersistenceId()("my-1", 1, 2) { tp => tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 1, 1) tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 2, 2) tp.request(1) tp.expectComplete() tp.cancel() } journalOps.withEventsByPersistenceId()("my-1", 2, 2) { tp => tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 2, 2) tp.request(1) tp.expectComplete() tp.cancel() } journalOps.withEventsByPersistenceId()("my-1", 2, 3) { tp => tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 2, 2) tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 3, 3) tp.request(1) tp.expectComplete() tp.cancel() } journalOps.withEventsByPersistenceId()("my-1", 3, 3) { tp => tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 3, 3) tp.request(1) tp.expectComplete() tp.cancel() } journalOps.withEventsByPersistenceId()("my-1", 0, 3) { tp => tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 1, 1) tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 2, 2) tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 3, 3) tp.request(1) tp.expectComplete() tp.cancel() } journalOps.withEventsByPersistenceId()("my-1", 1, 3) { tp => tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 1, 1) tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 2, 2) tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 3, 3) tp.request(1) tp.expectComplete() tp.cancel() } } } it should "include ordering Offset in EventEnvelope" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors() { (actor1, actor2, actor3) => actor1 ! withTags(1, "ordering") actor1 ! withTags(2, "ordering") actor1 ! withTags(3, "ordering") eventually { journalOps.countJournal.futureValue shouldBe 3 } journalOps.withEventsByPersistenceId()("my-1", 0, Long.MaxValue) { tp => tp.request(100) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 1, 1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 2, 2) val env3 = tp.expectNext(ExpectNextTimeout) val ordering3 = env3.offset match { case Sequence(value) => value case _ => fail() } actor2 ! withTags(4, "ordering") eventually { journalOps.countJournal.futureValue shouldBe 4 } actor3 ! withTags(5, "ordering") eventually { journalOps.countJournal.futureValue shouldBe 5 } actor1 ! 
withTags(6, "ordering") eventually { journalOps.countJournal.futureValue shouldBe 6 } val env6 = tp.expectNext(ExpectNextTimeout) env6.persistenceId shouldBe "my-1" env6.sequenceNr shouldBe 4 env6.event shouldBe 6 // event 4 and 5 persisted before 6 by different actors, increasing the ordering env6.offset shouldBe Offset.sequence(ordering3 + 3) tp.cancel() } } } it should "deliver EventEnvelopes non-zero timestamps" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) val testStartTime = System.currentTimeMillis() withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => (actor1 ? withTags(1, "number")).futureValue (actor2 ? withTags(2, "number")).futureValue (actor3 ? withTags(3, "number")).futureValue def assertTimestamp(timestamp: Long, clue: String) = { withClue(clue) { timestamp should !==(0L) // we want to prove that the event got a non-zero timestamp // but also a timestamp that between some boundaries around this test run (timestamp - testStartTime) should be < 120000L (timestamp - testStartTime) should be > 0L } } journalOps.withEventsByPersistenceId()("my-1", 0, 1) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case ev @ EventEnvelope(Sequence(1), "my-1", 1, 1) => assertTimestamp(ev.timestamp, "my-1") } tp.cancel() } journalOps.withEventsByPersistenceId()("my-2", 0, 1) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case ev @ EventEnvelope(_, "my-2", 1, 2) => assertTimestamp(ev.timestamp, "my-2") } tp.cancel() } journalOps.withEventsByPersistenceId()("my-3", 0, 1) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case ev @ EventEnvelope(_, "my-3", 1, 3) => assertTimestamp(ev.timestamp, "my-3") } tp.cancel() } } } it should "find events for actor with pid 'my-1'" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors() { (actor1, _, _) => journalOps.withEventsByPersistenceId()("my-1", 0) { tp => tp.request(10) tp.expectNoMessage(100.millis) actor1 ! 1 tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 1, 1) tp.expectNoMessage(100.millis) actor1 ! 2 tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 2, 2) tp.expectNoMessage(100.millis) tp.cancel() } } } it should "find events for actor with pid 'my-1' and persisting messages to other actor" in withActorSystem { implicit system => val journalOps = new JavaDslJdbcReadJournalOperations(system) withTestActors() { (actor1, actor2, _) => journalOps.withEventsByPersistenceId()("my-1", 0, Long.MaxValue) { tp => tp.request(10) tp.expectNoMessage(100.millis) actor1 ! 1 tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 1, 1) tp.expectNoMessage(100.millis) actor1 ! 2 tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 2, 2) tp.expectNoMessage(100.millis) actor2 ! 1 actor2 ! 2 actor2 ! 3 tp.expectNoMessage(100.millis) actor1 ! 3 tp.expectNextEventEnvelope(ExpectNextTimeout, "my-1", 3, 3) tp.expectNoMessage(100.millis) tp.cancel() tp.expectNoMessage(100.millis) } } } it should "find events for actor with pid 'my-2'" in withActorSystem { implicit system => val journalOps = new JavaDslJdbcReadJournalOperations(system) withTestActors() { (_, actor2, _) => actor2 ! 1 actor2 ! 2 actor2 ! 
3 eventually { journalOps.countJournal.futureValue shouldBe 3 } journalOps.withEventsByPersistenceId()("my-2", 0, Long.MaxValue) { tp => tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-2", 1, 1) tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-2", 2, 2) tp.request(1) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-2", 3, 3) tp.expectNoMessage(100.millis) actor2 ! 5 actor2 ! 6 actor2 ! 7 eventually { journalOps.countJournal.futureValue shouldBe 6 } tp.request(3) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-2", 4, 5) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-2", 5, 6) tp.expectNextEventEnvelope(ExpectNextTimeout, "my-2", 6, 7) tp.expectNoMessage(100.millis) tp.cancel() tp.expectNoMessage(100.millis) } } } it should "find a large number of events quickly" in withActorSystem { implicit system => import akka.pattern.ask import system.dispatcher val journalOps = new JavaDslJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, _, _) => def sendMessagesWithTag(tag: String, numberOfMessages: Int): Future[Done] = { val futures = for (i <- 1 to numberOfMessages) yield { actor1 ? TaggedAsyncEvent(Event(i.toString), tag) } Future.sequence(futures).map(_ => Done) } val tag = "someTag" val numberOfEvents = 1000 // send a batch with a large number of events val batch = sendMessagesWithTag(tag, numberOfEvents) // wait for acknowledgement of the batch batch.futureValue journalOps.withEventsByPersistenceId()("my-1", 1, numberOfEvents) { tp => val allEvents = tp.toStrict(atMost = 20.seconds) allEvents.size shouldBe numberOfEvents } } } } class H2ScalaEventsByPersistenceIdTest extends EventsByPersistenceIdTest("h2-application.conf") with H2Cleaner ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/EventsByTagMigrationTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.query import akka.actor.ActorSystem import akka.pattern.ask import akka.persistence.jdbc.query.EventsByTagMigrationTest.{ legacyTagKeyConfigOverride, migrationConfigOverride } import akka.persistence.query.{ EventEnvelope, Sequence } import com.typesafe.config.{ ConfigFactory, ConfigValue, ConfigValueFactory } import scala.concurrent.duration._ object EventsByTagMigrationTest { val maxBufferSize = 20 val refreshInterval = 500.milliseconds val legacyTagKey = true val legacyTagKeyConfigOverride: Map[String, ConfigValue] = Map( "jdbc-read-journal.max-buffer-size" -> ConfigValueFactory.fromAnyRef(maxBufferSize.toString), "jdbc-read-journal.refresh-interval" -> ConfigValueFactory.fromAnyRef(refreshInterval.toString), "jdbc-journal.tables.event_tag.legacy-tag-key" -> ConfigValueFactory.fromAnyRef(legacyTagKey)) val migrationConfigOverride: Map[String, ConfigValue] = Map( "jdbc-read-journal.max-buffer-size" -> ConfigValueFactory.fromAnyRef(maxBufferSize.toString), "jdbc-read-journal.refresh-interval" -> ConfigValueFactory.fromAnyRef(refreshInterval.toString)) } abstract class EventsByTagMigrationTest(configS: String) extends QueryTestSpec(configS, migrationConfigOverride) { final val NoMsgTime: FiniteDuration = 100.millis val tagTableCfg = journalConfig.eventTagTableConfiguration val journalTableCfg = journalConfig.eventJournalTableConfiguration val joinSQL: String = s"JOIN ${journalTableName} ON ${tagTableCfg.tableName}.${tagTableCfg.columnNames.eventId} = ${journalTableName}.${journalTableCfg.columnNames.ordering}" val fromSQL: String = s"FROM ${journalTableName} WHERE ${tagTableCfg.tableName}.${tagTableCfg.columnNames.eventId} = ${journalTableName}.${journalTableCfg.columnNames.ordering}" def dropConstraint( tableName: String = tagTableCfg.tableName, constraintTableName: String = "INFORMATION_SCHEMA.TABLE_CONSTRAINTS", constraintType: String, constraintDialect: String = "CONSTRAINT", constraintNameDialect: String = ""): Unit = { withStatement { stmt => // SELECT AND DROP old CONSTRAINT val constraintNameQuery = s""" |SELECT CONSTRAINT_NAME |FROM $constraintTableName |WHERE TABLE_NAME = '$tableName' AND CONSTRAINT_TYPE = '$constraintType' """.stripMargin val resultSet = stmt.executeQuery(constraintNameQuery) if (resultSet.next()) { val constraintName = resultSet.getString("CONSTRAINT_NAME") stmt.execute(s"ALTER TABLE $tableName DROP $constraintDialect $constraintName $constraintNameDialect") } } } def addPKConstraint( tableName: String = tagTableCfg.tableName, pidColumnName: String = tagTableCfg.columnNames.persistenceId, seqNrColumnName: String = tagTableCfg.columnNames.sequenceNumber, tagColumnName: String = tagTableCfg.columnNames.tag, constraintNameDialect: String = "pk_event_tag"): Unit = { withStatement { stmt => stmt.execute(s""" |ALTER TABLE $tableName |ADD CONSTRAINT $constraintNameDialect |PRIMARY KEY ($pidColumnName, $seqNrColumnName, $tagColumnName) """.stripMargin) } } def addFKConstraint( tableName: String = tagTableCfg.tableName, pidColumnName: String = tagTableCfg.columnNames.persistenceId, seqNrColumnName: String = tagTableCfg.columnNames.sequenceNumber, journalTableName: String = journalTableCfg.tableName, journalPidColumnName: String = tagTableCfg.columnNames.persistenceId, journalSeqNrColumnName: String = tagTableCfg.columnNames.sequenceNumber, constraintNameDialect: String = "fk_event_journal_on_pk"): Unit = { withStatement { stmt => stmt.execute(s""" |ALTER TABLE $tableName |ADD CONSTRAINT $constraintNameDialect |FOREIGN KEY 
($pidColumnName, $seqNrColumnName) |REFERENCES $journalTableName ($journalPidColumnName, $journalSeqNrColumnName) |ON DELETE CASCADE """.stripMargin) } } def alterColumn( tableName: String = tagTableCfg.tableName, alterDialect: String = "ALTER COLUMN", columnName: String = tagTableCfg.columnNames.eventId, changeToDialect: String = "BIGINT NULL"): Unit = { withStatement { stmt => stmt.execute(s"ALTER TABLE $tableName $alterDialect $columnName $changeToDialect") } } def fillNewColumn( joinDialect: String = "", pidSetDialect: String = s"${tagTableCfg.columnNames.persistenceId} = ${journalTableName}.${journalTableCfg.columnNames.persistenceId}", seqNrSetDialect: String = s"${tagTableCfg.columnNames.sequenceNumber} = ${journalTableName}.${journalTableCfg.columnNames.sequenceNumber}", fromDialect: String = ""): Unit = { withStatement { stmt => stmt.execute(s""" |UPDATE ${tagTableCfg.tableName} ${joinDialect} |SET ${pidSetDialect}, |${seqNrSetDialect} |${fromDialect}""".stripMargin) } } /** * add new column to event_tag table. */ def addNewColumn(): Unit = {} /** * fill new column for existing rows. */ def migrateLegacyRows(): Unit = { fillNewColumn(fromDialect = fromSQL); } /** * drop old FK constraint */ def dropLegacyFKConstraint(): Unit = dropConstraint(constraintType = "FOREIGN KEY") /** * drop old PK constraint */ def dropLegacyPKConstraint(): Unit = dropConstraint(constraintType = "PRIMARY KEY") /** * create new PK constraint for PK column. */ def addNewPKConstraint(): Unit = addPKConstraint() /** * create new FK constraint for PK column. */ def addNewFKConstraint(): Unit = addFKConstraint() // override this, so we can reset the value. def withRollingUpdateActorSystem(f: ActorSystem => Unit): Unit = { val legacyTagKeyConfig = legacyTagKeyConfigOverride.foldLeft(ConfigFactory.load(configS)) { case (conf, (path, configValue)) => conf.withValue(path, configValue) } implicit val system: ActorSystem = ActorSystem("migrator-test", legacyTagKeyConfig) f(system) system.terminate().futureValue } it should "migrate event tag to new way" in { // 1. Mock the legacy tag column here, but actually use the new tag write. withRollingUpdateActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => (actor1 ? withTags(1, "number")).futureValue (actor2 ? withTags(2, "number")).futureValue (actor3 ? withTags(3, "number")).futureValue journalOps.withEventsByTag()("number", Sequence(Long.MinValue)) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(2), "my-2", 1, 2, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(3), "my-3", 1, 3, timestamp = 0L)) tp.cancel() } }(system) } // Assume that the user alters the table to add the new column manually, so we don't need to maintain // the legacy table schema creation. if (newDao) { addNewColumn(); migrateLegacyRows(); } // 2. write and read redundancy withRollingUpdateActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => (actor1 ? withTags(4, "number")).futureValue (actor2 ? withTags(5, "number")).futureValue (actor3 ? withTags(6, "number")).futureValue // Delayed events that have not yet been projected can still be read.
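// What "write and read redundancy" means in this rolling-update step (explanatory note based on the
// helpers above; the exact DDL differs per database, see the *-event-tag-migration.sql scripts shipped
// with the plugin): while old and new nodes coexist, tag rows keyed by the legacy event_id column and
// tag rows keyed by the new (persistence_id, sequence_number) columns must both remain readable, so the
// query below is expected to deliver the pre-migration events 1-3 as well as the freshly written events
// 4-6. Only in step 3 are the legacy PK/FK constraints dropped and the new constraints added, keyed by
// persistence id, sequence number and tag, after which reads and writes use the new key exclusively.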
journalOps.withEventsByTag()("number", Sequence(Long.MinValue)) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(2), "my-2", 1, 2, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(3), "my-3", 1, 3, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(4), "my-1", 2, 4, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(5), "my-2", 2, 5, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(6), "my-3", 2, 6, timestamp = 0L)) tp.cancel() } }(system) } // 3. Migrate the old constraints so that we can change read and write from the new PK. if (newDao) { dropLegacyFKConstraint(); dropLegacyPKConstraint() addNewPKConstraint() addNewFKConstraint() } // 4. check the migration completed. withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => (actor1 ? withTags(7, "number")).futureValue (actor2 ? withTags(8, "number")).futureValue (actor3 ? withTags(9, "number")).futureValue journalOps.withEventsByTag()("number", Sequence(3)) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(4), "my-1", 2, 4, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(5), "my-2", 2, 5, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(6), "my-3", 2, 6, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(7), "my-1", 3, 7, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(8), "my-2", 3, 8, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(9), "my-3", 3, 9, timestamp = 0L)) tp.cancel() } }(system) } } } class H2ScalaEventsByTagMigrationTest extends EventsByTagMigrationTest("h2-application.conf") with H2Cleaner { override def migrateLegacyRows(): Unit = { fillNewColumn( pidSetDialect = s"""${tagTableCfg.columnNames.persistenceId} = ( | SELECT ${journalTableCfg.columnNames.persistenceId} | ${fromSQL} |)""".stripMargin, seqNrSetDialect = s"""${tagTableCfg.columnNames.sequenceNumber} = ( | SELECT ${journalTableCfg.columnNames.sequenceNumber} | ${fromSQL} |)""".stripMargin) } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/EventsByTagTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query import akka.Done import akka.persistence.query.{ EventEnvelope, NoOffset, Sequence } import akka.pattern.ask import akka.persistence.jdbc.query.EventAdapterTest.{ Event, EventRestored, TaggedAsyncEvent, TaggedEvent } import com.typesafe.config.{ ConfigValue, ConfigValueFactory } import scala.concurrent.duration._ import scala.concurrent.Future import EventsByTagTest._ object EventsByTagTest { val maxBufferSize = 20 val refreshInterval = 500.milliseconds val configOverrides: Map[String, ConfigValue] = Map( "jdbc-read-journal.max-buffer-size" -> ConfigValueFactory.fromAnyRef(maxBufferSize.toString), "jdbc-read-journal.refresh-interval" -> ConfigValueFactory.fromAnyRef(refreshInterval.toString())) } abstract class EventsByTagTest(config: String) extends QueryTestSpec(config, configOverrides) { final val NoMsgTime: FiniteDuration = 100.millis it should "not find events for unknown tags" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors() { (actor1, actor2, actor3) => actor1 ! withTags(1, "one") actor2 ! withTags(2, "two") actor3 ! 
withTags(3, "three") eventually { journalOps.countJournal.futureValue shouldBe 3 } journalOps.withEventsByTag()("unknown", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNoMessage(NoMsgTime) tp.cancel() } } } it should "find all events by tag" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => (actor1 ? withTags(1, "number")).futureValue (actor2 ? withTags(2, "number")).futureValue (actor3 ? withTags(3, "number")).futureValue journalOps.withEventsByTag()("number", Sequence(Long.MinValue)) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(2), "my-2", 1, 2, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(3), "my-3", 1, 3, timestamp = 0L)) tp.cancel() } journalOps.withEventsByTag()("number", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(2), "my-2", 1, 2, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(3), "my-3", 1, 3, timestamp = 0L)) tp.cancel() } journalOps.withEventsByTag()("number", Sequence(0)) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(2), "my-2", 1, 2, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(3), "my-3", 1, 3, timestamp = 0L)) tp.cancel() } journalOps.withEventsByTag()("number", Sequence(1)) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(2), "my-2", 1, 2, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(3), "my-3", 1, 3, timestamp = 0L)) tp.cancel() } journalOps.withEventsByTag()("number", Sequence(2)) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(3), "my-3", 1, 3, timestamp = 0L)) tp.cancel() } journalOps.withEventsByTag()("number", Sequence(3)) { tp => tp.request(Int.MaxValue) tp.expectNoMessage(NoMsgTime) tp.cancel() tp.expectNoMessage(NoMsgTime) } journalOps.withEventsByTag()("number", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(2), "my-2", 1, 2, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(3), "my-3", 1, 3, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) actor1 ? withTags(1, "number") tp.expectNext(EventEnvelope(Sequence(4), "my-1", 2, 1, timestamp = 0L)) actor1 ? withTags(1, "number") tp.expectNext(EventEnvelope(Sequence(5), "my-1", 3, 1, timestamp = 0L)) actor1 ? withTags(1, "number") tp.expectNext(EventEnvelope(Sequence(6), "my-1", 4, 1, timestamp = 0L)) tp.cancel() tp.expectNoMessage(NoMsgTime) } } } it should "deliver EventEnvelopes non-zero timestamps" in withActorSystem { implicit system => val testStartTime = System.currentTimeMillis() val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => (actor1 ? withTags(1, "number")).futureValue (actor2 ? withTags(2, "number")).futureValue (actor3 ? 
withTags(3, "number")).futureValue def assertTimestamp(timestamp: Long, clue: String) = { withClue(clue) { timestamp should !==(0L) // we want to prove that the event got a non-zero timestamp // but also a timestamp that between some boundaries around this test run (timestamp - testStartTime) should be < 120000L (timestamp - testStartTime) should be > 0L } } journalOps.withEventsByTag()("number", Sequence(Long.MinValue)) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case ev @ EventEnvelope(Sequence(1), "my-1", 1, 1) => assertTimestamp(ev.timestamp, "my-1") } tp.expectNextPF { case ev @ EventEnvelope(Sequence(2), "my-2", 1, 2) => assertTimestamp(ev.timestamp, "my-2") } tp.expectNextPF { case ev @ EventEnvelope(Sequence(3), "my-3", 1, 3) => assertTimestamp(ev.timestamp, "my-3") } tp.cancel() } } } it should "select events by tag with exact match" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => (actor1 ? withTags(1, "number", "sharded-1")).futureValue (actor2 ? withTags(2, "number", "sharded-10")).futureValue (actor3 ? withTags(3, "number", "sharded-100")).futureValue journalOps.withEventsByTag()("number", Sequence(Long.MinValue)) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(2), "my-2", 1, 2, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(3), "my-3", 1, 3, timestamp = 0L)) tp.cancel() } journalOps.withEventsByTag()("sharded-1", Sequence(Long.MinValue)) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) tp.cancel() } journalOps.withEventsByTag()("sharded-10", Sequence(Long.MinValue)) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(2), "my-2", 1, 2, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) tp.cancel() } journalOps.withEventsByTag()("sharded-100", Sequence(Long.MinValue)) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(3), "my-3", 1, 3, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) tp.cancel() } } } it should "find all events by tag even when lots of events are persisted concurrently" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) val msgCountPerActor = 20 val numberOfActors = 100 val totalNumberOfMessages = msgCountPerActor * numberOfActors withManyTestActors(numberOfActors) { actors => val actorsWithIndexes = actors.zipWithIndex for { messageNumber <- 0 until msgCountPerActor (actor, actorIdx) <- actorsWithIndexes } actor ! TaggedEvent(Event(s"$actorIdx-$messageNumber"), "myEvent") journalOps.withEventsByTag()("myEvent", NoOffset) { tp => tp.request(Int.MaxValue) (1 to totalNumberOfMessages).foldLeft(Map.empty[Int, Int]) { (map, _) => val mgsParts = tp.expectNext().event.asInstanceOf[EventRestored].value.split("-") val actorIdx = mgsParts(0).toInt val msgNumber = mgsParts(1).toInt val expectedCount = map.getOrElse(actorIdx, 0) assertResult(expected = expectedCount)(msgNumber) // keep track of the next message number we expect for this actor idx map.updated(actorIdx, msgNumber + 1) } tp.cancel() tp.expectNoMessage(NoMsgTime) } } } it should "find events by tag from an offset" in withActorSystem { implicit system => val journalOps = new JavaDslJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => (actor1 ? 
withTags(1, "number")).futureValue (actor2 ? withTags(2, "number")).futureValue (actor3 ? withTags(3, "number")).futureValue eventually { journalOps.countJournal.futureValue shouldBe 3 } journalOps.withEventsByTag()("number", Sequence(1)) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(2), "my-2", 1, 2, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(3), "my-3", 1, 3, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) actor1 ? withTags(1, "number") tp.expectNext(EventEnvelope(Sequence(4), "my-1", 2, 1, timestamp = 0L)) tp.cancel() tp.expectNoMessage(NoMsgTime) } } } it should "persist and find tagged event for one tag" in withActorSystem { implicit system => val journalOps = new JavaDslJdbcReadJournalOperations(system) withTestActors() { (actor1, actor2, actor3) => journalOps.withEventsByTag(10.seconds)("one", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNoMessage(NoMsgTime) actor1 ! withTags(1, "one") // 1 tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) actor2 ! withTags(1, "one") // 2 tp.expectNext(EventEnvelope(Sequence(2), "my-2", 1, 1, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) actor3 ! withTags(1, "one") // 3 tp.expectNext(EventEnvelope(Sequence(3), "my-3", 1, 1, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) actor1 ! withTags(2, "two") // 4 tp.expectNoMessage(NoMsgTime) actor2 ! withTags(2, "two") // 5 tp.expectNoMessage(NoMsgTime) actor3 ! withTags(2, "two") // 6 tp.expectNoMessage(NoMsgTime) actor1 ! withTags(1, "one") // 7 tp.expectNext(EventEnvelope(Sequence(7), "my-1", 3, 1, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) actor2 ! withTags(1, "one") // 8 tp.expectNext(EventEnvelope(Sequence(8), "my-2", 3, 1, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) actor3 ! withTags(1, "one") // 9 tp.expectNext(EventEnvelope(Sequence(9), "my-3", 3, 1, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) tp.cancel() tp.expectNoMessage(NoMsgTime) } } } it should "persist and find tagged events when stored with multiple tags" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => (actor1 ? withTags(1, "one", "1", "prime")).futureValue (actor1 ? withTags(2, "two", "2", "prime")).futureValue (actor1 ? withTags(3, "three", "3", "prime")).futureValue (actor1 ? withTags(4, "four", "4")).futureValue (actor1 ? withTags(5, "five", "5", "prime")).futureValue (actor2 ? withTags(3, "three", "3", "prime")).futureValue (actor3 ? withTags(3, "three", "3", "prime")).futureValue (actor1 ? 6).futureValue (actor1 ? 7).futureValue (actor1 ? 8).futureValue (actor1 ? 9).futureValue (actor1 ? 
10).futureValue eventually { journalOps.countJournal.futureValue shouldBe 12 } journalOps.withEventsByTag(10.seconds)("prime", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(2), "my-1", 2, 2, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(3), "my-1", 3, 3, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(5), "my-1", 5, 5, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(6), "my-2", 1, 3, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(7), "my-3", 1, 3, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) tp.cancel() } journalOps.withEventsByTag(10.seconds)("three", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(3), "my-1", 3, 3, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(6), "my-2", 1, 3, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(7), "my-3", 1, 3, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) tp.cancel() } journalOps.withEventsByTag(10.seconds)("3", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(3), "my-1", 3, 3, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(6), "my-2", 1, 3, timestamp = 0L)) tp.expectNext(EventEnvelope(Sequence(7), "my-3", 1, 3, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) tp.cancel() } journalOps.withEventsByTag(10.seconds)("one", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(1), "my-1", 1, 1, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) tp.cancel() } journalOps.withEventsByTag(10.seconds)("four", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(4), "my-1", 4, 4) => } tp.expectNoMessage(NoMsgTime) tp.cancel() } journalOps.withEventsByTag(10.seconds)("five", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNext(EventEnvelope(Sequence(5), "my-1", 5, 5, timestamp = 0L)) tp.expectNoMessage(NoMsgTime) tp.cancel() tp.expectNoMessage(NoMsgTime) } } } def timeoutMultiplier: Int = 1 it should "show the configured performance characteristics" in withActorSystem { implicit system => import system.dispatcher val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, actor2, actor3) => def sendMessagesWithTag(tag: String, numberOfMessagesPerActor: Int): Future[Done] = { val futures = for (actor <- Seq(actor1, actor2, actor3); i <- 1 to numberOfMessagesPerActor) yield { actor ? 
TaggedAsyncEvent(Event(i.toString), tag) } Future.sequence(futures).map(_ => Done) } val tag1 = "someTag" // send a batch of 3 * 50 sendMessagesWithTag(tag1, 50) // start the query before the future completes journalOps.withEventsByTag()(tag1, NoOffset) { tp => tp.within(5.seconds) { tp.request(Int.MaxValue) tp.expectNextN(150) } tp.expectNoMessage(NoMsgTime) // Send a small batch of 3 * 5 messages sendMessagesWithTag(tag1, 5) // Since queries are executed `refreshInterval`, there must be a small delay before this query gives a result tp.within(min = refreshInterval / 2, max = 2.seconds * timeoutMultiplier) { tp.expectNextN(15) } tp.expectNoMessage(NoMsgTime) // another large batch should be retrieved fast // send a second batch of 3 * 100 sendMessagesWithTag(tag1, 100) tp.within(min = refreshInterval / 2, max = 10.seconds * timeoutMultiplier) { tp.request(Int.MaxValue) tp.expectNextN(300) } tp.expectNoMessage(NoMsgTime) } } } } class H2ScalaEventsByTagTest extends EventsByTagTest("h2-application.conf") with H2Cleaner ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/EventsByUnfrequentTagTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query import akka.pattern.ask import akka.persistence.jdbc.query.EventsByUnfrequentTagTest._ import akka.persistence.query.{ EventEnvelope, NoOffset, Sequence } import com.typesafe.config.{ ConfigValue, ConfigValueFactory } import scala.concurrent.duration._ object EventsByUnfrequentTagTest { val maxBufferSize = 20 val refreshInterval = 500.milliseconds val configOverrides: Map[String, ConfigValue] = Map( "jdbc-read-journal.events-by-tag-buffer-sizes-per-query" -> ConfigValueFactory.fromAnyRef(1.toString), "jdbc-read-journal.max-buffer-size" -> ConfigValueFactory.fromAnyRef(maxBufferSize.toString), "jdbc-read-journal.refresh-interval" -> ConfigValueFactory.fromAnyRef(refreshInterval.toString())) } abstract class EventsByUnfrequentTagTest(config: String) extends QueryTestSpec(config, configOverrides) { final val NoMsgTime: FiniteDuration = 100.millis it should "persist and find a tagged event with multiple (frequently and unfrequently) tags" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, _, _) => val often = "often" val notOften = "not-often" withClue("Persisting multiple tagged events") { (0 until 100).foreach { i => val additional = if (i % 40 == 0) { Seq(notOften) } else Seq.empty val tags = Seq(often) ++ additional (actor1 ? 
withTags(1, tags: _*)).futureValue } eventually { journalOps.countJournal.futureValue shouldBe 100 } journalOps.withEventsByTag()(often, NoOffset) { tp => tp.request(Int.MaxValue) (1 to 100).foreach { i => tp.expectNextPF { case EventEnvelope(Sequence(`i`), _, _, _) => } } tp.cancel() tp.expectNoMessage(NoMsgTime) } journalOps.withEventsByTag(10.seconds)(notOften, NoOffset) { tp => tp.request(Int.MaxValue) tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => } tp.expectNextPF { case EventEnvelope(Sequence(41), _, _, _) => } tp.expectNextPF { case EventEnvelope(Sequence(81), _, _, _) => } tp.cancel() tp.expectNoMessage(NoMsgTime) } } } } } class H2ScalaEventsByUnfrequentTagTest extends EventsByUnfrequentTagTest("h2-shared-db-application.conf") with H2Cleaner ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/HardDeleteQueryTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query import akka.persistence.query.NoOffset import akka.pattern._ import scala.concurrent.duration._ import org.scalatest.matchers.should.Matchers abstract class HardDeleteQueryTest(config: String) extends QueryTestSpec(config) with Matchers { implicit val askTimeout: FiniteDuration = 500.millis it should "not return deleted events when using CurrentEventsByTag" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, _, _) => (actor1 ? withTags(1, "number")).futureValue (actor1 ? withTags(2, "number")).futureValue (actor1 ? withTags(3, "number")).futureValue // delete all three events and wait for confirmations (actor1 ? DeleteCmd(1)).futureValue shouldBe "deleted-1" (actor1 ? DeleteCmd(2)).futureValue shouldBe "deleted-2" (actor1 ? DeleteCmd(3)).futureValue shouldBe "deleted-3" // check that nothing gets delivered journalOps.withCurrentEventsByTag()("number", NoOffset) { tp => tp.request(Int.MaxValue) tp.expectComplete() } } } it should "not return deleted events when using EventsByTag" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, _, _) => (actor1 ? withTags(1, "number")).futureValue (actor1 ? withTags(2, "number")).futureValue (actor1 ? withTags(3, "number")).futureValue // delete all three events and wait for confirmations (actor1 ? DeleteCmd(1)).futureValue shouldBe "deleted-1" (actor1 ? DeleteCmd(2)).futureValue shouldBe "deleted-2" (actor1 ? DeleteCmd(3)).futureValue shouldBe "deleted-3" // check that nothing gets delivered journalOps.withEventsByTag()("number", NoOffset) { tp => tp.request(Int.MaxValue) tp.cancel() } } } it should "not return deleted events when using CurrentEventsByPersistenceId" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, _, _) => (actor1 ? withTags(1, "number")).futureValue (actor1 ? withTags(2, "number")).futureValue (actor1 ? withTags(3, "number")).futureValue // delete all three events and wait for confirmations (actor1 ? DeleteCmd(1)).futureValue shouldBe "deleted-1" (actor1 ? DeleteCmd(2)).futureValue shouldBe "deleted-2" (actor1 ? 
DeleteCmd(3)).futureValue shouldBe "deleted-3" // check that nothing gets delivered journalOps.withCurrentEventsByPersistenceId()("my-1") { tp => tp.request(Int.MaxValue) tp.expectComplete() } } } it should "not return deleted events when using EventsByPersistenceId" in withActorSystem { implicit system => val journalOps = new ScalaJdbcReadJournalOperations(system) withTestActors(replyToMessages = true) { (actor1, _, _) => (actor1 ? withTags(1, "number")).futureValue (actor1 ? withTags(2, "number")).futureValue (actor1 ? withTags(3, "number")).futureValue // delete all three events and wait for confirmations (actor1 ? DeleteCmd(1)).futureValue shouldBe "deleted-1" (actor1 ? DeleteCmd(2)).futureValue shouldBe "deleted-2" (actor1 ? DeleteCmd(3)).futureValue shouldBe "deleted-3" // check that nothing gets delivered journalOps.withEventsByPersistenceId()("my-1") { tp => tp.request(Int.MaxValue) tp.cancel() } } } } class H2HardDeleteQueryTest extends HardDeleteQueryTest("h2-application.conf") with H2Cleaner ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/JournalDaoStreamMessagesMemoryTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query import akka.actor.{ ActorSystem, ExtendedActorSystem } import akka.persistence.jdbc.config.JournalConfig import akka.persistence.jdbc.journal.dao.JournalDao import akka.persistence.{ AtomicWrite, PersistentRepr } import akka.serialization.{ Serialization, SerializationExtension } import akka.stream.scaladsl.{ Sink, Source } import akka.stream.testkit.scaladsl.TestSink import akka.stream.{ Materializer, SystemMaterializer } import com.typesafe.config.{ ConfigValue, ConfigValueFactory } import org.scalatest.concurrent.PatienceConfiguration.Timeout import org.slf4j.LoggerFactory import slick.jdbc.JdbcBackend.Database import slick.jdbc.JdbcProfile import java.lang.management.{ ManagementFactory, MemoryMXBean } import java.util.UUID import scala.collection.immutable import scala.concurrent.duration._ import scala.concurrent.{ ExecutionContext, ExecutionContextExecutor } import scala.util.{ Failure, Success } object JournalDaoStreamMessagesMemoryTest { val configOverrides: Map[String, ConfigValue] = Map("jdbc-journal.fetch-size" -> ConfigValueFactory.fromAnyRef("100")) val MB = 1024 * 1024 } abstract class JournalDaoStreamMessagesMemoryTest(configFile: String) extends QueryTestSpec(configFile, JournalDaoStreamMessagesMemoryTest.configOverrides) { import JournalDaoStreamMessagesMemoryTest.MB private val log = LoggerFactory.getLogger(this.getClass) val memoryMBean: MemoryMXBean = ManagementFactory.getMemoryMXBean it should "stream events" in { withActorSystem { implicit system: ActorSystem => withDatabase { db => implicit val ec: ExecutionContextExecutor = system.dispatcher implicit val mat: Materializer = SystemMaterializer(system).materializer val persistenceId = UUID.randomUUID().toString val writerUuid = UUID.randomUUID().toString val fqcn = journalConfig.pluginConfig.dao val args = Seq( (classOf[Database], db), (classOf[JdbcProfile], profile), (classOf[JournalConfig], journalConfig), (classOf[Serialization], SerializationExtension(system)), (classOf[ExecutionContext], ec), (classOf[Materializer], mat)) val dao: JournalDao = system.asInstanceOf[ExtendedActorSystem].dynamicAccess.createInstanceFor[JournalDao](fqcn, args) match { case Success(dao) => dao case Failure(cause) => throw 
cause } val payloadSize = 5000 // 5000 bytes val eventsPerBatch = 1000 val maxMem = 64 * MB val numberOfInsertBatches = { // calculate the number of batches using a factor to make sure we go a little bit over the limit (maxMem / (payloadSize * eventsPerBatch) * 1.2).round.toInt } val totalMessages = numberOfInsertBatches * eventsPerBatch val totalMessagePayload = totalMessages * payloadSize log.info( s"batches: $numberOfInsertBatches (with $eventsPerBatch events), total messages: $totalMessages, total msgs size: $totalMessagePayload") // payload can be the same when inserting to avoid unnecessary memory usage val payload = Array.fill(payloadSize)('a'.toByte) val lastInsert = Source .fromIterator(() => (1 to numberOfInsertBatches).iterator) .mapAsync(1) { i => val end = i * eventsPerBatch val start = end - (eventsPerBatch - 1) log.info(s"batch $i - events from $start to $end") val atomicWrites = (start to end).map { j => AtomicWrite(immutable.Seq(PersistentRepr(payload, j, persistenceId, writerUuid = writerUuid))) } dao.asyncWriteMessages(atomicWrites).map(_ => i) } .runWith(Sink.last) // wait until we write all messages // being very generous, 1 second per message lastInsert.futureValue(Timeout(totalMessages.seconds)) log.info("Events written, starting replay") // sleep and gc to have some kind of stable measurement of current heap usage Thread.sleep(1000) System.gc() Thread.sleep(1000) val usedBefore = memoryMBean.getHeapMemoryUsage.getUsed val messagesSrc = dao.messagesWithBatch(persistenceId, 0, totalMessages, batchSize = 100, None) val probe = messagesSrc .map { case Success((repr, _)) => if (repr.sequenceNr % 100 == 0) log.info(s"fetched: ${repr.persistenceId} - ${repr.sequenceNr}/${totalMessages}") case Failure(exception) => log.error("Failure when reading messages.", exception) } .runWith(TestSink()) probe.request(10) probe.within(20.seconds) { probe.expectNextN(10) } // sleep and gc to have some kind of stable measurement of current heap usage Thread.sleep(2000) System.gc() Thread.sleep(1000) val usedAfter = memoryMBean.getHeapMemoryUsage.getUsed log.info(s"Used heap before ${usedBefore / MB} MB, after ${usedAfter / MB} MB") // actual usage is much less than 10 MB (usedAfter - usedBefore) should be <= (10L * MB) probe.cancel() } } } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/JournalSequenceActorTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.query import akka.actor.{ ActorRef, ActorSystem } import akka.pattern.ask import akka.persistence.jdbc.config.JournalSequenceRetrievalConfig import akka.persistence.jdbc.journal.dao.legacy.{ JournalRow, JournalTables } import akka.persistence.jdbc.query.JournalSequenceActor.{ GetMaxOrderingId, MaxOrderingId } import akka.persistence.jdbc.query.dao.TestProbeReadJournalDao import akka.persistence.jdbc.SharedActorSystemTestSpec import akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao import akka.serialization.SerializationExtension import akka.stream.scaladsl.{ Sink, Source } import akka.testkit.TestProbe import org.slf4j.LoggerFactory import slick.jdbc.{ JdbcBackend, JdbcCapabilities } import scala.concurrent.Future import scala.concurrent.duration._ import org.scalatest.time.Span abstract class JournalSequenceActorTest(configFile: String, isOracle: Boolean) extends QueryTestSpec(configFile) with JournalTables { private val log = LoggerFactory.getLogger(classOf[JournalSequenceActorTest]) val journalSequenceActorConfig = readJournalConfig.journalSequenceRetrievalConfiguration val journalTableCfg = journalConfig.journalTableConfiguration import profile.api._ implicit val askTimeout: FiniteDuration = 50.millis def generateId: Int = 0 behavior.of("JournalSequenceActor") it should "recover normally" in { if (newDao) pending withActorSystem { implicit system: ActorSystem => withDatabase { db => val numberOfRows = 15000 val rows = for (i <- 1 to numberOfRows) yield JournalRow(generateId, deleted = false, "id", i, Array(0.toByte)) db.run(JournalTable ++= rows).futureValue withJournalSequenceActor(db, maxTries = 100) { actor => eventually { actor.ask(GetMaxOrderingId).mapTo[MaxOrderingId].futureValue shouldBe MaxOrderingId(numberOfRows) } } } } } private def canForceInsert: Boolean = profile.capabilities.contains(JdbcCapabilities.forceInsert) if (canForceInsert && !newDao) { it should s"recover ${if (isOracle) "one hundred thousand" else "one million"} events quickly if no ids are missing" in { withActorSystem { implicit system: ActorSystem => withDatabase { db => val elements = if (isOracle) 100000 else 1000000 Source .fromIterator(() => (1 to elements).iterator) .map(id => JournalRow(id, deleted = false, "id", id, Array(0.toByte))) .grouped(10000) .mapAsync(4) { rows => db.run(JournalTable.forceInsertAll(rows)) } .runWith(Sink.ignore) .futureValue val startTime = System.currentTimeMillis() withJournalSequenceActor(db, maxTries = 100) { actor => implicit val patienceConfig: PatienceConfig = PatienceConfig(10.seconds, Span(200, org.scalatest.time.Millis)) eventually { val currentMax = actor.ask(GetMaxOrderingId).mapTo[MaxOrderingId].futureValue.maxOrdering currentMax shouldBe elements } } val timeTaken = System.currentTimeMillis() - startTime log.info(s"Recovered all events in $timeTaken ms") } } } } if (!isOracle && canForceInsert && !newDao) { // Note this test case cannot be executed for oracle, because forceInsertAll is not supported in the oracle driver. 
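// With maxTries = 2 the actor stops waiting for each gap after two queries and moves past it, so recovery completes even though most ordering ids are missing.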
it should "recover after the specified max number if tries if the first event has a very high sequence number and lots of large gaps exist" in { withActorSystem { implicit system: ActorSystem => withDatabase { db => val numElements = 1000 val gapSize = 10000 val firstElement = 100000000 val lastElement = firstElement + (numElements * gapSize) Source .fromIterator(() => (firstElement to lastElement by gapSize).iterator) .map(id => JournalRow(id, deleted = false, "id", id, Array(0.toByte))) .grouped(10000) .mapAsync(4) { rows => db.run(JournalTable.forceInsertAll(rows)) } .runWith(Sink.ignore) .futureValue withJournalSequenceActor(db, maxTries = 2) { actor => // Should normally recover after `maxTries` seconds implicit val patienceConfig: PatienceConfig = PatienceConfig(10.seconds, Span(200, org.scalatest.time.Millis)) eventually { val currentMax = actor.ask(GetMaxOrderingId).mapTo[MaxOrderingId].futureValue.maxOrdering currentMax shouldBe lastElement } } } } } } if (canForceInsert && !newDao) { it should s"assume that the max ordering id in the database on startup is the max after (queryDelay * maxTries)" in { withActorSystem { implicit system: ActorSystem => withDatabase { db => val maxElement = 100000 // only even numbers, odd numbers are missing val idSeq = 2 to maxElement by 2 Source .fromIterator(() => idSeq.iterator) .map(id => JournalRow(id, deleted = false, "id", id, Array(0.toByte))) .grouped(10000) .mapAsync(4) { rows => db.run(JournalTable.forceInsertAll(rows)) } .runWith(Sink.ignore) .futureValue val highestValue = if (isOracle) { // ForceInsert does not seem to work for oracle, we must delete the odd numbered events db.run(JournalTable.filter(_.ordering % 2L === 1L).delete).futureValue maxElement / 2 } else maxElement withJournalSequenceActor(db, maxTries = 2) { actor => // The actor should assume the max after 2 seconds implicit val patienceConfig: PatienceConfig = PatienceConfig(3.seconds) eventually { val currentMax = actor.ask(GetMaxOrderingId).mapTo[MaxOrderingId].futureValue.maxOrdering currentMax shouldBe highestValue } } } } } } /** * @param maxTries The number of tries before events are assumed missing * (since the actor queries every second by default, * this is effectively the number of seconds after which events are assumed missing) */ def withJournalSequenceActor(db: JdbcBackend.Database, maxTries: Int)(f: ActorRef => Unit)( implicit system: ActorSystem): Unit = { import system.dispatcher val readJournalDao = new ByteArrayReadJournalDao(db, profile, readJournalConfig, SerializationExtension(system)) val actor = system.actorOf(JournalSequenceActor.props(readJournalDao, journalSequenceActorConfig.copy(maxTries = maxTries))) try f(actor) finally system.stop(actor) } } class MockDaoJournalSequenceActorTest extends SharedActorSystemTestSpec { def fetchMaxOrderingId(journalSequenceActor: ActorRef): Future[Long] = { journalSequenceActor.ask(GetMaxOrderingId)(20.millis).mapTo[MaxOrderingId].map(_.maxOrdering) } it should "re-query with delay only when events are missing." 
in { val batchSize = 100 val maxTries = 5 val queryDelay = 200.millis val almostQueryDelay = queryDelay - 50.millis val almostImmediately = 50.millis withTestProbeJournalSequenceActor(batchSize, maxTries, queryDelay) { (daoProbe, _) => daoProbe.expectMsg(almostImmediately, TestProbeReadJournalDao.JournalSequence(0, batchSize)) val firstBatch = (1L to 40L) ++ (51L to 110L) daoProbe.reply(firstBatch) withClue(s"when events are missing, the actor should wait for $queryDelay before querying again") { daoProbe.expectNoMessage(almostQueryDelay) daoProbe.expectMsg(almostQueryDelay, TestProbeReadJournalDao.JournalSequence(40, batchSize)) } // number 41 is still missing after this batch val secondBatch = 42L to 110L daoProbe.reply(secondBatch) withClue(s"when events are missing, the actor should wait for $queryDelay before querying again") { daoProbe.expectNoMessage(almostQueryDelay) daoProbe.expectMsg(almostQueryDelay, TestProbeReadJournalDao.JournalSequence(40, batchSize)) } val thirdBatch = 41L to 110L daoProbe.reply(thirdBatch) withClue( s"when no more events are missing, but less than batchSize elements have been received, " + s"the actor should wait for $queryDelay before querying again") { daoProbe.expectNoMessage(almostQueryDelay) daoProbe.expectMsg(almostQueryDelay, TestProbeReadJournalDao.JournalSequence(110, batchSize)) } val fourthBatch = 111L to 210L daoProbe.reply(fourthBatch) withClue( "When no more events are missing and the number of events received is equal to batchSize, " + "the actor should query again immediately") { daoProbe.expectMsg(almostImmediately, TestProbeReadJournalDao.JournalSequence(210, batchSize)) } // Reply to prevent a dead letter warning on the timeout daoProbe.reply(Seq.empty) daoProbe.expectNoMessage(almostImmediately) } } it should "assume an element is missing after the configured amount of maxTries" in { val batchSize = 100 val maxTries = 5 val queryDelay = 150.millis val slightlyMoreThanQueryDelay = queryDelay + 50.millis val almostImmediately = 20.millis val allIds = (1L to 40L) ++ (43L to 200L) withTestProbeJournalSequenceActor(batchSize, maxTries, queryDelay) { (daoProbe, actor) => daoProbe.expectMsg(almostImmediately, TestProbeReadJournalDao.JournalSequence(0, batchSize)) daoProbe.reply(allIds.take(100)) val idsLargerThan40 = allIds.dropWhile(_ <= 40) val retryResponse = idsLargerThan40.take(100) for (i <- 1 to maxTries) withClue(s"should retry $maxTries times (attempt $i)") { daoProbe.expectMsg(slightlyMoreThanQueryDelay, TestProbeReadJournalDao.JournalSequence(40, batchSize)) daoProbe.reply(retryResponse) } // sanity check retryResponse.last shouldBe 142 withClue( "The elements 41 and 42 should be assumed missing, " + "the actor should query again immediately since a full batch has been received") { daoProbe.expectMsg(almostImmediately, TestProbeReadJournalDao.JournalSequence(142, batchSize)) fetchMaxOrderingId(actor).futureValue shouldBe 142 } // Reply to prevent a dead letter warning on the timeout daoProbe.reply(Seq.empty) daoProbe.expectNoMessage(almostImmediately) } } def withTestProbeJournalSequenceActor(batchSize: Int, maxTries: Int, queryDelay: FiniteDuration)( f: (TestProbe, ActorRef) => Unit)(implicit system: ActorSystem): Unit = { val testProbe = TestProbe() val config = JournalSequenceRetrievalConfig( batchSize = batchSize, maxTries = maxTries, queryDelay = queryDelay, maxBackoffQueryDelay = 4.seconds, askTimeout = 100.millis) val mockDao = new TestProbeReadJournalDao(testProbe) val actor = system.actorOf(JournalSequenceActor.props(mockDao,
config)) try f(testProbe, actor) finally system.stop(actor) } } class H2JournalSequenceActorTest extends JournalSequenceActorTest("h2-application.conf", isOracle = false) with H2Cleaner ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/MultipleReadJournalTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query import akka.persistence.jdbc.query.EventsByTagTest._ import akka.persistence.jdbc.query.scaladsl.JdbcReadJournal import akka.persistence.query.{ NoOffset, PersistenceQuery } import akka.stream.scaladsl.Sink class MultipleReadJournalTest extends QueryTestSpec("h2-two-read-journals-application.conf", configOverrides) with H2Cleaner { it should "be able to create two read journals and use eventsByTag on them" in withActorSystem { implicit system => val normalReadJournal = PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier) val secondReadJournal = PersistenceQuery(system).readJournalFor[JdbcReadJournal]("jdbc-read-journal-number-two") val events1 = normalReadJournal.currentEventsByTag("someTag", NoOffset).runWith(Sink.seq) val events2 = secondReadJournal.currentEventsByTag("someTag", NoOffset).runWith(Sink.seq) events1.futureValue shouldBe empty events2.futureValue shouldBe empty } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/QueryTestSpec.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query import akka.actor.{ ActorRef, ActorSystem, Props, Stash, Status } import akka.pattern.ask import akka.event.LoggingReceive import akka.persistence.{ DeleteMessagesFailure, DeleteMessagesSuccess, PersistentActor, SaveSnapshotFailure, SaveSnapshotSuccess, SnapshotOffer } import akka.persistence.jdbc.SingleActorSystemPerTestSpec import akka.persistence.jdbc.query.EventAdapterTest.{ Event, Snapshot, TaggedAsyncEvent, TaggedEvent } import akka.persistence.jdbc.query.javadsl.{ JdbcReadJournal => JavaJdbcReadJournal } import akka.persistence.jdbc.query.scaladsl.JdbcReadJournal import akka.persistence.journal.Tagged import akka.persistence.query.{ EventEnvelope, Offset, PersistenceQuery } import akka.stream.scaladsl.Sink import akka.stream.testkit.TestSubscriber import akka.stream.testkit.javadsl.{ TestSink => JavaSink } import akka.stream.testkit.scaladsl.TestSink import akka.stream.{ Materializer, SystemMaterializer } import com.typesafe.config.ConfigValue import scala.concurrent.Future import scala.concurrent.duration._ import akka.persistence.jdbc.testkit.internal.H2 import akka.persistence.jdbc.testkit.internal.MySQL import akka.persistence.jdbc.testkit.internal.Oracle import akka.persistence.jdbc.testkit.internal.Postgres import akka.persistence.jdbc.testkit.internal.SqlServer import scala.concurrent.ExecutionContext trait ReadJournalOperations { def withCurrentPersistenceIds(within: FiniteDuration = 60.second)(f: TestSubscriber.Probe[String] => Unit): Unit def withPersistenceIds(within: FiniteDuration = 60.second)(f: TestSubscriber.Probe[String] => Unit): Unit def withCurrentEventsByPersistenceId(within: FiniteDuration = 60.second)( persistenceId: String, fromSequenceNr: Long = 0, toSequenceNr: Long = Long.MaxValue)(f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit def 
withEventsByPersistenceId(within: FiniteDuration = 60.second)( persistenceId: String, fromSequenceNr: Long = 0, toSequenceNr: Long = Long.MaxValue)(f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit def withCurrentEventsByTag(within: FiniteDuration = 60.second)(tag: String, offset: Offset)( f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit def withEventsByTag(within: FiniteDuration = 60.second)(tag: String, offset: Offset)( f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit def countJournal: Future[Long] } class ScalaJdbcReadJournalOperations(readJournal: JdbcReadJournal)(implicit system: ActorSystem, mat: Materializer) extends ReadJournalOperations { def this(system: ActorSystem) = this(PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier))( system, SystemMaterializer(system).materializer) import system.dispatcher def withCurrentPersistenceIds(within: FiniteDuration)(f: TestSubscriber.Probe[String] => Unit): Unit = { val tp = readJournal.currentPersistenceIds().runWith(TestSink[String]()) tp.within(within)(f(tp)) } def withPersistenceIds(within: FiniteDuration)(f: TestSubscriber.Probe[String] => Unit): Unit = { val tp = readJournal.persistenceIds().runWith(TestSink[String]()) tp.within(within)(f(tp)) } def withCurrentEventsByPersistenceId( within: FiniteDuration)(persistenceId: String, fromSequenceNr: Long = 0, toSequenceNr: Long = Long.MaxValue)( f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = { val tp = readJournal .currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr) .runWith(TestSink[EventEnvelope]()) tp.within(within)(f(tp)) } def withEventsByPersistenceId( within: FiniteDuration)(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long)( f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = { val tp = readJournal.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).runWith(TestSink[EventEnvelope]()) tp.within(within)(f(tp)) } def withCurrentEventsByTag(within: FiniteDuration)(tag: String, offset: Offset)( f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = { val tp = readJournal.currentEventsByTag(tag, offset).runWith(TestSink[EventEnvelope]()) tp.within(within)(f(tp)) } def withEventsByTag(within: FiniteDuration)(tag: String, offset: Offset)( f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = { val tp = readJournal.eventsByTag(tag, offset).runWith(TestSink[EventEnvelope]()) tp.within(within)(f(tp)) } override def countJournal: Future[Long] = readJournal .currentPersistenceIds() .filter(pid => (1 to 3).map(id => s"my-$id").contains(pid)) .mapAsync(1) { pid => readJournal.currentEventsByPersistenceId(pid, 0, Long.MaxValue).map(_ => 1L).runWith(Sink.seq).map(_.sum) } .runWith(Sink.seq) .map(_.sum) } class JavaDslJdbcReadJournalOperations(readJournal: javadsl.JdbcReadJournal)( implicit system: ActorSystem, mat: Materializer) extends ReadJournalOperations { def this(system: ActorSystem) = this( PersistenceQuery.get(system).getReadJournalFor(classOf[javadsl.JdbcReadJournal], JavaJdbcReadJournal.Identifier))( system, SystemMaterializer(system).materializer) import system.dispatcher def withCurrentPersistenceIds(within: FiniteDuration)(f: TestSubscriber.Probe[String] => Unit): Unit = { val sink: akka.stream.javadsl.Sink[String, TestSubscriber.Probe[String]] = JavaSink.create[String](system) val tp = readJournal.currentPersistenceIds().runWith(sink, mat) tp.within(within)(f(tp)) } def withPersistenceIds(within: FiniteDuration)(f: TestSubscriber.Probe[String] => Unit): Unit = { val sink: 
akka.stream.javadsl.Sink[String, TestSubscriber.Probe[String]] = JavaSink.create[String](system) val tp = readJournal.persistenceIds().runWith(sink, mat) tp.within(within)(f(tp)) } def withCurrentEventsByPersistenceId( within: FiniteDuration)(persistenceId: String, fromSequenceNr: Long = 0, toSequenceNr: Long = Long.MaxValue)( f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = { val sink: akka.stream.javadsl.Sink[EventEnvelope, TestSubscriber.Probe[EventEnvelope]] = JavaSink.create[EventEnvelope](system) val tp = readJournal.currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).runWith(sink, mat) tp.within(within)(f(tp)) } def withEventsByPersistenceId( within: FiniteDuration)(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long)( f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = { val sink: akka.stream.javadsl.Sink[EventEnvelope, TestSubscriber.Probe[EventEnvelope]] = JavaSink.create[EventEnvelope](system) val tp = readJournal.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).runWith(sink, mat) tp.within(within)(f(tp)) } def withCurrentEventsByTag(within: FiniteDuration)(tag: String, offset: Offset)( f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = { val sink: akka.stream.javadsl.Sink[EventEnvelope, TestSubscriber.Probe[EventEnvelope]] = JavaSink.create[EventEnvelope](system) val tp = readJournal.currentEventsByTag(tag, offset).runWith(sink, mat) tp.within(within)(f(tp)) } def withEventsByTag(within: FiniteDuration)(tag: String, offset: Offset)( f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = { val sink: akka.stream.javadsl.Sink[EventEnvelope, TestSubscriber.Probe[EventEnvelope]] = JavaSink.create[EventEnvelope](system) val tp = readJournal.eventsByTag(tag, offset).runWith(sink, mat) tp.within(within)(f(tp)) } override def countJournal: Future[Long] = readJournal .currentPersistenceIds() .asScala .filter(pid => (1 to 3).map(id => s"my-$id").contains(pid)) .mapAsync(1) { pid => readJournal .currentEventsByPersistenceId(pid, 0, Long.MaxValue) .asScala .map(_ => 1L) .runFold(List.empty[Long])(_ :+ _) .map(_.sum) } .runFold(List.empty[Long])(_ :+ _) .map(_.sum) } object QueryTestSpec { implicit final class EventEnvelopeProbeOps(val probe: TestSubscriber.Probe[EventEnvelope]) extends AnyVal { def expectNextEventEnvelope( persistenceId: String, sequenceNr: Long, event: Any): TestSubscriber.Probe[EventEnvelope] = { val env = probe.expectNext() assertEnvelope(env, persistenceId, sequenceNr, event) probe } def expectNextEventEnvelope( timeout: FiniteDuration, persistenceId: String, sequenceNr: Long, event: Any): TestSubscriber.Probe[EventEnvelope] = { val env = probe.expectNext(timeout) assertEnvelope(env, persistenceId, sequenceNr, event) probe } private def assertEnvelope(env: EventEnvelope, persistenceId: String, sequenceNr: Long, event: Any): Unit = { assert( env.persistenceId == persistenceId, s"expected persistenceId $persistenceId, found ${env.persistenceId}, in $env") assert(env.sequenceNr == sequenceNr, s"expected sequenceNr $sequenceNr, found ${env.sequenceNr}, in $env") assert(env.event == event, s"expected event $event, found ${env.event}, in $env") } } } abstract class QueryTestSpec(config: String, configOverrides: Map[String, ConfigValue] = Map.empty) extends SingleActorSystemPerTestSpec(config, configOverrides) { case class DeleteCmd(toSequenceNr: Long = Long.MaxValue) extends Serializable final val ExpectNextTimeout = 10.second class TestActor(id: Int, replyToMessages: Boolean) extends PersistentActor with 
Stash { override val persistenceId: String = "my-" + id var state: Int = 0 var snapshotSender: Option[ActorRef] = None override def receiveCommand: Receive = idle def idle: Receive = LoggingReceive { case "state" => sender() ! state case DeleteCmd(toSequenceNr) => deleteMessages(toSequenceNr) if (replyToMessages) { context.become(awaitingDeleting(sender())) } case event: Int => persist(event) { (event: Int) => updateState(event) if (replyToMessages) sender() ! akka.actor.Status.Success(event) } case event @ Tagged(payload: Int, tags) => persist(event) { _ => updateState(payload) if (replyToMessages) sender() ! akka.actor.Status.Success((payload, tags)) } case event: Event => persist(event) { evt => if (replyToMessages) sender() ! akka.actor.Status.Success(evt) } case event @ TaggedEvent(payload: Event, tag) => persist(event) { _ => if (replyToMessages) sender() ! akka.actor.Status.Success((payload, tag)) } case event @ TaggedAsyncEvent(payload: Event, tag) => persistAsync(event) { _ => if (replyToMessages) sender() ! akka.actor.Status.Success((payload, tag)) } case SaveSnapshotSuccess(_) => snapshotSender.foreach { sender => sender ! akka.actor.Status.Success(state) } case SaveSnapshotFailure(_, reason) => snapshotSender.foreach { sender => sender ! akka.actor.Status.Failure(reason) } case Snapshot => saveSnapshot(state) if (replyToMessages) snapshotSender = Some(sender()) } def awaitingDeleting(origSender: ActorRef): Receive = LoggingReceive { case DeleteMessagesSuccess(toSequenceNr) => origSender ! s"deleted-$toSequenceNr" unstashAll() context.become(idle) case DeleteMessagesFailure(ex, _) => origSender ! Status.Failure(ex) unstashAll() context.become(idle) // stash whatever other messages case _ => stash() } def updateState(event: Int): Unit = { state = state + event } override def receiveRecover: Receive = LoggingReceive { case event: Int => updateState(event) case SnapshotOffer(_, offeredSnapshot) => state = offeredSnapshot.asInstanceOf[Int] } } def setupEmpty(persistenceId: Int, replyToMessages: Boolean)(implicit system: ActorSystem): ActorRef = { system.actorOf(Props(new TestActor(persistenceId, replyToMessages))) } def withTestActors(seq: Int = 1, replyToMessages: Boolean = false)(f: (ActorRef, ActorRef, ActorRef) => Unit)( implicit system: ActorSystem): Unit = { val refs = (seq until seq + 3).map(setupEmpty(_, replyToMessages)).toList try { expectAllStarted(refs) f(refs.head, refs.drop(1).head, refs.drop(2).head) } finally killActors(refs: _*) } def withManyTestActors(amount: Int, seq: Int = 1, replyToMessages: Boolean = false)(f: Seq[ActorRef] => Unit)( implicit system: ActorSystem): Unit = { val refs = (seq until seq + amount).map(setupEmpty(_, replyToMessages)).toList try { expectAllStarted(refs) f(refs) } finally killActors(refs: _*) } def expectAllStarted(refs: Seq[ActorRef])(implicit system: ActorSystem): Unit = { // make sure we notice early if the actors failed to start (because of issues with journal) makes debugging // failing tests easier as we know it is not the actual interaction from the test that is the problem implicit val ec: ExecutionContext = system.dispatcher Future.sequence(refs.map(_ ? 
"state")).futureValue } def withTags(payload: Any, tags: String*) = Tagged(payload, Set(tags: _*)) } trait PostgresCleaner extends QueryTestSpec { def clearPostgres(): Unit = tables.foreach { name => withStatement(stmt => stmt.executeUpdate(s"DELETE FROM $name")) } override def beforeAll(): Unit = { dropAndCreate(Postgres) super.beforeAll() } override def beforeEach(): Unit = { dropAndCreate(Postgres) super.beforeEach() } } trait MysqlCleaner extends QueryTestSpec { def clearMySQL(): Unit = { withStatement { stmt => stmt.execute("SET FOREIGN_KEY_CHECKS = 0") tables.foreach { name => stmt.executeUpdate(s"TRUNCATE $name") } stmt.execute("SET FOREIGN_KEY_CHECKS = 1") } } override def beforeAll(): Unit = { dropAndCreate(MySQL) super.beforeAll() } override def beforeEach(): Unit = { clearMySQL() super.beforeEach() } } trait OracleCleaner extends QueryTestSpec { def clearOracle(): Unit = { tables.foreach { name => withStatement(stmt => stmt.executeUpdate(s"""DELETE FROM "$name" """)) } withStatement(stmt => stmt.executeUpdate("""BEGIN "reset_sequence"; END; """)) } override def beforeAll(): Unit = { dropAndCreate(Oracle) super.beforeAll() } override def beforeEach(): Unit = { clearOracle() super.beforeEach() } } trait SqlServerCleaner extends QueryTestSpec { var initial = true def clearSqlServer(): Unit = { val reset = if (initial) { initial = false 1 } else { 0 } withStatement { stmt => tables.foreach { name => stmt.executeUpdate(s"DELETE FROM $name") } stmt.executeUpdate(s"DBCC CHECKIDENT('${journalTableName}', RESEED, $reset)") } } override def beforeAll(): Unit = { dropAndCreate(SqlServer) super.beforeAll() } override def afterAll(): Unit = { dropAndCreate(SqlServer) super.afterAll() } override def beforeEach(): Unit = { clearSqlServer() super.beforeEach() } } trait H2Cleaner extends QueryTestSpec { def clearH2(): Unit = tables.foreach { name => withStatement(stmt => stmt.executeUpdate(s"DELETE FROM $name")) } override def beforeEach(): Unit = { dropAndCreate(H2) super.beforeEach() } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/TaggingEventAdapter.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query import akka.persistence.jdbc.query.TaggingEventAdapter.TagEvent import akka.persistence.journal.{ Tagged, WriteEventAdapter } object TaggingEventAdapter { case class TagEvent(payload: Any, tags: Set[String]) } /** * The TaggingEventAdapter will instruct persistence * to tag the received event. */ class TaggingEventAdapter extends WriteEventAdapter { override def manifest(event: Any): String = "" override def toJournal(event: Any): Any = event match { case TagEvent(payload, tags) => Tagged(payload, tags) case _ => event } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/dao/ReadJournalTablesTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.query.dao.legacy import akka.persistence.jdbc.TablesTestSpec import akka.persistence.jdbc.journal.dao.legacy.JournalTables import slick.jdbc.JdbcProfile class ReadJournalTablesTest extends TablesTestSpec { val readJournalTableConfiguration = readJournalConfig.journalTableConfiguration object TestByteAReadJournalTables extends JournalTables { override val profile: JdbcProfile = slick.jdbc.PostgresProfile override val journalTableCfg = readJournalTableConfiguration } "JournalTable" should "be configured with a schema name" in { TestByteAReadJournalTables.JournalTable.baseTableRow.schemaName shouldBe readJournalTableConfiguration.schemaName } it should "be configured with a table name" in { TestByteAReadJournalTables.JournalTable.baseTableRow.tableName shouldBe readJournalTableConfiguration.tableName } it should "be configured with column names" in { val colName = toColumnName(readJournalTableConfiguration.tableName)(_) TestByteAReadJournalTables.JournalTable.baseTableRow.persistenceId.toString shouldBe colName( readJournalTableConfiguration.columnNames.persistenceId) TestByteAReadJournalTables.JournalTable.baseTableRow.sequenceNumber.toString shouldBe colName( readJournalTableConfiguration.columnNames.sequenceNumber) // TestByteAJournalTables.JournalTable.baseTableRow.tags.toString() shouldBe colName(journalTableConfiguration.columnNames.tags) } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/query/dao/TestProbeReadJournalDao.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.query.dao import akka.NotUsed import akka.persistence.jdbc.query.dao.TestProbeReadJournalDao.JournalSequence import akka.persistence.PersistentRepr import akka.stream.scaladsl.Source import akka.testkit.TestProbe import akka.util.Timeout import akka.pattern.ask import scala.concurrent.Future import scala.concurrent.duration._ import scala.util.Try import akka.actor.Scheduler object TestProbeReadJournalDao { case class JournalSequence(offset: Long, limit: Long) } /** * Read journal dao where the journalSequence query is backed by a testprobe */ class TestProbeReadJournalDao(val probe: TestProbe) extends ReadJournalDao { // Since the testprobe is instrumented by the test, it should respond very fast implicit val askTimeout: Timeout = Timeout(100.millis) /** * Returns distinct stream of persistenceIds */ override def allPersistenceIdsSource(max: Long): Source[String, NotUsed] = ??? /** * Returns a Source of bytes for certain tag from an offset. The result is sorted by * created time asc thus the offset is relative to the creation time */ override def eventsByTag( tag: String, offset: Long, maxOffset: Long, max: Long): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed] = ??? /** * Returns a Source of bytes for a certain persistenceId */ override def messages( persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = ??? override def messagesWithBatch( persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, batchSize: Int, refreshInterval: Option[(FiniteDuration, Scheduler)]): Source[Try[(PersistentRepr, Long)], NotUsed] = ??? 
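// Only the journalSequence and maxJournalSequence queries below are implemented; the queries above are not exercised by the JournalSequenceActor tests and are left as ???.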
/** * @param offset Minimum value to retrieve * @param limit Maximum number of values to retrieve * @return A Source of journal event sequence numbers (corresponding to the Ordering column) */ override def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] = { val f = probe.ref.ask(JournalSequence(offset, limit)).mapTo[scala.collection.immutable.Seq[Long]] Source.future(f).mapConcat(identity) } /** * @return The value of the maximum (ordering) id in the journal */ override def maxJournalSequence(): Future[Long] = Future.successful(0) } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/serialization/StoreOnlySerializableMessagesTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.serialization import scala.concurrent.duration._ import akka.actor.ActorRef import akka.actor.Props import akka.event.LoggingReceive import akka.persistence.PersistentActor import akka.persistence.RecoveryCompleted import akka.persistence.jdbc.SharedActorSystemTestSpec import akka.persistence.jdbc.testkit.internal.H2 import akka.persistence.jdbc.testkit.internal.SchemaType import akka.testkit.TestProbe abstract class StoreOnlySerializableMessagesTest(config: String, schemaType: SchemaType) extends SharedActorSystemTestSpec(config) { case class PersistFailure(cause: Throwable, event: Any, seqNr: Long) case class PersistRejected(cause: Throwable, event: Any, seqNr: Long) class TestActor( val persistenceId: String, recoverProbe: ActorRef, persistFailureProbe: ActorRef, persistRejectedProbe: ActorRef) extends PersistentActor { override val receiveRecover: Receive = LoggingReceive { case msg => recoverProbe ! msg } override val receiveCommand: Receive = LoggingReceive { case msg => persist(msg) { _ => sender() ! akka.actor.Status.Success("") } } override protected def onPersistFailure(cause: Throwable, event: Any, seqNr: Long): Unit = persistFailureProbe ! PersistFailure(cause, event, seqNr) override protected def onPersistRejected(cause: Throwable, event: Any, seqNr: Long): Unit = persistRejectedProbe ! 
PersistRejected(cause, event, seqNr) } def withActor(id: String = "1")(f: ActorRef => TestProbe => TestProbe => TestProbe => Unit): Unit = { val recoverProbe = TestProbe() val persistFailureProbe = TestProbe() val persistRejectedProbe = TestProbe() val persistentActor = system.actorOf( Props(new TestActor(s"my-$id", recoverProbe.ref, persistFailureProbe.ref, persistRejectedProbe.ref))) try f(persistentActor)(recoverProbe)(persistFailureProbe)(persistRejectedProbe) finally killActors(persistentActor) } override def beforeAll(): Unit = { dropAndCreate(schemaType) super.beforeAll() } it should "persist a single serializable message" in { withActor("1") { actor => recover => failure => rejected => val tp = TestProbe() recover.expectMsg(RecoveryCompleted) tp.send(actor, "foo") // strings are serializable tp.expectMsg(akka.actor.Status.Success("")) failure.expectNoMessage(100.millis) rejected.expectNoMessage(100.millis) } // the recover cycle withActor("1") { _ => recover => failure => rejected => recover.expectMsg("foo") recover.expectMsg(RecoveryCompleted) failure.expectNoMessage(100.millis) rejected.expectNoMessage(100.millis) } } it should "not persist a single non-serializable message" in { class NotSerializable withActor("2") { actor => recover => _ => rejected => val tp = TestProbe() recover.expectMsg(RecoveryCompleted) tp.send(actor, new NotSerializable) // the NotSerializable class cannot be serialized tp.expectNoMessage(300.millis) // the handler should not have been called, because persist has failed // the actor should call the OnPersistRejected rejected.expectMsgPF() { case PersistRejected(_, _, _) => } } // the recover cycle, no message should be recovered withActor("2") { _ => recover => _ => _ => recover.expectMsg(RecoveryCompleted) recover.expectNoMessage(100.millis) } } it should "persist only serializable messages" in { class NotSerializable withActor("3") { actor => recover => _ => rejected => val tp = TestProbe() recover.expectMsg(RecoveryCompleted) tp.send(actor, "foo") tp.expectMsg(akka.actor.Status.Success("")) tp.send(actor, new NotSerializable) // the NotSerializable class cannot be serialized tp.expectNoMessage(300.millis) // the handler should not have been called, because persist has failed // the actor should call the OnPersistRejected rejected.expectMsgPF() { case PersistRejected(_, _, _) => } rejected.expectNoMessage(100.millis) } // recover cycle withActor("3") { _ => recover => failure => rejected => recover.expectMsg("foo") recover.expectMsg(RecoveryCompleted) failure.expectNoMessage(100.millis) rejected.expectNoMessage(100.millis) } } } class H2StoreOnlySerializableMessagesTest extends StoreOnlySerializableMessagesTest("h2-application.conf", H2) ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/snapshot/JdbcSnapshotStoreSpec.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.snapshot import akka.persistence.CapabilityFlag import akka.persistence.jdbc.config._ import akka.persistence.jdbc.util.{ ClasspathResources, DropCreate } import akka.persistence.jdbc.db.SlickDatabase import akka.persistence.snapshot.SnapshotStoreSpec import com.typesafe.config.{ Config, ConfigFactory } import org.scalatest.BeforeAndAfterAll import org.scalatest.concurrent.ScalaFutures import scala.concurrent.duration._ import akka.persistence.jdbc.testkit.internal.H2 import akka.persistence.jdbc.testkit.internal.SchemaType import scala.concurrent.ExecutionContext abstract class JdbcSnapshotStoreSpec(config: Config, schemaType: SchemaType) extends SnapshotStoreSpec(config) with BeforeAndAfterAll with ScalaFutures with ClasspathResources with DropCreate { implicit val pc: PatienceConfig = PatienceConfig(timeout = 10.seconds) implicit lazy val ec: ExecutionContext = system.dispatcher lazy val cfg = system.settings.config.getConfig("jdbc-journal") lazy val journalConfig = new JournalConfig(cfg) lazy val db = SlickDatabase.database(cfg, new SlickConfiguration(cfg.getConfig("slick")), "slick.db") protected override def supportsSerialization: CapabilityFlag = newDao protected override def supportsMetadata: CapabilityFlag = newDao override def beforeAll(): Unit = { dropAndCreate(schemaType) super.beforeAll() } override def afterAll(): Unit = { db.close() } } class H2SnapshotStoreSpec extends JdbcSnapshotStoreSpec(ConfigFactory.load("h2-application.conf"), H2) ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/snapshot/dao/legacy/SnapshotTablesTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.snapshot.dao.legacy import akka.persistence.jdbc.TablesTestSpec import slick.jdbc.JdbcProfile class SnapshotTablesTest extends TablesTestSpec { val snapshotTableConfiguration = snapshotConfig.legacySnapshotTableConfiguration object TestByteASnapshotTables extends SnapshotTables { override val profile: JdbcProfile = slick.jdbc.PostgresProfile override val snapshotTableCfg = snapshotTableConfiguration } "SnapshotTable" should "be configured with a schema name" in { TestByteASnapshotTables.SnapshotTable.baseTableRow.schemaName shouldBe snapshotTableConfiguration.schemaName } it should "be configured with a table name" in { TestByteASnapshotTables.SnapshotTable.baseTableRow.tableName shouldBe snapshotTableConfiguration.tableName } it should "be configured with column names" in { val colName = toColumnName(snapshotTableConfiguration.tableName)(_) TestByteASnapshotTables.SnapshotTable.baseTableRow.persistenceId.toString shouldBe colName( snapshotTableConfiguration.columnNames.persistenceId) TestByteASnapshotTables.SnapshotTable.baseTableRow.sequenceNumber.toString shouldBe colName( snapshotTableConfiguration.columnNames.sequenceNumber) TestByteASnapshotTables.SnapshotTable.baseTableRow.created.toString shouldBe colName( snapshotTableConfiguration.columnNames.created) TestByteASnapshotTables.SnapshotTable.baseTableRow.snapshot.toString shouldBe colName( snapshotTableConfiguration.columnNames.snapshot) } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/state/Payloads.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.state import akka.serialization._ final case class MyPayload(data: String) class MyPayloadSerializer extends Serializer { val MyPayloadClass = classOf[MyPayload] def identifier: Int = 77123 def includeManifest: Boolean = true def toBinary(o: AnyRef): Array[Byte] = o match { case MyPayload(data) => s"${data}".getBytes("UTF-8") case _ => throw new Exception("Unknown object for serialization") } def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef = manifest match { case Some(MyPayloadClass) => MyPayload(s"${new String(bytes, "UTF-8")}") case Some(c) => throw new Exception(s"unexpected manifest ${c}") case None => throw new Exception("no manifest") } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.state import scala.concurrent.Future import akka.actor.ActorSystem import akka.Done import org.scalatest.concurrent.ScalaFutures import org.scalatest.matchers.should.Matchers import scala.annotation.nowarn @nowarn("msg=never used") object ScaladslSnippets extends ScalaFutures with Matchers { def create(): Unit = { // #create import akka.persistence.jdbc.testkit.scaladsl.SchemaUtils implicit val system: ActorSystem = ActorSystem("example") val _: Future[Done] = SchemaUtils.createIfNotExists() // #create } def durableStatePlugin(): Unit = { implicit val system: ActorSystem = ActorSystem() // #jdbc-durable-state-store import akka.persistence.state.DurableStateStoreRegistry import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore val store = DurableStateStoreRegistry .get(system) .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier) // #jdbc-durable-state-store } def getObject(): Unit = { implicit val system: ActorSystem = ActorSystem() // #get-object import akka.persistence.state.DurableStateStoreRegistry import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore import akka.persistence.state.scaladsl.GetObjectResult val store = DurableStateStoreRegistry .get(system) .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier) val futureResult: Future[GetObjectResult[String]] = store.getObject("InvalidPersistenceId") futureResult.futureValue.value shouldBe None // #get-object } def upsertAndGetObject(): Unit = { implicit val system: ActorSystem = ActorSystem() implicit val e = system.dispatcher // #upsert-get-object import akka.persistence.state.DurableStateStoreRegistry import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore import akka.persistence.state.scaladsl.GetObjectResult val store = DurableStateStoreRegistry .get(system) .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier) val v: Future[GetObjectResult[String]] = for { n <- store.upsertObject("p234", 1, "a valid string", "t123") _ = n shouldBe akka.Done g <- store.getObject("p234") _ = g.value shouldBe Some("a valid string") u <- store.upsertObject("p234", 2, "updated valid string", "t123") _ = u shouldBe akka.Done h <- store.getObject("p234") } yield h v.futureValue.value shouldBe Some("updated valid string") // #upsert-get-object } def deleteObject(): Unit = { implicit val system: ActorSystem = ActorSystem() // #delete-object import akka.persistence.state.DurableStateStoreRegistry import 
akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore val store = DurableStateStoreRegistry .get(system) .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier) store.deleteObject("p123", 0L).futureValue shouldBe Done store.getObject("p123").futureValue.value shouldBe None // #delete-object } def currentChanges(): Unit = { implicit val system: ActorSystem = ActorSystem() // #current-changes import akka.NotUsed import akka.stream.scaladsl.Source import akka.persistence.state.DurableStateStoreRegistry import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore import akka.persistence.query.{ DurableStateChange, NoOffset } val store = DurableStateStoreRegistry .get(system) .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier) val willCompleteTheStream: Source[DurableStateChange[String], NotUsed] = store.currentChanges("tag-1", NoOffset) // #current-changes } def changes(): Unit = { implicit val system: ActorSystem = ActorSystem() // #changes import akka.NotUsed import akka.stream.scaladsl.Source import akka.persistence.state.DurableStateStoreRegistry import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore import akka.persistence.query.{ DurableStateChange, NoOffset } val store = DurableStateStoreRegistry .get(system) .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier) val willNotCompleteTheStream: Source[DurableStateChange[String], NotUsed] = store.changes("tag-1", NoOffset) // #changes } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/state/scaladsl/DataGenerationHelper.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.state.scaladsl import org.scalatest.concurrent.ScalaFutures import scala.concurrent.{ ExecutionContext, Future } trait DataGenerationHelper extends ScalaFutures { implicit def defaultPatience: PatienceConfig // upsert multiple records for 1 persistence id def upsertManyForOnePersistenceId( store: JdbcDurableStateStore[String], persistenceId: String, tag: String, startIndex: Int, n: Int) = { (startIndex until startIndex + n).map { c => store.upsertObject(persistenceId, c, s"$c valid string", tag).futureValue } } // upsert one record each for many different persistence ids def upsertForManyDifferentPersistenceIds( store: JdbcDurableStateStore[String], persistenceIdPrefix: String, revision: Int, tag: String, startIndex: Int, n: Int) = { (startIndex until startIndex + n).map { c => store.upsertObject(s"$persistenceIdPrefix-$c", revision, s"$c valid string", tag).futureValue } } private def times(n: Int, ls: List[String]) = ls.flatMap { List.fill(n)(_) } // upsert multiple records for a random shuffle of a list of persistence ids def upsertRandomlyShuffledPersistenceIds( store: JdbcDurableStateStore[String], persistenceIds: List[String], tag: String, replicationFactor: Int) = { val allPersistenceIds = scala.util.Random.shuffle(times(replicationFactor, persistenceIds)) val m = collection.mutable.Map.empty[String, Long] allPersistenceIds.map { p => m.get(p) .fold { val _ = store.upsertObject(p, 1, s"1 valid string", tag).futureValue m += ((p, 1)) } { seq => { val _ = store.upsertObject(p, seq + 1, s"${seq + 1} valid string", tag).futureValue m += ((p, seq + 1)) } } } } def upsertParallel(store: JdbcDurableStateStore[String], pids: Set[String], tag: String, noOfItems: Int)( implicit ec: ExecutionContext) = { for { _ <- Future.unit f1 = Future(upsertManyForOnePersistenceId(store, pids.head, tag, 1, noOfItems)) f2 = Future(upsertManyForOnePersistenceId(store, pids.tail.head, tag, 1, noOfItems)) f3 = Future(upsertManyForOnePersistenceId(store, pids.last, tag, 1, noOfItems)) _ <- f1 _ <- f2 _ <- f3 } yield (()) } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/state/scaladsl/DurableStateSequenceActorTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc.
*/ package akka.persistence.jdbc.state.scaladsl import scala.concurrent.Future import scala.concurrent.duration._ import com.typesafe.config.{ Config, ConfigFactory } import akka.actor.{ ActorRef, ActorSystem, ExtendedActorSystem } import akka.pattern.ask import akka.persistence.jdbc.SharedActorSystemTestSpec import akka.persistence.jdbc.state.scaladsl.DurableStateSequenceActor.VisitedElement import akka.persistence.jdbc.state.scaladsl.DurableStateSequenceActor.{ GetMaxGlobalOffset, MaxGlobalOffset } import akka.persistence.jdbc.testkit.internal.{ H2, SchemaType } import akka.testkit.TestProbe import akka.util.Timeout import org.scalatest.concurrent.Eventually abstract class DurableStateSequenceActorTest(config: Config, schemaType: SchemaType) extends StateSpecBase(config, schemaType) with DataGenerationHelper with Eventually { val durableStateSequenceActorConfig = durableStateConfig.stateSequenceConfig implicit val askTimeout: FiniteDuration = 50.millis implicit val timeout: Timeout = Timeout(1.minute) "A DurableStateSequenceActor" must { "recover normally" in { withActorSystem { implicit system => val store = new JdbcDurableStateStore[String]( JdbcDurableStateStore.Identifier, db, schemaTypeToProfile(schemaType), durableStateConfig, serialization) upsertForManyDifferentPersistenceIds(store, "pid", 1, "t1", 1, 400).size shouldBe 400 withDurableStateSequenceActor(store, maxTries = 100) { actor => eventually { actor.ask(GetMaxGlobalOffset).mapTo[MaxGlobalOffset].futureValue shouldBe MaxGlobalOffset(400) } } } } } /** * @param maxTries The number of tries before events are assumed missing * (since the actor queries every second by default, * this is effectively the number of seconds after which events are assumed missing) */ def withDurableStateSequenceActor(store: JdbcDurableStateStore[String], maxTries: Int)(f: ActorRef => Unit)( implicit system: ActorSystem): Unit = { val actor = system.actorOf(DurableStateSequenceActor.props(store, durableStateSequenceActorConfig.copy(maxTries = maxTries))) try f(actor) finally system.stop(actor) } } class MockDurableStateSequenceActorTest extends SharedActorSystemTestSpec { def fetchMaxGlobalOffset(durableStateSequenceActor: ActorRef): Future[Long] = { durableStateSequenceActor.ask(GetMaxGlobalOffset)(3.seconds).mapTo[MaxGlobalOffset].map(_.maxOffset) } it should "re-query with delay only when events are missing" in { val batchSize = 100 val maxTries = 5 val queryDelay = 300.millis val almostQueryDelay = queryDelay - 50.millis val almostImmediately = 50.millis withTestProbeDurableStateSequenceActor(batchSize, maxTries, queryDelay) { (daoProbe, _) => daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(0, batchSize)) val firstBatch = ((1L to 40L) ++ (51L to 110L)).map(n => VisitedElement(s"pid-$n", n, 1)) daoProbe.reply(firstBatch) withClue(s"when events are missing, the actor should wait for $queryDelay before querying again") { daoProbe.expectNoMessage(almostQueryDelay) daoProbe.expectMsg(almostQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(40, batchSize)) } // number 41 is still missing after this batch val secondBatch = (42L to 110L).map(n => VisitedElement(s"pid-$n", n, 1)) daoProbe.reply(secondBatch) withClue(s"when events are missing, the actor should wait for $queryDelay before querying again") { daoProbe.expectNoMessage(almostQueryDelay) daoProbe.expectMsg(almostQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(40, batchSize)) } val thirdBatch = (41L to 110L).map(n => 
VisitedElement(s"pid-$n", n, 1)) daoProbe.reply(thirdBatch) withClue( s"when no more events are missing, but less that batchSize elemens have been received, " + s"the actor should wait for $queryDelay before querying again") { daoProbe.expectNoMessage(almostQueryDelay) daoProbe.expectMsg(almostQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(110, batchSize)) } val fourthBatch = (111L to 210L).map(n => VisitedElement(s"pid-$n", n, 1)) daoProbe.reply(fourthBatch) withClue( "When no more events are missing and the number of events received is equal to batchSize, " + "the actor should query again immediately") { daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(210, batchSize)) } // Reply to prevent a dead letter warning on the timeout daoProbe.reply(Seq.empty) daoProbe.expectNoMessage(almostImmediately) } } it should "Assume an element missing after the configured amount of maxTries" in { val batchSize = 100 val maxTries = 5 val queryDelay = 300.millis val slightlyMoreThanQueryDelay = queryDelay + 100.millis val almostImmediately = 50.millis val allIds = ((1L to 40L) ++ (43L to 200L)).map(n => VisitedElement(s"pid-$n", n, 1)) withTestProbeDurableStateSequenceActor(batchSize, maxTries, queryDelay) { (daoProbe, actor) => daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(0, batchSize)) daoProbe.reply(allIds.take(100)) val idsLargerThan40 = allIds.dropWhile(_.offset <= 40) val retryResponse = idsLargerThan40.take(100) for (i <- 1 to maxTries) withClue(s"should retry $maxTries times (attempt $i)") { daoProbe.expectMsg(slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(40, batchSize)) daoProbe.reply(retryResponse) } // sanity check retryResponse.last.offset shouldBe 142 withClue( "The elements 41 and 42 should be assumed missing, " + "the actor should query again immediately since a full batch has been received") { daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(142, batchSize)) fetchMaxGlobalOffset(actor).futureValue shouldBe 142 } // Reply to prevent a dead letter warning on the timeout daoProbe.reply(Seq.empty) daoProbe.expectNoMessage(almostImmediately) } } it should "not delay several updates to known pid" in { val batchSize = 7 val maxTries = 5 val queryDelay = 300.millis import DurableStateSequenceActor.VisitedElement val almostQueryDelay = queryDelay - 50.millis val almostImmediately = 50.millis withTestProbeDurableStateSequenceActor(batchSize, maxTries, queryDelay) { (daoProbe, actor) => daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(0, batchSize)) val firstBatch = List(VisitedElement("p1", 1, 1), VisitedElement("p2", 2, 1), VisitedElement("p3", 3, 1)) daoProbe.reply(firstBatch) withClue(s"when offsets are not missing ") { daoProbe.expectNoMessage(almostQueryDelay) daoProbe.expectMsg(queryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(3, batchSize)) fetchMaxGlobalOffset(actor).futureValue shouldBe 3 } // two updates to p3 val secondBatch = List(VisitedElement("p1", 4, 2), VisitedElement("p2", 5, 2), VisitedElement("p3", 7, 3)) daoProbe.reply(secondBatch) withClue(s"when several updates to known pid ") { daoProbe.expectNoMessage(almostQueryDelay) daoProbe.expectMsg(queryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(7, batchSize)) fetchMaxGlobalOffset(actor).futureValue shouldBe 7 } // five updates to p2 and three to p3 val thirdBatch = List(VisitedElement("p1", 8, 3), 
VisitedElement("p2", 13, 7), VisitedElement("p3", 16, 6)) daoProbe.reply(thirdBatch) withClue(s"when several updates to known pid ") { daoProbe.expectNoMessage(almostQueryDelay) daoProbe.expectMsg(queryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(16, batchSize)) fetchMaxGlobalOffset(actor).futureValue shouldBe 16 } // Reply to prevent a dead letter warning on the timeout daoProbe.reply(Seq.empty) daoProbe.expectNoMessage(almostImmediately) } } it should "not delay more complex updates from several pids" in { val batchSize = 7 val maxTries = 5 val queryDelay = 300.millis import DurableStateSequenceActor.VisitedElement val almostQueryDelay = queryDelay - 50.millis val almostImmediately = 50.millis val slightlyMoreThanQueryDelay = queryDelay + 100.millis withTestProbeDurableStateSequenceActor(batchSize, maxTries, queryDelay) { (daoProbe, actor) => daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(0, batchSize)) val firstBatch = List(VisitedElement("p1", 1, 1), VisitedElement("p2", 2, 1), VisitedElement("p3", 3, 1)) daoProbe.reply(firstBatch) withClue(s"when offsets are not missing ") { daoProbe.expectNoMessage(almostQueryDelay) daoProbe.expectMsg(queryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(3, batchSize)) fetchMaxGlobalOffset(actor).futureValue shouldBe 3 } // updates like this: // p1, 4, 2 <<< // p2, 5, 2 // p2, 6, 3 // p2, 7, 4 // p3, 8, 2 // p3, 9, 3 // p3, 10, 4 <<< // p2, 11, 5 <<< val secondBatch = List(VisitedElement("p1", 4, 2), VisitedElement("p3", 10, 4), VisitedElement("p2", 11, 5)) daoProbe.reply(secondBatch) daoProbe.expectMsg(slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(11, batchSize)) fetchMaxGlobalOffset(actor).futureValue shouldBe 11 // Reply to prevent a dead letter warning on the timeout daoProbe.reply(Seq.empty) daoProbe.expectNoMessage(almostImmediately) } } it should "re-query for unknown pid" in { val batchSize = 7 val maxTries = 5 val queryDelay = 300.millis import DurableStateSequenceActor.VisitedElement val almostQueryDelay = queryDelay - 50.millis val almostImmediately = 50.millis val slightlyMoreThanQueryDelay = queryDelay + 100.millis withTestProbeDurableStateSequenceActor(batchSize, maxTries, queryDelay) { (daoProbe, actor) => daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(0, batchSize)) val firstBatch = List(VisitedElement("p1", 1, 1)) daoProbe.reply(firstBatch) withClue(s"when offsets are not missing ") { daoProbe.expectNoMessage(almostQueryDelay) daoProbe.expectMsg(queryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(1, batchSize)) fetchMaxGlobalOffset(actor).futureValue shouldBe 1 } // updates like this: // p1, 2, 2 <<< // p2, 3, 2 // p2, 4, 3 <<< // p3, 5, 2 // p3, 6, 3 // p3, 7, 4 <<< val secondBatch = List(VisitedElement("p1", 2, 2), VisitedElement("p2", 4, 3), VisitedElement("p3", 7, 4)) daoProbe.reply(secondBatch) for (i <- 1 to maxTries) withClue(s"when updates to unknown pid, attempt $i ") { val expectedQueryOffset = 2 // p1 offset 2 is ok daoProbe.expectMsg( slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(expectedQueryOffset, batchSize)) daoProbe.reply(secondBatch) } daoProbe.expectMsg(slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(7, batchSize)) fetchMaxGlobalOffset(actor).futureValue shouldBe 7 // two updates for p2 and p3 but now they are known and gaps can be filled val thirdBatch = List(VisitedElement("p2", 9, 5), VisitedElement("p3", 11, 
6)) daoProbe.reply(thirdBatch) daoProbe.expectMsg(slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(11, batchSize)) fetchMaxGlobalOffset(actor).futureValue shouldBe 11 // Reply to prevent a dead letter warning on the timeout daoProbe.reply(Seq.empty) daoProbe.expectNoMessage(almostImmediately) } } it should "evict revision cache when exceeding capacity" in { val batchSize = 100 val maxTries = 5 val queryDelay = 300.millis import DurableStateSequenceActor.VisitedElement val almostImmediately = 50.millis val slightlyMoreThanQueryDelay = queryDelay + 100.millis withTestProbeDurableStateSequenceActor(batchSize, maxTries, queryDelay, revisionCacheCapacity = 5) { (daoProbe, actor) => daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(0, batchSize)) val firstBatch = List( VisitedElement("p1", 1, 1), VisitedElement("p2", 2, 1), VisitedElement("p3", 3, 1), VisitedElement("p4", 4, 1), VisitedElement("p5", 5, 1)) daoProbe.reply(firstBatch) withClue(s"when offsets are not missing ") { daoProbe.expectMsg( slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(5, batchSize)) fetchMaxGlobalOffset(actor).futureValue shouldBe 5 } // pids in cache val secondBatch = List( VisitedElement("p1", 7, 3), VisitedElement("p2", 9, 3), VisitedElement("p3", 11, 3), VisitedElement("p4", 13, 3)) daoProbe.reply(secondBatch) withClue(s"when offsets are not missing ") { daoProbe.expectMsg( slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(13, batchSize)) fetchMaxGlobalOffset(actor).futureValue shouldBe 13 } // exceeding cache capacity of 5, p1, p2, p5 will be evicted because lowest offset val thirdBatch = List(VisitedElement("p4", 15, 5), VisitedElement("p6", 16, 1), VisitedElement("p7", 17, 1)) daoProbe.reply(thirdBatch) withClue(s"when offsets are not missing ") { daoProbe.expectMsg( slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(17, batchSize)) fetchMaxGlobalOffset(actor).futureValue shouldBe 17 } // p1, p2, p5 were evicted because lowest offset val fourthBatch = List(VisitedElement("p2", 19, 5), VisitedElement("p1", 21, 5), VisitedElement("p5", 23, 3)) daoProbe.reply(fourthBatch) for (i <- 1 to maxTries) withClue(s"when updates to unknown pid, attempt $i ") { daoProbe .expectMsg(slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(17, batchSize)) daoProbe.reply(fourthBatch) } daoProbe.expectMsg(slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(23, batchSize)) fetchMaxGlobalOffset(actor).futureValue shouldBe 23 // Reply to prevent a dead letter warning on the timeout daoProbe.reply(Seq.empty) daoProbe.expectNoMessage(almostImmediately) } } import akka.persistence.jdbc.config.DurableStateTableConfiguration def withTestProbeDurableStateSequenceActor( batchSize: Int, maxTries: Int, queryDelay: FiniteDuration, revisionCacheCapacity: Int = 10000)(f: (TestProbe, ActorRef) => Unit)(implicit system: ActorSystem): Unit = { val testProbe = TestProbe() val customConfig = ConfigFactory.parseString(s""" jdbc-durable-state-store { batchSize = $batchSize refreshInterval = 500.milliseconds durable-state-sequence-retrieval { query-delay = $queryDelay max-tries = $maxTries max-backoff-query-delay = 4.seconds ask-timeout = 100.millis batch-size = $batchSize revision-cache-capacity = $revisionCacheCapacity } } """) lazy val cfg = customConfig .getConfig("jdbc-durable-state-store") 
.withFallback(system.settings.config.getConfig("jdbc-durable-state-store")) .withFallback(ConfigFactory.load("h2-application.conf").getConfig("jdbc-durable-state-store")) val stateTableConfig = new DurableStateTableConfiguration(cfg) val mockStore = new TestProbeDurableStateStoreQuery(testProbe, db, slick.jdbc.H2Profile, stateTableConfig, serialization)( system.asInstanceOf[ExtendedActorSystem]) val actor = system.actorOf( DurableStateSequenceActor .props(mockStore.asInstanceOf[JdbcDurableStateStore[String]], stateTableConfig.stateSequenceConfig)) try f(testProbe, actor) finally system.stop(actor) } } class H2DurableStateSequenceActorTest extends DurableStateSequenceActorTest(ConfigFactory.load("h2-application.conf"), H2) { implicit lazy val system: ActorSystem = ActorSystem("test", config.withFallback(customSerializers)) } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/state/scaladsl/DurableStateStorePluginSpec.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.state.scaladsl import com.typesafe.config.{ Config, ConfigFactory } import akka.actor._ import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import org.scalatest.BeforeAndAfterAll import org.scalatest.concurrent.ScalaFutures import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore import akka.persistence.state.DurableStateStoreRegistry import slick.jdbc.{ H2Profile, JdbcProfile } abstract class DurableStateStorePluginSpec(config: Config, profile: JdbcProfile) extends AnyWordSpecLike with BeforeAndAfterAll with Matchers with ScalaFutures { implicit lazy val system: ExtendedActorSystem = ActorSystem("test", config).asInstanceOf[ExtendedActorSystem] "A durable state store plugin" must { "instantiate a JdbcDurableDataStore successfully" in { val store = DurableStateStoreRegistry .get(system) .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier) store shouldBe a[JdbcDurableStateStore[_]] store.system.settings.config shouldBe system.settings.config store.profile shouldBe profile } "instantiate another JdbcDurableDataStore successfully" in { val store1 = DurableStateStoreRegistry .get(system) .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier) val store2 = DurableStateStoreRegistry .get(system) .durableStateStoreFor[JdbcDurableStateStore[String]]("another-jdbc-durable-state-store") store1.configPath shouldBe JdbcDurableStateStore.Identifier store2.configPath shouldBe "another-jdbc-durable-state-store" } } override def afterAll(): Unit = { system.terminate().futureValue } } class H2DurableStateStorePluginSpec extends DurableStateStorePluginSpec(ConfigFactory.load("h2-application.conf"), H2Profile) ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/state/scaladsl/JdbcDurableStateSpec.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.state.scaladsl import com.typesafe.config.{ Config, ConfigFactory } import akka.actor._ import akka.persistence.jdbc.state.{ MyPayload, OffsetSyntax } import OffsetSyntax._ import akka.persistence.jdbc.testkit.internal.{ H2, Postgres, SchemaType } import akka.persistence.query.UpdatedDurableState import akka.persistence.query.{ NoOffset, Offset, Sequence } import akka.stream.scaladsl.Sink import org.scalatest.time.Millis import org.scalatest.time.Seconds import org.scalatest.time.Span import scala.annotation.nowarn import scala.concurrent.Future @nowarn("msg=deprecated") abstract class JdbcDurableStateSpec(config: Config, schemaType: SchemaType) extends StateSpecBase(config, schemaType) { override implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = Span(60, Seconds), interval = Span(100, Millis)) "A durable state store" must withActorSystem { implicit system => val stateStoreString = new JdbcDurableStateStore[String]( JdbcDurableStateStore.Identifier, db, schemaTypeToProfile(schemaType), durableStateConfig, serialization) "not load a state given an invalid persistenceId" in { whenReady { stateStoreString.getObject("InvalidPersistenceId") } { v => v.value shouldBe None } } "add a valid state successfully" in { whenReady { stateStoreString.upsertObject("p123", 1, "a valid string", "t123") } { v => v shouldBe akka.Done } } "support composite upsert-fetch-repeat loop" in { whenReady { for { n <- stateStoreString.upsertObject("p234", 1, "a valid string", "t123") _ = n shouldBe akka.Done g <- stateStoreString.getObject("p234") _ = g.value shouldBe Some("a valid string") u <- stateStoreString.upsertObject("p234", 2, "updated valid string", "t123") _ = u shouldBe akka.Done h <- stateStoreString.getObject("p234") } yield h } { v => v.value shouldBe Some("updated valid string") } } "fail inserting an already existing sequence number" in { whenReady { (for { n <- stateStoreString.upsertObject("p345", 1, "a valid string", "t123") _ = n shouldBe akka.Done g <- stateStoreString.getObject("p345") _ = g.value shouldBe Some("a valid string") u <- stateStoreString.upsertObject("p345", 1, "updated valid string", "t123") } yield u).failed } { e => schemaType match { case H2 => e shouldBe an[org.h2.jdbc.JdbcSQLIntegrityConstraintViolationException] case Postgres => e shouldBe an[org.postgresql.util.PSQLException] case _ => ??? 
} } } "fail inserting incorrect sequence number with 0 rows affected" in { whenReady { stateStoreString.upsertObject("p234", 1, "1 valid string", "t1").futureValue stateStoreString.upsertObject("p234", 2, "2 valid string", "t1").futureValue stateStoreString.upsertObject("p234", 3, "3 valid string", "t1").futureValue stateStoreString.upsertObject("p234", 5, "5 valid string", "t1").failed } { e => e shouldBe an[IllegalStateException] // offset should not change stateStoreString.maxStateStoreOffset().futureValue shouldBe 3 // sequence number should not change stateStoreString.getObject("p234").futureValue.revision shouldBe 3 } } "delete an existing state" in { whenReady { stateStoreString.deleteObject("p123") } { v => v shouldBe akka.Done whenReady { stateStoreString.getObject("p123") } { v => v.value shouldBe None } } } } "A durable state store with payload that needs custom serializer" must withActorSystem { implicit system => val stateStorePayload = new JdbcDurableStateStore[MyPayload]( JdbcDurableStateStore.Identifier, db, schemaTypeToProfile(schemaType), durableStateConfig, serialization) "not load a state given an invalid persistenceId" in { whenReady { stateStorePayload.getObject("InvalidPersistenceId") } { v => v.value shouldBe None } } "add a valid state successfully" in { whenReady { stateStorePayload.upsertObject("p123", 1, MyPayload("a valid string"), "t123") } { v => v shouldBe akka.Done } } "support composite upsert-fetch-repeat loop" in { whenReady { for { n <- stateStorePayload.upsertObject("p234", 1, MyPayload("a valid string"), "t123") _ = n shouldBe akka.Done g <- stateStorePayload.getObject("p234") _ = g.value shouldBe Some(MyPayload("a valid string")) u <- stateStorePayload.upsertObject("p234", 2, MyPayload("updated valid string"), "t123") _ = u shouldBe akka.Done h <- stateStorePayload.getObject("p234") } yield h } { v => v.value shouldBe Some(MyPayload("updated valid string")) } } "delete an existing state" in { whenReady { stateStorePayload.deleteObject("p234") } { v => v shouldBe akka.Done whenReady { stateStorePayload.getObject("p234") } { v => v.value shouldBe None } } } } "A JDBC durable state store" must { "find all states by tag either from the beginning or from a specific offset" in withActorSystem { implicit system => val stateStoreString = new JdbcDurableStateStore[String]( JdbcDurableStateStore.Identifier, db, schemaTypeToProfile(schemaType), durableStateConfig, serialization) import stateStoreString._ // fetch from beginning upsertManyForOnePersistenceId(stateStoreString, "p1", "t1", 1, 4) val chgs = currentChanges("t1", NoOffset).runWith(Sink.seq).futureValue chgs.size shouldBe 1 chgs.map(_.offset.value).max shouldBe 4 // upsert more and fetch from after the last offset upsertManyForOnePersistenceId(stateStoreString, "p1", "t1", 5, 7) val moreChgs = currentChanges("t1", chgs.head.offset).runWith(Sink.seq).futureValue moreChgs.size shouldBe 1 moreChgs.map(_.offset.value).max shouldBe 11 // upsert same tag, different persistence id and fetch from after the last offset upsertManyForOnePersistenceId(stateStoreString, "p2", "t1", 1, 3) val otherChgs = currentChanges("t1", moreChgs.head.offset).runWith(Sink.seq).futureValue otherChgs.size shouldBe 1 otherChgs.map(_.offset.value).max shouldBe 14 // again fetch from the beginning val cs = currentChanges("t1", NoOffset).runWith(Sink.seq).futureValue cs.size shouldBe 2 cs.map(_.offset.value).max shouldBe 14 } "find the max offset after a series of upserts with multiple persistence ids" in withActorSystem { implicit 
system => val stateStoreString = new JdbcDurableStateStore[String]( JdbcDurableStateStore.Identifier, db, schemaTypeToProfile(schemaType), durableStateConfig, serialization) import stateStoreString._ upsertRandomlyShuffledPersistenceIds(stateStoreString, List("p1", "p2", "p3"), "t1", 3) val chgs = currentChanges("t1", NoOffset).runWith(Sink.seq).futureValue chgs.size shouldBe 3 chgs.map(_.offset.value).max shouldBe 9 } "find all states by tags with offsets sorted and proper max and min offsets when starting offset is specified" in withActorSystem { implicit system => val stateStoreString = new JdbcDurableStateStore[String]( JdbcDurableStateStore.Identifier, db, schemaTypeToProfile(schemaType), durableStateConfig, serialization) import stateStoreString._ upsertRandomlyShuffledPersistenceIds(stateStoreString, List("p1", "p2", "p3"), "t1", 3) val chgs = stateStoreString.currentChanges("t1", Offset.sequence(7)).runWith(Sink.seq).futureValue chgs.map(_.offset.value) shouldBe sorted chgs.map(_.offset.value).max shouldBe 9 chgs.map(_.offset.value).min should be > 7L } "find all states by tags returning a live source with no offset specified" in withActorSystem { implicit system => val stateStoreString = new JdbcDurableStateStore[String]( JdbcDurableStateStore.Identifier, db, schemaTypeToProfile(schemaType), durableStateConfig, serialization) import stateStoreString._ upsertRandomlyShuffledPersistenceIds(stateStoreString, List("p1", "p2", "p3"), "t1", 3) val source = stateStoreString.changes("t1", NoOffset) val m = collection.mutable.ListBuffer.empty[(String, Long)] // trick to complete the future val f = source .takeWhile { e => m += ((e.persistenceId, e.offset.value)) e.offset.value < 12 } .runWith(Sink.seq) // more data after some delay Thread.sleep(100) upsertObject("p3", 4, "4 valid string", "t2").futureValue upsertObject("p2", 4, "4 valid string", "t1").futureValue upsertObject("p1", 4, "4 valid string", "t1").futureValue whenReady(f) { _ => m.size shouldBe 2 m.toList.map(_._2) shouldBe sorted m.toList.map(_._2).max shouldBe 12 } } "find all states by tags returning a live source with a starting offset specified" in withActorSystem { implicit system => val stateStoreString = new JdbcDurableStateStore[String]( JdbcDurableStateStore.Identifier, db, schemaTypeToProfile(schemaType), durableStateConfig, serialization) import stateStoreString._ upsertRandomlyShuffledPersistenceIds(stateStoreString, List("p1", "p2", "p3"), "t1", 3) val source = stateStoreString.changes("t1", Sequence(4)) val m = collection.mutable.ListBuffer.empty[(String, Long)] // trick to complete the future val f = source .takeWhile { e => m += ((e.persistenceId, e.offset.value)) e.offset.value < 12 } .runWith(Sink.seq) // more data after some delay Thread.sleep(100) upsertManyForOnePersistenceId(stateStoreString, "p3", "t1", 4, 3) whenReady(f) { _ => m.map(_._2) shouldBe sorted m.map(_._2).min should be > 4L m.map(_._2).max shouldBe 12 } } } "A JDBC durable state store in the face of parallel upserts" must { "fetch proper values of offsets with currentChanges()" in withActorSystem { implicit system => val stateStoreString = new JdbcDurableStateStore[String]( JdbcDurableStateStore.Identifier, db, schemaTypeToProfile(schemaType), durableStateConfig, serialization) import stateStoreString._ upsertParallel(stateStoreString, Set("p1", "p2", "p3"), "t1", 1000)(e).futureValue whenReady { currentChanges("t1", NoOffset) .collect { case u: UpdatedDurableState[String] => u } .runWith(Sink.seq): Future[Seq[UpdatedDurableState[String]]] } 
{ chgs => chgs.map(_.offset.value) shouldBe sorted chgs.map(_.offset.value).max shouldBe 3000 } whenReady { currentChanges("t1", Sequence(2000)) .collect { case u: UpdatedDurableState[String] => u } .runWith(Sink.seq): Future[Seq[UpdatedDurableState[String]]] } { chgs => chgs.map(_.offset.value) shouldBe sorted chgs.map(_.offset.value).max shouldBe 3000 } } "fetch proper values of offsets from beginning with changes() and phased upserts" in withActorSystem { implicit system => val stateStoreString = new JdbcDurableStateStore[String]( JdbcDurableStateStore.Identifier, db, schemaTypeToProfile(schemaType), durableStateConfig, serialization) import stateStoreString._ upsertParallel(stateStoreString, Set("p1", "p2", "p3"), "t1", 5)(e).futureValue val source = changes("t2", NoOffset) val m = collection.mutable.ListBuffer.empty[(String, Long)] // trick to complete the future val f = source .takeWhile { e => m += ((e.persistenceId, e.offset.value)) e.offset.value < 21 } .runWith(Sink.seq) // more data after some delay Thread.sleep(1000) upsertManyForOnePersistenceId(stateStoreString, "p3", "t1", 6, 3) Thread.sleep(1000) upsertManyForOnePersistenceId(stateStoreString, "p3", "t2", 9, 3) whenReady(f) { _ => m.map(_._2) shouldBe sorted m.map(_._2).min should be > 0L m.map(_._2).max shouldBe 21 } } "fetch proper values of offsets from beginning for a larger dataset with changes() and phased upserts" in withActorSystem { implicit system => val stateStoreString = new JdbcDurableStateStore[String]( JdbcDurableStateStore.Identifier, db, schemaTypeToProfile(schemaType), durableStateConfig, serialization) import stateStoreString._ upsertParallel(stateStoreString, Set("p1", "p2", "p3"), "t1", 1000)(e).futureValue val source = changes("t2", NoOffset) val m = collection.mutable.ListBuffer.empty[(String, Long)] // trick to complete the future val f = source .takeWhile { e => m += ((e.persistenceId, e.offset.value)) e.offset.value < 3060 } .runWith(Sink.seq) // more data after some delay Thread.sleep(1000) upsertManyForOnePersistenceId(stateStoreString, "p3", "t1", 1001, 30) Thread.sleep(1000) upsertManyForOnePersistenceId(stateStoreString, "p3", "t2", 1031, 30) whenReady(f) { _ => m.map(_._2) shouldBe sorted m.map(_._2).min should be > 0L m.map(_._2).max shouldBe 3060 } } } } class H2DurableStateSpec extends JdbcDurableStateSpec(ConfigFactory.load("h2-application.conf"), H2) { implicit lazy val system: ActorSystem = ActorSystem("test", config.withFallback(customSerializers)) } // In H2's default mode unquoted identifiers are uppercased, so raw-SQL paths in // DurableStateQueries must quote identifiers via the profile to match the schema. class H2DefaultModeDurableStateSpec extends JdbcDurableStateSpec(ConfigFactory.load("h2-default-mode-application.conf"), H2) { implicit lazy val system: ActorSystem = ActorSystem("test", config.withFallback(customSerializers)) } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/state/scaladsl/StateSpecBase.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.state.scaladsl import com.typesafe.config.{ Config, ConfigFactory } import scala.concurrent.duration._ import scala.concurrent.ExecutionContext import scala.util.{ Failure, Success } import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } import org.scalatest.concurrent.ScalaFutures import org.scalatest.time._ import akka.actor._ import akka.persistence.jdbc.db.SlickDatabase import akka.persistence.jdbc.config._ import akka.persistence.jdbc.testkit.internal.{ H2, MySQL, Postgres, SchemaType } import akka.persistence.jdbc.util.DropCreate import akka.serialization.SerializationExtension import akka.util.Timeout abstract class StateSpecBase(val config: Config, schemaType: SchemaType) extends AnyWordSpecLike with BeforeAndAfterAll with BeforeAndAfterEach with Matchers with ScalaFutures with DropCreate with DataGenerationHelper { implicit def system: ActorSystem implicit lazy val e: ExecutionContext = system.dispatcher private[jdbc] def schemaTypeToProfile(s: SchemaType) = s match { case H2 => slick.jdbc.H2Profile case Postgres => slick.jdbc.PostgresProfile case MySQL => slick.jdbc.MySQLProfile case _ => ??? } val customSerializers = ConfigFactory.parseString(""" akka.actor { serializers { my-payload = "akka.persistence.jdbc.state.MyPayloadSerializer" } serialization-bindings { "akka.persistence.jdbc.state.MyPayload" = my-payload } } """) val customConfig = ConfigFactory.parseString(s""" jdbc-durable-state-store { batchSize = 200 refreshInterval = 300.milliseconds durable-state-sequence-retrieval { batch-size = 1000 query-delay = 100.milliseconds max-tries = 3 } } """) lazy val cfg = customConfig .getConfig("jdbc-durable-state-store") .withFallback(system.settings.config.getConfig("jdbc-durable-state-store")) .withFallback(config.getConfig("jdbc-durable-state-store")) .withFallback(customSerializers.getConfig("akka.actor")) lazy val db = if (cfg.hasPath("slick.profile")) { SlickDatabase.database(cfg, new SlickConfiguration(cfg.getConfig("slick")), "slick.db") } else { // needed for integration test where we use postgres-shared-db-application.conf SlickDatabase.database( config, new SlickConfiguration(config.getConfig("akka-persistence-jdbc.shared-databases.slick")), "akka-persistence-jdbc.shared-databases.slick.db") } lazy val durableStateConfig = new DurableStateTableConfiguration(cfg) lazy val serialization = SerializationExtension(system) implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = Span(60, Seconds), interval = Span(100, Millis)) def withActorSystem(f: ExtendedActorSystem => Unit): Unit = { implicit val system: ExtendedActorSystem = ActorSystem("JdbcDurableStateSpec", config.withFallback(customSerializers)).asInstanceOf[ExtendedActorSystem] implicit val timeout: Timeout = Timeout(1.minute) try { f(system) } finally { system .actorSelection( "system/" + s"${JdbcDurableStateStore.Identifier}.akka-persistence-jdbc-durable-state-sequence-actor") .resolveOne() .onComplete { case Success(actorRef) => { system.stop(actorRef) Thread.sleep(1000) system.log.debug(s"Is terminated: ${actorRef.isTerminated}") } case Failure(_) => system.log.warning("system/" + "-persistence-jdbc-durable-state-sequence-actorsomename" + " does not exist") } system.terminate().futureValue } } override def beforeAll(): Unit = { dropAndCreate(schemaType) super.beforeAll() } override def beforeEach(): Unit = { dropAndCreate(schemaType) super.beforeAll() } override def 
afterAll(): Unit = { db.close() system.terminate().futureValue } } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/state/scaladsl/TestProbeDurableStateStoreQuery.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.state.scaladsl import scala.concurrent.Future import scala.concurrent.duration._ import akka.NotUsed import akka.actor.ExtendedActorSystem import akka.pattern.ask import akka.persistence.jdbc.config.DurableStateTableConfiguration import akka.persistence.query.DurableStateChange import akka.persistence.query.Offset import akka.persistence.state.scaladsl.GetObjectResult import akka.stream.scaladsl.Source import akka.testkit.TestProbe import akka.util.Timeout import slick.jdbc.{ JdbcBackend, JdbcProfile } import akka.serialization.Serialization object TestProbeDurableStateStoreQuery { case class StateInfoSequence(offset: Long, limit: Long) } class TestProbeDurableStateStoreQuery( val probe: TestProbe, db: JdbcBackend#Database, profile: JdbcProfile, durableStateConfig: DurableStateTableConfiguration, serialization: Serialization)(override implicit val system: ExtendedActorSystem) extends JdbcDurableStateStore[String]( JdbcDurableStateStore.Identifier, db, profile, durableStateConfig, serialization)(system) { implicit val askTimeout: Timeout = Timeout(100.millis) override def getObject(persistenceId: String): Future[GetObjectResult[String]] = ??? override def currentChanges(tag: String, offset: Offset): Source[DurableStateChange[String], NotUsed] = ??? override def changes(tag: String, offset: Offset): Source[DurableStateChange[String], NotUsed] = ??? override def stateStoreStateInfo(offset: Long, limit: Long): Source[(String, Long, Long), NotUsed] = { val f = probe.ref .ask(TestProbeDurableStateStoreQuery.StateInfoSequence(offset, limit)) .mapTo[scala.collection.immutable.Seq[DurableStateSequenceActor.VisitedElement]] Source.future(f).mapConcat(e => e.map(x => (x.pid, x.offset, x.revision))) } override def maxStateStoreOffset(): Future[Long] = Future.successful(0) } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/util/ClasspathResources.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.util import java.io.InputStream import scala.io.{ Source => ScalaIOSource } object ClasspathResources extends ClasspathResources trait ClasspathResources { def streamToString(is: InputStream): String = ScalaIOSource.fromInputStream(is).mkString def fromClasspathAsString(fileName: String): String = streamToString(fromClasspathAsStream(fileName)) def fromClasspathAsStream(fileName: String): InputStream = getClass.getClassLoader.getResourceAsStream(fileName) } ================================================ FILE: core/src/test/scala/akka/persistence/jdbc/util/DropCreate.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. 
*/ package akka.persistence.jdbc.util import java.sql.Statement import akka.annotation.InternalApi import akka.persistence.jdbc.testkit.internal.SchemaType import akka.persistence.jdbc.testkit.internal.SchemaUtilsImpl import com.typesafe.config.Config import org.slf4j.LoggerFactory import slick.jdbc.JdbcBackend.Database import slick.jdbc.JdbcBackend.Session /** * INTERNAL API */ @InternalApi private[jdbc] trait DropCreate { private val logger = LoggerFactory.getLogger(this.getClass) def db: Database def config: Config def newDao: Boolean = !SchemaUtilsImpl.legacy("jdbc-journal", config) /** * INTERNAL API */ @InternalApi private[jdbc] def dropAndCreate(schemaType: SchemaType): Unit = { // blocking calls, usually done in our before test methods SchemaUtilsImpl.dropWithSlick(schemaType, logger, db, !newDao) SchemaUtilsImpl.createWithSlick(schemaType, logger, db, !newDao) } def withSession[A](f: Session => A): A = { withDatabase { db => val session = db.createSession() try f(session) finally session.close() } } def withStatement[A](f: Statement => A): A = withSession(session => session.withStatement()(f)) /** * INTERNAL API */ @InternalApi private[jdbc] def withDatabase[A](f: Database => A): A = f(db) } ================================================ FILE: doc/deadlock.md ================================================ # Slick Scheduling Algorithm For the [new scheduling algorithm in #1461](https://github.com/slick/slick/pull/1461) to work correctly without deadlocks it is critical that Slick knows the connection pool size. It is set automatically [when you let Slick configure HikariCP](https://github.com/szeiger/slick/commit/353a6e41f389fbe776f1c38166bbe1a3f0a3f2e0) by calling `Database.forDatabase(...)` or `Database.forDataSource`, where the default maxConnections is `1` connection if you don't override it yourself, which is too low for most cases; in all other cases you have to set it yourself. Introduction of priority levels: - __HighPriority__: a DBIO which already has a connection associated (due to running in a transaction or with pinned session) - __MediumPriority__: the old highPrio = true case: a continuation of another DBIO action, it should always be able to be enqueued - __LowPriority__: any other DBIO, if the queue is full it will not be enqueued The queue backing Slick's ThreadPoolExecutor now consists internally of two backing queues: - A __HighPriority__ queue which contains only the HighPriority runnables - The old queue containing the other priority levels. In the AsyncExecutor, HighPriority items are always taken first; only when that queue is exhausted are Low- or MediumPriority items considered. We also do connection counting in the HikariCPJdbcDataSource: when there are no more connections in the pool, we prevent low/medium priority items from being processed from the queue. Only HighPriority items are able to make progress, because they already have a connection. ## Database thread pool Every Database contains an AsyncExecutor that manages the thread pool for asynchronous execution of Database I/O Actions. Its size is the main parameter to tune for the best performance of the Database object. It should be set to the value that you would use for the size of the connection pool in a traditional, blocking application. When using Database.forConfig, the thread pool is configured directly in the external configuration file together with the connection parameters.
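As a rough sketch of what that looks like (the `example.db` path, the H2 URL and the numbers below are illustrative, not taken from this repository), the executor and the pool can be sized from the same config block that `Database.forConfig` reads:

```scala
import com.typesafe.config.ConfigFactory
import slick.jdbc.JdbcBackend.Database

// numThreads, maxConnections and queueSize are standard Slick settings; the
// path and values here are made up for this example.
val config = ConfigFactory.parseString("""
  example.db {
    connectionPool = "HikariCP"
    url = "jdbc:h2:mem:test1;DB_CLOSE_DELAY=-1"
    driver = "org.h2.Driver"
    numThreads = 10      # AsyncExecutor thread pool size
    maxConnections = 10  # often kept equal to numThreads
    queueSize = 1000
  }
""")

// Slick configures both the AsyncExecutor and HikariCP from this block.
val db = Database.forConfig("example.db", config)
```

Because both the scheduler and the connection pool are derived from the same block, they agree on how many connections actually exist.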
If you use any other factory method to get a Database, you can either use a default configuration or specify a custom AsyncExecutor: ```scala val db = Database.forURL("jdbc:h2:mem:test1;DB_CLOSE_DELAY=-1", driver="org.h2.Driver", executor = AsyncExecutor("test1", numThreads=10, queueSize=1000)) ``` ## Configuration The object `akka.persistence.jdbc.db.SlickDatabase` reads the HikariCP data source configuration from the Typesafe configuration under `slick.db`. It expects a `connectionPool` field that has been set to `HikariCP`. The class `slick.jdbc.hikaricp.HikariCPJdbcDataSource` then reads the Hikari connection pool configuration: ```scala def forConfig(c: Config, driver: Driver, name: String, classLoader: ClassLoader): HikariCPJdbcDataSource = { val hconf = new HikariConfig() // Connection settings if (c.hasPath("dataSourceClass")) { hconf.setDataSourceClassName(c.getString("dataSourceClass")) } else { Option(c.getStringOr("driverClassName", c.getStringOr("driver"))).map(hconf.setDriverClassName _) } hconf.setJdbcUrl(c.getStringOr("url", null)) c.getStringOpt("user").foreach(hconf.setUsername) c.getStringOpt("password").foreach(hconf.setPassword) c.getPropertiesOpt("properties").foreach(hconf.setDataSourceProperties) // Pool configuration hconf.setConnectionTimeout(c.getMillisecondsOr("connectionTimeout", 1000)) hconf.setValidationTimeout(c.getMillisecondsOr("validationTimeout", 1000)) hconf.setIdleTimeout(c.getMillisecondsOr("idleTimeout", 600000)) hconf.setMaxLifetime(c.getMillisecondsOr("maxLifetime", 1800000)) hconf.setLeakDetectionThreshold(c.getMillisecondsOr("leakDetectionThreshold", 0)) hconf.setInitializationFailFast(c.getBooleanOr("initializationFailFast", false)) c.getStringOpt("connectionTestQuery").foreach(hconf.setConnectionTestQuery) c.getStringOpt("connectionInitSql").foreach(hconf.setConnectionInitSql) val numThreads = c.getIntOr("numThreads", 20) hconf.setMaximumPoolSize(c.getIntOr("maxConnections", numThreads * 5)) hconf.setMinimumIdle(c.getIntOr("minConnections", numThreads)) hconf.setPoolName(c.getStringOr("poolName", name)) hconf.setRegisterMbeans(c.getBooleanOr("registerMbeans", false)) // Equivalent of ConnectionPreparer hconf.setReadOnly(c.getBooleanOr("readOnly", false)) c.getStringOpt("isolation").map("TRANSACTION_" + _).foreach(hconf.setTransactionIsolation) hconf.setCatalog(c.getStringOr("catalog", null)) val ds = new HikariDataSource(hconf) new HikariCPJdbcDataSource(ds, hconf) } ``` ## Resources - [Transactionally deadlock still there on 3.2.0-M1?](https://github.com/slick/slick/issues/1614) - [Slick deadlock #1461](https://github.com/slick/slick/pull/1461) - [connection leak detected #1678](https://github.com/slick/slick/issues/1678) ================================================ FILE: docs/LICENSE ================================================ LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT THIS LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT (THIS "AGREEMENT") IS A LEGAL AGREEMENT BETWEEN YOU ("USER") AND LIGHTBEND, INC. ("LICENSOR"). BY CLICKING THE "I ACCEPT" BUTTON, OR INSTALLING, COPYING OR OTHERWISE USING LIGHTBEND’S SOFTWARE (THE “SOFTWARE”) AND ITS ASSOCIATED USER DOCUMENTATION, USER ACKNOWLEDGES THAT USER HAS REVIEWED AND ACCEPTS THIS AGREEMENT AND AGREES TO BE BOUND BY ALL OF ITS TERMS. IF YOU ARE AGREEING TO THIS AGREEMENT AS AN INDIVIDUAL, “USER” REFERS TO YOU INDIVIDUALLY.
IF YOU ARE AGREEING TO THIS AGREEMENT AS A REPRESENTATIVE OF AN ENTITY, YOU REPRESENT THAT YOU HAVE THE AUTHORITY TO BIND THAT ENTITY AND “USER” REFERS TO THAT ENTITY AND ALL THE USERS ACCESSING THE SOFTWARE BY, THROUGH OR ON BEHALF OF THAT ENTITY. IF USER DOES NOT AGREE WITH ALL OF THE TERMS OF THIS AGREEMENT, DO NOT INSTALL, COPY OR OTHERWISE USE THE SOFTWARE OR ITS DOCUMENTATION. 1. DEFINITIONS. 1. “User System” means User’s website(s), computers, servers and other equipment and software upon and with which the Software is run. 2. “Documentation” means the user instructions and help files made available by Licensor for use with the Software, as may be updated from time to time by Licensor. 3. “Intellectual Property Rights” means all intellectual property rights or similar proprietary rights, including (a) patent rights and utility models, (b) copyrights and database rights, (c) trademarks, trade names, domain names and trade dress and the goodwill associated therewith, (d) trade secrets, (e) mask works, and (f) industrial design rights; in each case, including any registrations of, applications to register, and renewals and extensions of, any of the foregoing in any jurisdiction in the world. 4. “Open Source Software” means all software that is available under the GNU Affero General Public License (AGPL), GNU General Public License (GPL), GNU Lesser General Public License (LGPL), Mozilla Public License (MPL), Apache License, BSD licenses, or any other license that is approved by or similar to those approved by the Open Source Initiative (www.opensource.org). 2. LICENSES AND RESTRICTIONS. 1. License. Subject to User’s compliance with the terms and conditions of this Agreement, Licensor hereby grants to User, during the term of this Agreement, a limited, non-exclusive, non-transferable and non-sublicensable right to (i) install and execute one (1) copy of the Software in accordance with the Documentation, solely in binary form, and not for the benefit of any other person or entity, and (ii) access and use the Documentation, solely for User’s own internally purposes in support of End User’s permitted use of the Software. 2. Restrictions. User shall not, directly or indirectly, or permit any User or third party to: (a) reverse engineer, decompile, disassemble or otherwise attempt to discover the source code or underlying ideas or algorithms of the Software; (b) modify, translate, or create derivative works based on any element of the Software or any related Documentation (except to the extent applicable laws specifically prohibit such restriction for interoperability purposes, in which case you agree to first contact Licensor and provide Licensor an opportunity to create such changes as are needed for interoperability purposes); (c) use, rent, lease, distribute, sell, resell, assign, or otherwise transfer the Software or any copy thereof; (d) use the Software for timesharing purposes or otherwise for the benefit of any person or entity other than for the benefit of User and Users; (e) remove any proprietary notices from the Software or the Documentation or attempt to defeat any copy protection device included with the Software; or (f) use the Software for any purpose other than its intended purpose. 3. Reservation of Rights. Nothing in this Agreement shall be construed to give User a right to use or otherwise obtain access to any source code from which the Software is compiled or interpreted. 
Except as expressly granted in this Agreement, there are no other licenses granted to User, express, implied or by way of estoppel. All rights not granted in this Agreement are reserved by Licensor. 4. Open Source Software. Notwithstanding the foregoing, certain items of software included with the Software are Open Source Software and remains subject Open Source Software licenses. Such Open Source Software is not subject to the terms and conditions of this Agreement. Instead, each such item of Open Source Software is licensed under the terms of the end user license that accompanies such Open Source Software and nothing in this Agreement limits your rights under, or grants you rights that supersede, the terms and conditions of any applicable end user license for such Open Source Software. If required by any license for particular Open Source Software, Licensor makes such Open Source Software, and any Licensor modifications to that Open Source Software, available as further described in the Documentation. USE OF THE SOFTWARE IN ANY MANNER OTHER THAN AS PROVIDED IN THIS AGREEMENT IS STRICTLY PROHIBITED AND MAY INFRINGE ON THE INTELLECTUAL PROPERTY RIGHTS OF LICENSOR AND/OR ITS LICENSOR(S), SUBJECTING USER TO CIVIL AND CRIMINAL PENALTIES, INCLUDING WITHOUT LIMITATION MONETARY DAMAGES AND IMPRISONMENT FOR COPYRIGHT INFRINGEMENT. 3. USER OBLIGATIONS. 1. User System. User is responsible for (a) obtaining, deploying and maintaining the User System, and all computer hardware, software, modems, routers and other communications equipment necessary for User and its Users to install and use the Software; and (b) paying all third party fees and access charges incurred in connection with the foregoing. Licensor shall not be responsible for supplying any hardware, software or other equipment to User under this Agreement. 2. Compliance with Laws. User agrees to use the Software in compliance with all applicable laws, including local laws of the country or region in which User resides, and in compliance with all United States export laws and regulations. User shall not use the Software for any purpose prohibited by applicable law. 3. Trademarks and Tradenames. With regard to all copies of the Software permitted herein, User shall reproduce on such copies all Licensor copyright notices, and other proprietary notices appearing on and in the original copy of the software received from Licensor. Except as set forth in the foregoing sentence, User will not, during the term of this Agreement or thereafter, use any trademark of Licensor, or any word and/or symbol likely to be confused with any Licensor trademark, either alone or in any combination with other words and/or symbols. 4. SUPPORT AND MAINTENANCE. 1. Support. Licensor is not responsible for maintenance or support of the Software, or the equipment on which the Software resides or is used, under this Agreement. By accepting the license granted under this Agreement, User agrees that Licensor will be under no obligation to provide any support, maintenance or service in connection with the Software or such equipment. 2. Upgrades and Updates. Licensor may from time to time in its sole discretion develop and provide updates for the Software, which may include upgrades, bug fixes, patches, other error corrections, and/or new features (collectively, including related documentation, “Updates”). Updates may also modify or delete in their entirety certain features and functionality. 
You agree that Licensor has no obligation to provide any Updates or to continue to provide or enable any particular features or functionality. 5. REPRESENTATIONS AND WARRANTIES; DISCLAIMER. 1. Mutual Representations and Warranties. Each party represents, warrants and covenants that: (a) it has the full power and authority to enter into this Agreement and to perform its obligations hereunder, without the need for any consents, approvals or immunities not yet obtained; and (b) its acceptance of and performance under this Agreement shall not breach any oral or written agreement with any third party or any obligation owed by it to any third party to keep any information or materials in confidence or in trust. 2. Disclaimer. EXCEPT FOR THE WARRANTIES SET FORTH IN THIS SECTION 5, THE SOFTWARE AND THE DOCUMENTATION ARE PROVIDED ON AN AS-IS BASIS. USER’S USE OF THE SOFTWARE AND THE DOCUMENTATION IS AT USER’S OWN RISK. LICENSOR DOES NOT MAKE, AND HEREBY DISCLAIMS, ANY AND ALL OTHER EXPRESS, STATUTORY AND IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT AND TITLE, QUALITY, SUITABILITY, OPERABILITY, CONDITION, SYSTEM INTEGRATION, NON-INTERFERENCE, WORKMANSHIP, TRUTH, ACCURACY (OF DATA OR ANY OTHER INFORMATION OR CONTENT), ABSENCE OF DEFECTS, WHETHER LATENT OR PATENT, AND ANY WARRANTIES ARISING FROM A COURSE OF DEALING, USAGE, OR TRADE PRACTICE. LICENSOR ALSO DOES NOT WARRANT THAT THE FUNCTIONS CONTAINED IN, PERFORMED AND/OR PROVIDED BY THE SOFTWARE WILL MEET USER’S REQUIREMENTS, THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR ERROR-FREE, THAT THE SOFTWARE WILL BE COMPATIBLE OR WORK WITH ANY THIRD-PARTY SOFTWARE, APPLICATIONS OR DEVICES, OR THAT DEFECTS IN THE SOFTWARE WILL BE CORRECTED. USER EXPRESSLY ACKNOWLEDGES AND AGREES THAT, TO THE EXTENT PERMITTED BY APPLICABLE LAW, ITS USE OF THE SOFTWARE IS AT ITS SOLE RISK AND THAT THE ENTIRE RISK AS TO SATISFACTORY QUALITY, PERFORMANCE, ACCURACY AND EFFORT IS WITH USER. USER FURTHER ACKNOWLEDGES AND AGREES THAT THE SOFTWARE IS NOT INTENDED OR SUITABLE FOR USE IN SITUATIONS OR ENVIRONMENTS WHERE THE FAILURE OR TIME DELAYS OF, OR ERRORS OR INACCURACIES IN THE CONTENT, DATA OR INFORMATION PROVIDED BY THE SOFTWARE COULD LEAD TO DEATH, PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE. ANY WARRANTIES MADE BY LICENSOR ARE FOR THE BENEFIT OF USER ONLY AND NOT FOR THE BENEFIT OF ANY THIRD PARTY. THE SOFTWARE AND THE DOCUMENTATION ARE LICENSED AND NOT SOLD. NO AGENT OF LICENSOR IS AUTHORIZED TO ALTER OR EXPAND THE WARRANTIES OF LICENSOR AS SET FORTH HEREIN. 6. INDEMNIFICATION. User shall defend Licensor and its licensors and their respective officers, directors and employees (“Licensor Indemnified Parties”) from and against any and all Third-Party Claims which arise out of or relate to: (a) User’s use or alleged use of the Software other than as permitted under this Agreement; or (b) arising out of or relating to any violation of Section 2.2, or any violation of applicable laws. User shall pay all damages, costs and expenses, including attorneys’ fees and costs (whether by settlement or award of by a final judicial judgment) incurred by the Licensor Indemnified Parties from any such Third-Party Claim. In no event shall Licensor settle any claim without User’s prior written approval. 
Licensor may, at its own expense, engage separate counsel to advise Licensor regarding a Third-Party Claim and to participate in the defense of the claim, subject to User’s right to control the defense and settlement. If you are a New Jersey resident, this indemnification clause is to be only as broad and inclusive as is permitted by the law of the state of New Jersey. 7. CONFIDENTIALITY. 1. Confidential Information. User acknowledges that the Software contains valuable proprietary information and trade secrets and that unauthorized or improper use of the Software will result in irreparable harm to Licensor for which monetary damages would be inadequate and for which Licensor may be entitled to immediate injunctive relief. Accordingly, you will maintain the confidentiality of the proprietary information and not sell, license, publish, display, distribute, disclose or otherwise make available such proprietary information to any third party, nor use such information except as authorized by this Agreement. 2. Injunctive Relief. User agrees that any unauthorized disclosure of confidential information may cause immediate and irreparable injury to Licensor and that, in the event of such breach, Licensor will be entitled, in addition to any other available remedies, to seek immediate injunctive and other equitable relief, without bond and without the necessity of showing actual monetary damages. 8. PROPRIETARY RIGHTS. 1. Licensor. As between Licensor and User, all right, title and interest in the Software, the Documentation, and any other Licensor materials furnished or made available hereunder, and all modifications and enhancements thereof, and all suggestions, ideas and feedback proposed by User regarding the Software, including all copyright rights, patent rights and other Intellectual Property Rights in each of the foregoing, belong to and are retained solely by Licensor or Licensor’s licensors and providers, as applicable. User hereby does and will irrevocably assign to Licensor all evaluations, ideas, feedback and suggestions made by User to Licensor regarding the Software or the Documentation (collectively, “Feedback”) and all Intellectual Property Rights in the Feedback. 9. LIMITATION OF LIABILITY. 1. No Consequential Damages. NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR ANY INDIRECT, INCIDENTAL, SPECIAL, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR ANY DAMAGES FOR LOST DATA, BUSINESS INTERRUPTION, LOST PROFITS, LOST REVENUE OR LOST BUSINESS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT, EVEN IF LICENSOR OR ITS LICENSORS OR USER HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES, INCLUDING WITHOUT LIMITATION, ANY SUCH DAMAGES ARISING OUT OF THE LICENSING, PROVISION OR USE OF THE SOFTWARE OR THE RESULTS OF THE USE OF THE SOFTWARE. LICENSOR WILL NOT BE LIABLE FOR THE COST OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES. 2. LIMITS ON LIABILITY. NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR CUMULATIVE, AGGREGATE DAMAGES GREATER THAN FIVE HUNDRED DOLLARS (US $500). 3. ESSENTIAL PURPOSE. USER ACKNOWLEDGES THAT THE TERMS IN THIS SECTION 9 (LIMITATION OF LIABILITY) SHALL APPLY TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW AND SHALL APPLY EVEN IF AN EXCLUSIVE OR LIMITED REMEDY STATED HEREIN FAILS OF ITS ESSENTIAL PURPOSE. SOME JURISDICTIONS DO NOT ALLOW CERTAIN LIMITATIONS OF LIABILITY, SO SOME OR ALL OF THE ABOVE LIMITATIONS OF LIABILITY MAY NOT APPLY TO YOU. 
IF YOU ARE A NEW JERSEY RESIDENT, THIS LIMITATION OF LIABILITY SECTION IS TO BE ONLY AS BROAD AND INCLUSIVE AS IS PERMITTED BY THE LAW OF THE STATE OF NEW JERSEY. 10. TERM AND TERMINATION. 1. Term. This Agreement and User’s right to use the Software commences on earlier of the date that User: (a) installs the Software, (b) begins using the Software or (c) otherwise demonstrates assent to this Agreement. User’s right to use the Software shall continue until such time as this Agreement is terminated (the “Term”). 2. Termination for Cause. A party may terminate this Agreement, upon written notice to the other party in the event the other party files a petition for bankruptcy or has a petition for bankruptcy filed against it that is not dismissed within sixty (60) calendar days after filing or admits its inability to pay its debts as they mature, makes an assignment for the benefit of its creditors or ceases to function as a going concern or to conduct its operations in the normal course of business and such termination shall occur immediately upon notice. Licensor may terminate this Agreement at any time without notice if it ceases to support the Software, which Licensor may do in its sole discretion. In addition, this Agreement will terminate immediately and automatically without any notice if User breaches any of its terms and conditions. 3. Termination for Convenience. Either party may terminate this Agreement for convenience on at least thirty (30) calendar days prior written notice to the other party. User may also terminate this Agreement by ceasing all use of the Software. 4. Effects of Termination. Upon expiration or termination of this Agreement, User’s shall cease all use of the Software and the Documentation and shall destroy all copies of the Software in User’s possession or control. 5. Survival. This Section and Sections 1, 2.2 (Restrictions), 2.3 (Reservation of Rights), 2.4 (Open Source Software), 5.2 (Disclaimer), 6 (Indemnification), 7 (Confidentiality), 8 (Proprietary Rights), 9 (Limitation of Liability), 10.4 (Effects of Termination) and 11 (Miscellaneous) shall survive any termination or expiration of this Agreement. 11. MISCELLANEOUS. 1. Notices. Licensor may give notice to User by means of a general notice through electronic mail to User’s e-mail address, or by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service to User’s address on record with Licensor. User may give notice to Licensor by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service addressed to Licensor, Lightbend Inc., 580 California, #1231, San Francisco, CA 94104, Attention: User Support. Notice shall be deemed to have been given upon receipt or, if earlier, two (2) business days after mailing, as applicable. All communications and notices to be made or given pursuant to this Agreement shall be in the English language. 2. Governing Law. This Agreement and the rights and obligations of the parties to and under this agreement shall be governed by and construed under the laws of the United States and the State of California as applied to agreements entered into and to be performed in such State without giving effect to conflicts of laws rules or principles. 
The parties agree that the United Nations Convention on Contracts for the International Sale of Goods is specifically excluded from application to this Agreement and that the application of the Uniform Computer Information Transactions Act (UCITA) is specifically disclaimed. Any dispute arising out of or in connection with this Agreement, including but not limited to any question regarding its existence, interpretation, validity, performance, or termination, or any dispute between the parties arising from the parties' relationship created by this Agreement, shall be referred to and finally resolved by arbitration administered by the American Arbitration Association under its rules. The number of arbitrators shall be one (1). The parties shall endeavor to agree upon the sole arbitrator and jointly nominate the arbitrator. If the parties cannot agree upon the sole arbitrator within a time prescribed by AAA, the parties shall request the AAA to propose five (5) arbitrators and each party shall rank the proposed arbitrators. The AAA shall appoint an arbitrator from the list of five (5), based upon the parties' rankings. 3. U.S. Government Users. If User is a Federal Government entity, Licensor provides the Software and the Documentation, including related software and technology, for ultimate Federal Government end use solely in accordance with the following: Government technical data rights include only those rights customarily provided to the public with a commercial item or process and Government software rights related to the Software and the Documentation include only those rights customarily provided to the public, as defined in this Agreement. The technical data rights and customary commercial software license is provided in accordance with FAR 12.211 (Technical Data) and FAR 12.212 (Software) and, for Department of Defense transactions, DFAR 252.227-7015 (Technical Data – Commercial Items) and DFAR 227.7202-3 (Rights in Commercial Computer Software or Computer Software Documentation). If greater rights are needed, a mutually acceptable written addendum specifically conveying such rights must be included in this Agreement. 4. Export. The Software utilizes software and technology that may be subject to United States and foreign export controls. User acknowledges and agrees that the Software shall not be used, and none of the underlying information, software, or technology may be transferred or otherwise exported or re-exported to countries as to which the United States maintains an embargo (collectively, “Embargoed Countries”), or to or by a national or resident thereof, or any person or entity on the U.S. Department of Treasury’s List of Specially Designated Nationals or the U.S. Department of Commerce’s Table of Denial Orders (collectively, “Designated Nationals”). The lists of Embargoed Countries and Designated Nationals are subject to change without notice. By using the Software, User represents and warrants that it is not located in, under the control of, or a national or resident of an Embargoed Country or Designated National. The Software may use encryption technology that is subject to licensing requirements under the U.S. Export Administration Regulations, 15 C.F.R. Parts 730-774 and Council Regulation (EC) No. 1334/2000. User agrees to comply strictly with all applicable export laws and assume sole responsibility for obtaining licenses to export or re-export as may be required. 
Licensor and its licensors make no representation that the Software is appropriate or available for use in other locations. By using the Software, User represents and warrants that it is not located in any such country or on any such list. 5. General. User shall not assign its rights hereunder, or delegate the performance of any of its duties or obligations hereunder, whether by merger, acquisition, sale of assets, operation of law, or otherwise, without the prior written consent of Licensor. Any purported assignment in violation of the preceding sentence is null and void. Subject to the foregoing, this Agreement shall be binding upon, and inure to the benefit of, the successors and assigns of the parties thereto. Except as otherwise specified in this Agreement, this Agreement may be amended or supplemented only by a writing that refers explicitly to this Agreement and that is signed on behalf of both parties. No waiver will be implied from conduct or failure to enforce rights. No waiver will be effective unless in a writing signed on behalf of the party against whom the waiver is asserted. If any of this Agreement is found invalid or unenforceable that term will be enforced to the maximum extent permitted by law and the remainder of this Agreement will remain in full force. Nothing contained herein shall be construed as creating an agency, partnership, or other form of joint enterprise between the parties. This Agreement constitutes the entire agreement between the parties relating to this subject matter and supersedes all prior or simultaneous understandings, representations, discussions, negotiations, and agreements, whether written or oral. Neither party shall be liable to the other party or any third party for failure or delay in performing its obligations under this Agreement when such failure or delay is due to any cause beyond the control of the party concerned, including, without limitation, force majeure, governmental orders or restrictions, fire, or flood, provided that upon cessation of such events such party shall thereupon promptly perform or complete the performance of its obligations hereunder. ================================================ FILE: docs/release-train-issue-template.md ================================================ Release Akka Persistence JDBC $VERSION$ ### Cutting the release - [ ] Check that open PRs and issues assigned to the milestone are reasonable - [ ] Update the Change date and version in the LICENSE file. - [ ] Create a new milestone for the [next version](https://github.com/akka/akka-persistence-jdbc/milestones) - [ ] Close the [$VERSION$ milestone](https://github.com/akka/akka-persistence-jdbc/milestones?direction=asc&sort=due_date) - [ ] Make sure all important PRs have been merged - [ ] Update the revision in Fossa in the Akka Group for the Akka umbrella version, e.g. `22.10`. Note that the revisions for the release is udpated by Akka Group > Projects > Edit. For recent dependency updates the Fossa validation can be triggered from the GitHub actions "Dependency License Scanning". - [ ] Wait until [main build finished](https://github.com/akka/akka-persistence-jdbc/actions) after merging the latest PR - [ ] Update the [draft release](https://github.com/akka/akka-persistence-jdbc/releases) with the next tag version `v$VERSION$`, title and release description. Use the `Publish release` button, which will create the tag. 
- [ ] Check that GitHub Actions release build has executed successfully (GitHub Actions will start a [CI build](https://github.com/akka/akka-persistence-jdbc/actions) for the new tag and publish artifacts to https://repo.akka.io/maven) ### Check availability - [ ] Check [API](https://doc.akka.io/api/akka-persistence-jdbc/$VERSION$/) documentation - [ ] Check [reference](https://doc.akka.io/libraries/akka-persistence-jdbc/$VERSION$/) documentation. Check that the reference docs were deployed and show a version warning (see section below on how to fix the version warning). - [ ] Check the release `mvn dependency:get -Dartifact=com.lightbend.akka:akka-persistence-jdbc_2.13:$VERSION$` ### When everything is on https://repo.akka.io/maven - [ ] Log into `gustav.akka.io` as `akkarepo` - [ ] If this updates the `current` version, run `./update-akka-persistence-jdbc-current-version.sh $VERSION$` - [ ] otherwise check changes and commit the new version to the local git repository ``` cd ~/www git status git add libraries/akka-persistence-jdbc/current libraries/akka-persistence-jdbc/$VERSION$ git add api/akka-persistence-jdbc/current api/akka-persistence-jdbc/$VERSION$ git commit -m "Akka Persistence JDBC $VERSION$" ``` ### Announcements For important patch releases, and only if critical issues have been fixed: - [ ] Send a release notification to [Lightbend discuss](https://discuss.akka.io) - [ ] Tweet using the [@akkateam](https://twitter.com/akkateam/) account (or ask someone to) about the new release - [ ] Announce internally (with links to Tweet, discuss) For minor or major releases: - [ ] Include noteworthy features and improvements in Akka umbrella release announcement at akka.io. Coordinate with PM and marketing. ### Afterwards - [ ] Update [akka-dependencies bom](https://github.com/lightbend/akka-dependencies) and version for [Akka module versions](https://doc.akka.io/libraries/akka-dependencies/current/) in [akka-dependencies repo](https://github.com/akka/akka-dependencies) - [ ] Update [Akka Guide samples](https://github.com/akka/akka-platform-guide) - Close this issue ================================================ FILE: docs/src/main/paradox/_template/projectSpecificFooter.st ================================================ ================================================ FILE: docs/src/main/paradox/assets/js/warnOldVersion.js ================================================ function initOldVersionWarnings($, thisVersion, projectUrl) { if (projectUrl && projectUrl !== "") { var schemeLessUrl = projectUrl; if (projectUrl.startsWith("http://")) projectUrl = schemeLessUrl.substring(5); else if (projectUrl.startsWith("https://")) projectUrl = schemeLessUrl.substring(6); const url = schemeLessUrl + (schemeLessUrl.endsWith("\/") ? "" : "/") + "paradox.json"; $.get(url, function (versionData) { const currentVersion = versionData.version; if (thisVersion !== currentVersion) { showVersionWarning(thisVersion, currentVersion, projectUrl); } }); } } function showVersionWarning(thisVersion, currentVersion, projectUrl) { $('#docs').prepend( '
'This documentation regards version ' + thisVersion + ', ' +
      'however the current version is ' + currentVersion + '.'
  );
}

================================================
FILE: docs/src/main/paradox/configuration.md
================================================
# Configuration

The plugin relies on @extref[Slick](slick:) to create the SQL dialect for the database in use; therefore the following must be configured in `application.conf`.

Configure `akka-persistence`:

- instruct akka persistence to use the `jdbc-journal` plugin,
- instruct akka persistence to use the `jdbc-snapshot-store` plugin,

Configure `slick`:

- The following slick profiles are supported:
    - `slick.jdbc.PostgresProfile$`
    - `slick.jdbc.MySQLProfile$`
    - `slick.jdbc.H2Profile$`
    - `slick.jdbc.OracleProfile$`
    - `slick.jdbc.SQLServerProfile$`

## Database Schema

- @extref:[Postgres Schema](github:/core/src/main/resources/schema/postgres/postgres-create-schema.sql)
- @extref:[MySQL Schema](github:/core/src/main/resources/schema/mysql/mysql-create-schema.sql)
- @extref:[H2 Schema](github:/core/src/main/resources/schema/h2/h2-create-schema.sql)
- @extref:[Oracle Schema](github:/core/src/main/resources/schema/oracle/oracle-create-schema.sql)
- @extref:[SQL Server Schema](github:/core/src/main/resources/schema/sqlserver/sqlserver-create-schema.sql)

@@@ note

The H2 database is not recommended for production use; support for H2 is primarily for testing purposes.

@@@

For testing purposes the journal and snapshot tables can be created programmatically using the provided `SchemaUtils`.

Scala
: @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/ScaladslSnippets.scala) { #create }

Java
: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/JavadslSnippets.java) { #create }

A `dropIfExists` variant is also available.

**Note**: `SchemaUtils` was introduced in version 5.0.0.

## Reference Configuration

akka-persistence-jdbc provides the defaults as part of the @extref:[reference.conf](github:/core/src/main/resources/reference.conf). This file documents all the values which can be configured.

There are several possible ways to configure loading your database connections. Options will be explained below.

### One database connection pool per journal type

There is the possibility to create a separate database connection pool per journal-type (one pool for the write-journal, one pool for the snapshot-journal, and one pool for the read-journal). This is the default and the following example configuration shows how this is configured:

Postgres
: @@snip[Postgres](/core/src/test/resources/postgres-application.conf)

MySQL
: @@snip[MySQL](/core/src/test/resources/mysql-application.conf)

H2
: @@snip[H2](/core/src/test/resources/h2-application.conf)

Oracle
: @@snip[Oracle](/core/src/test/resources/oracle-application.conf)

SQL Server
: @@snip[SQL Server](/core/src/test/resources/sqlserver-application.conf)

### Sharing the database connection pool between the journals

In order to create only one connection pool which is shared between all journals the following configuration can be used:

Postgres
: @@snip[Postgres](/core/src/test/resources/postgres-shared-db-application.conf)

MySQL
: @@snip[MySQL](/core/src/test/resources/mysql-shared-db-application.conf)

H2
: @@snip[H2](/core/src/test/resources/h2-shared-db-application.conf)

Oracle
: @@snip[Oracle](/core/src/test/resources/oracle-shared-db-application.conf)

SQL Server
: @@snip[SQL Server](/core/src/test/resources/sqlserver-shared-db-application.conf)

### Customized loading of the db connection

It is also possible to load a custom database connection.
In order to do so a custom implementation of @extref:[SlickDatabaseProvider](github:/core/src/main/scala/akka/persistence/jdbc/db/SlickExtension.scala) needs to be created. The methods that need to be implemented supply the Slick `Database` and `Profile` to the journals.

To enable your custom `SlickDatabaseProvider`, the fully qualified class name of the `SlickDatabaseProvider` needs to be configured in `application.conf`. In addition, you might want to consider whether you want the database to be closed automatically:

```hocon
akka-persistence-jdbc {
  database-provider-fqcn = "com.mypackage.CustomSlickDatabaseProvider"
}
jdbc-journal {
  use-shared-db = "enabled" // setting this to any non-empty string prevents the journal from closing the database on shutdown
}
jdbc-snapshot-store {
  use-shared-db = "enabled" // setting this to any non-empty string prevents the snapshot-journal from closing the database on shutdown
}
```

### DataSource lookup by JNDI name

The plugin uses `Slick` as the database access library. Slick @extref[supports jndi](slick:database.html#using-a-jndi-name) for looking up @javadoc[DataSource](javax.sql.DataSource)s.

To enable the JNDI lookup, you must add the following to your application.conf:

```hocon
jdbc-journal {
  slick {
    profile = "slick.jdbc.PostgresProfile$"
    jndiName = "java:jboss/datasources/PostgresDS"
  }
}
```

When using the `use-shared-db = slick` setting, the following configuration can serve as an example:

```hocon
akka-persistence-jdbc {
  shared-databases {
    slick {
      profile = "slick.jdbc.PostgresProfile$"
      jndiName = "java:/jboss/datasources/bla"
    }
  }
}
```

## Explicitly shutting down the database connections

The plugin automatically shuts down the HikariCP connection pool when the ActorSystem is terminated. This is done using @apidoc[ActorSystem.registerOnTermination](ActorSystem).

## Tuning for Lower Latency

The `jdbc-read-journal.journal-sequence-retrieval.query-delay` configuration controls how often the plugin's journal sequence actor queries for new data. The default is `1s`, but this can be set lower for latency-sensitive applications to reduce the time between data retrievals. Similarly, `jdbc-read-journal.refresh-interval` dictates how often the system polls for new events when idle, also defaulting to `1s`.

In mostly idle applications that still require low latencies, it is important to adjust both `query-delay` and `refresh-interval` to achieve optimal performance. Lowering just one of these values might not be sufficient for reducing latency.

As with any performance tuning, it's important to test these settings in your environment to find the right balance. Reducing these intervals will increase the load on your database, as each node in the cluster will be querying the event journal more frequently.

================================================
FILE: docs/src/main/paradox/custom-dao.md
================================================
# Custom DAO Implementation

The plugin supports loading a custom DAO for the journal and snapshot. You should implement a custom Data Access Object (DAO) if you wish to alter the default persistence strategy in any way, but wish to reuse all the logic that the plugin already has in place, e.g. the Akka Persistence Query API.

For example, the default persistence strategy that the plugin supports serializes journal and snapshot messages using a serializer of your choice and stores them as byte arrays in the database.
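To make that default strategy more concrete, here is a minimal sketch of turning an event into a byte array with Akka serialization; the `OrderPlaced` event, the `SerializationSketch` object and the serializer binding are assumptions made for this example and are not part of the plugin:

```scala
import akka.actor.ActorSystem
import akka.serialization.{ Serialization, SerializationExtension }

// Hypothetical event type used only for this sketch; in a real application it
// must be bound to a serializer (e.g. Jackson) in `akka.actor.serialization-bindings`.
final case class OrderPlaced(orderId: String, amount: Long)

object SerializationSketch extends App {
  val system = ActorSystem("example")
  val serialization: Serialization = SerializationExtension(system)

  val event = OrderPlaced("order-1", 42L)

  // Resolve the serializer configured for the event and turn it into bytes;
  // a payload like this is what the default DAOs write to the journal table.
  val serializer = serialization.findSerializerFor(event)
  val bytes: Array[Byte] = serializer.toBinary(event)

  println(s"Serialized ${bytes.length} bytes with serializer id ${serializer.identifier}")
  system.terminate()
}
```

A custom DAO is free to replace this representation, for example with one typed table per event kind, as long as it can reconstruct the payloads when the journal is read.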
A DAO can be configured in `application.conf`; the default DAOs are shown below:

```hocon
jdbc-journal {
  dao = "akka.persistence.jdbc.journal.dao.DefaultJournalDao"
}
jdbc-snapshot-store {
  dao = "akka.persistence.jdbc.snapshot.dao.DefaultSnapshotDao"
}
jdbc-read-journal {
  dao = "akka.persistence.jdbc.query.dao.DefaultReadJournalDao"
}
```

Storing messages as byte arrays in blobs is not the only way to store information in a database. For example, you could store messages with full type information as normal database rows, each event type having its own table. For example, you could implement a journal log table that stores the persistenceId, sequenceNumber and an event type discriminator field, and store the event data in another table with full typing.

You only have to implement two interfaces `akka.persistence.jdbc.journal.dao.JournalDao` and/or `akka.persistence.jdbc.snapshot.dao.SnapshotDao`. For example, take a look at the following two custom DAOs:

```scala
class MyCustomJournalDao(db: Database, val profile: JdbcProfile, journalConfig: JournalConfig, serialization: Serialization)(
    implicit ec: ExecutionContext, mat: Materializer) extends JournalDao {
  // snip
}

class MyCustomSnapshotDao(db: JdbcBackend#Database, val profile: JdbcProfile, snapshotConfig: SnapshotConfig, serialization: Serialization)(
    implicit ec: ExecutionContext, val mat: Materializer) extends SnapshotDao {
  // snip
}
```

As you can see, the custom DAOs get a _Slick database_, a _Slick profile_, the journal or snapshot _configuration_, an _akka.serialization.Serialization_, an _ExecutionContext_ and _Materializer_ injected after construction. You should register the fully qualified class name in `application.conf` so that the custom DAOs will be used.

For more information please review the two default implementations `akka.persistence.jdbc.dao.bytea.journal.ByteArrayJournalDao` and `akka.persistence.jdbc.dao.bytea.snapshot.ByteArraySnapshotDao` or the demo custom DAO example from the [demo-akka-persistence](https://github.com/dnvriend/demo-akka-persistence-jdbc) site.

@@@warning { title="Binary compatibility" }

The APIs for custom DAOs are not guaranteed to be binary backwards compatible between major versions of the plugin. For example 4.0.0 is not binary backwards compatible with 3.5.x. There may also be source incompatible changes to the APIs for custom DAOs if new capabilities must be added to the traits.

@@@

================================================
FILE: docs/src/main/paradox/durable-state-store.md
================================================
# DurableStateStore

## How to get the DurableStateStore

The `DurableStateStore` for the JDBC plugin is obtained through the `DurableStateStoreRegistry` extension.

Scala
: @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala) { #jdbc-durable-state-store }

Java
: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java) { #jdbc-durable-state-store }

## APIs supported by DurableStateStore

The plugin supports the following APIs:

### getObject

`getObject(persistenceId)` returns `GetObjectResult(value, revision)`, where `value` is an `Option` (`Optional` in Java) and is set to the value of the object if it exists with the passed in `persistenceId`. Otherwise `value` is empty.
Scala
: @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala) { #get-object }

Java
: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java) { #get-object }

### upsertObject

`upsertObject(persistenceId, revision, value, tag)` inserts the record if the `persistenceId` does not exist in the database. Otherwise it updates the record with the latest revision passed as `revision`. The update succeeds only if the incoming `revision` is 1 more than the already existing one.

This snippet is an example of a sequence of `upsertObject` and `getObject`.

Scala
: @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala) { #upsert-get-object }

Java
: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java) { #upsert-get-object }

### deleteObject

`deleteObject(persistenceId)` deletes the record with the input `persistenceId`.

Scala
: @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala) { #delete-object }

Java
: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java) { #delete-object }

### currentChanges

`currentChanges(tag, offset)` gets a source of the most recent changes made to objects with the given `tag` since the passed in `offset`. This API returns changes that occurred up to when the `Source` returned by this call is materialized.

Scala
: @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala) { #current-changes }

Java
: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java) { #current-changes }

### changes

`changes(tag, offset)` gets a source of the most recent changes made to objects with the given `tag` since the passed in `offset`. The returned source will never terminate; it effectively watches for changes to the objects and emits changes as they happen.

Scala
: @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala) { #changes }

Java
: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java) { #changes }

================================================
FILE: docs/src/main/paradox/index.md
================================================
# Akka Persistence JDBC

The Akka Persistence JDBC plugin allows for using JDBC-compliant databases as backend for @extref:[Akka Persistence](akka:persistence.html) and @extref:[Akka Persistence Query](akka:persistence-query.html).

@@toc { depth=2 }

@@@ index

* [Overview](overview.md)
* [Configuration](configuration.md)
* [Migration](migration.md)
* [Query](query.md)
* [Custom DAO](custom-dao.md)
* [Snapshots](snapshots.md)
* [Durable State Store](durable-state-store.md)

@@@

================================================
FILE: docs/src/main/paradox/migration.md
================================================
# Migration

## Migrating to version 5.4.0

Release `5.4.0` changed the schema of the `event_tag` table. The previous version was using an auto-increment column as a primary key and foreign key on the `event_tag` table. As a result, inserting multiple events in a batch was not performant. In `5.4.0`, the primary key and foreign key on the `event_tag` table have been replaced with the primary key from the `event_journal` table. In order to migrate to the new schema, we made a [**migration script**](https://github.com/akka/akka-persistence-jdbc/tree/master/core/src/main/resources/schema) which is capable of creating the new column, migrating the rows and adding the new constraints.
By default, the plugin will behave as in previous versions. However, two new columns must be added to the `event_tag` table. Before upgrading to `5.4.0`, make sure that you apply at least the first step of the [**migration script**](https://github.com/akka/akka-persistence-jdbc/tree/master/core/src/main/resources/schema).

If you want to use the new `event_tag` keys, you need to run a multiple-phase rollout:

1. apply the first step of the migration script (as mentioned above) and then redeploy your application with the default settings after upgrading to version `5.4.0`.
2. apply the second step of the migration script that will migrate the rows and adapt the constraints.
3. redeploy the application by disabling the legacy mode:

```config
jdbc-journal {
  tables {
    // ...
    event_tag {
      // ...
      // enable the new tag key
      legacy-tag-key = false
    }
  }
}

// or simply configure via the flattened style
jdbc-journal.tables.event_tag.legacy-tag-key = false
```

## Migrating to version 5.2.0

**Release `5.2.0` updates H2 to version 2.1.214, which is not compatible with the previous 1.4.200.**

H2 has undergone considerable changes that broke backwards compatibility to make H2 SQL Standard compliant. For migration please refer to the H2 [migration guide](https://www.h2database.com/html/migration-to-v2.html).

## Migrating to version 5.0.0

**Release `5.0.0` introduces a new schema and serialization that is not compatible with older versions.**

The previous version was wrapping the event payload with Akka's `PersistentRepr`, while in 5.0.0 the serialized event payload is persisted directly into the column. In order to migrate to the new schema, a migration tool capable of reading the serialized representation of `PersistentRepr` is required. That [tool doesn't exist yet](https://github.com/akka/akka-persistence-jdbc/issues/317); therefore, the new schema can only be used with new applications.

If you have existing data, override the DAOs to continue using the old schema:

```hocon
# Use the DAOs for the legacy (pre 5.0) database schema
jdbc-journal {
  dao = "akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao"
}
jdbc-snapshot-store {
  dao = "akka.persistence.jdbc.snapshot.dao.legacy.ByteArraySnapshotDao"
}
jdbc-read-journal {
  dao = "akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao"
}
```

If you have re-configured the `schemaName`, `tableName` and `columnNames` through configuration settings then you will need to move them to a new key.

* key `jdbc-journal.tables.journal` becomes `jdbc-journal.tables.legacy_journal`
* key `jdbc-snapshot-store.tables.snapshot` becomes `jdbc-snapshot-store.tables.legacy_snapshot`
* key `jdbc-read-journal.tables.journal` becomes `jdbc-read-journal.tables.legacy_journal`

================================================
FILE: docs/src/main/paradox/overview.md
================================================
# Overview

The Akka Persistence JDBC plugin allows for using JDBC-compliant databases as backend for @extref:[Akka Persistence](akka:persistence.html) and @extref:[Akka Persistence Query](akka:persistence-query.html).

akka-persistence-jdbc writes journal and snapshot entries to a configured JDBC store. It implements the full akka-persistence-query API and is therefore very useful for implementing DDD-style application models using Akka and Scala for creating reactive applications.

Akka Persistence JDBC requires Akka $akka.version$ or later.
It uses @extref:[Slick](slick:) $slick.version$ internally to access the database via JDBC; this does not require user code to make use of Slick.

## Version history

| Description | Version | Akka version |
|-------------|---------|--------------|
| Required database schema migration, see @ref:[Migration](migration.md#migrating-to-version-5-4-0) | [5.4.0](https://github.com/akka/akka-persistence-jdbc/releases/tag/v5.4.0) | Akka 2.6.+ |
| New database schema, see @ref:[Migration](migration.md#migrating-to-version-5-0-0) | [5.0.0](https://github.com/akka/akka-persistence-jdbc/releases/tag/v5.0.0) | Akka 2.6.+ |
| First release within the Akka organization | [4.0.0](https://github.com/akka/akka-persistence-jdbc/releases/tag/v4.0.0) | Akka 2.6.+ |
| Requires Akka 2.5.0 | [3.5.3+](https://github.com/akka/akka-persistence-jdbc/releases/tag/v3.5.3) | Akka 2.5.23+ or 2.6.x |

See the full release history at [GitHub releases](https://github.com/akka/akka-persistence-jdbc/releases).

## Module info

The Akka dependencies are available from Akka's library repository. To access them there, you need to configure the URL for this repository.

@@repository [sbt,Maven,Gradle] {
id="akka-repository"
name="Akka library repository"
url="https://repo.akka.io/maven"
}

Additionally, add the dependencies as below.

@@dependency [sbt,Maven,Gradle] {
group=com.lightbend.akka
artifact=akka-persistence-jdbc_$scala.binary.version$
version=$project.version$
symbol2=AkkaVersion
value2=$akka.version$
group2=com.typesafe.akka
artifact2=akka-persistence-query_$scala.binary.version$
version2=AkkaVersion
symbol3=SlickVersion
value3=$slick.version$
group3=com.typesafe.slick
artifact3=slick_$scala.binary.version$
version3=SlickVersion
group4=com.typesafe.slick
artifact4=slick-hikaricp_$scala.binary.version$
version4=SlickVersion
}

@@project-info{ projectId="core" }

## Contribution policy

Contributions via GitHub pull requests are gladly accepted from their original author. Along with any pull requests, please state that the contribution is your original work and that you license the work to the project under the project's open source license. Whether or not you state this explicitly, by submitting any copyrighted material via pull request, email, or other means you agree to license the material under the project's open source license and warrant that you have the legal authority to do so.

## Code of Conduct

Contributors all agree to follow the [Lightbend Community Code of Conduct](https://www.lightbend.com/conduct).
## License

This source code is made available under the [Business Source License 1.1](https://raw.githubusercontent.com/akka/akka-persistence-jdbc/master/LICENSE).

================================================
FILE: docs/src/main/paradox/query.md
================================================
# Persistence Query

## How to get the ReadJournal

The `ReadJournal` is retrieved via the `akka.persistence.query.PersistenceQuery` extension:

Scala
: @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/ScaladslSnippets.scala) { #read-journal }

Java
: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/JavadslSnippets.java) { #read-journal }

## Persistence Query Plugin

The plugin supports the following queries:

## AllPersistenceIdsQuery and CurrentPersistenceIdsQuery

`allPersistenceIds` and `currentPersistenceIds` are used for retrieving all persistenceIds of all persistent actors.

Scala
: @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/ScaladslSnippets.scala) { #persistence-ids }

Java
: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/JavadslSnippets.java) { #persistence-ids }

The returned event stream is unordered and you can expect a different order for multiple executions of the query.

When using the `persistenceIds` query, the stream is not completed when it reaches the end of the currently used persistenceIds, but it continues to push new persistenceIds when new persistent actors are created.

When using the `currentPersistenceIds` query, the stream is completed when the end of the current list of persistenceIds is reached, thus it is not a `live` query.

The stream is completed with failure if there is a failure in executing the query in the backend journal.

## EventsByPersistenceIdQuery and CurrentEventsByPersistenceIdQuery

`eventsByPersistenceId` and `currentEventsByPersistenceId` are used for retrieving events for a specific PersistentActor identified by persistenceId.

Scala
: @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/ScaladslSnippets.scala) { #events-by-persistence-id }

Java
: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/JavadslSnippets.java) { #events-by-persistence-id }

You can retrieve a subset of all events by specifying `fromSequenceNr` and `toSequenceNr` or use `0L` and `Long.MaxValue` respectively to retrieve all events. Note that the corresponding sequence number of each event is provided in the `EventEnvelope`, which makes it possible to resume the stream at a later point from a given sequence number.

The returned event stream is ordered by sequence number, i.e. the same order as the PersistentActor persisted the events. The same prefix of stream elements (in the same order) is returned for multiple executions of the query, except for when events have been deleted.

The stream is completed with failure if there is a failure in executing the query in the backend journal.

## EventsByTag and CurrentEventsByTag

`eventsByTag` and `currentEventsByTag` are used for retrieving events that were marked with a given `tag`, e.g. all domain events of an Aggregate Root type.

Scala
: @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/ScaladslSnippets.scala) { #events-by-tag }

Java
: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/JavadslSnippets.java) { #events-by-tag }

### Performance

If you see slow database queries for `eventsByTag`, please consider adding a dedicated index for the `tag` column in the `event_tag` table.
For postgres, the following index can be used: ``` CREATE INDEX CONCURRENTLY event_tag_tag_idx ON public.event_tag (tag); ``` ================================================ FILE: docs/src/main/paradox/snapshots.md ================================================ --- project.description: Snapshot builds via the Sonatype snapshot repository. --- # Snapshots Snapshots are published to https://repo.akka.io/snapshots repository after every successful build on master. Add the following to your project build definition to resolve Akka Persistence JDBC's snapshots: ## Configure repository Maven : ```xml ... akka-repository Akka library snapshot repository https://repo.akka.io/snapshots ... ``` sbt : ```scala resolvers += "Akka library snapshot repository".at("https://repo.akka.io/snapshots") ``` Gradle : ```gradle repositories { maven { url "https://repo.akka.io/snapshots" } } ``` ## Documentation The [snapshot documentation](https://doc.akka.io/libraries/akka-persistence-jdbc/snapshot) is updated with every snapshot build. ================================================ FILE: integration/LICENSE ================================================ LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT THIS LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT (THIS "AGREEMENT") IS A LEGAL AGREEMENT BETWEEN YOU ("USER") AND LIGHTBEND, INC. ("LICENSOR"). BY CLICKING THE "I ACCEPT" BUTTON, OR INSTALLING, COPYING OR OTHERWISE USING LIGHTBEND’S SOFTWARE (THE “SOFTWARE”) AND ITS ASSOCIATED USER DOCUMENTATION, USER ACKNOWLEDGES THAT USER HAS REVIEWED AND ACCEPTS THIS AGREEMENT AND AGREES TO BE BOUND BY ALL OF ITS TERMS. IF YOU ARE AGREEING TO THIS AGREEMENT AS AN INDIVIDUAL, “USER” REFERS TO YOU INDIVIDUALLY. IF YOU ARE AGREEING TO THIS AGREEMENT AS A REPRESENTATIVE OF AN ENTITY, YOU REPRESENT THAT YOU HAVE THE AUTHORITY TO BIND THAT ENTITY AND “USER” REFERS TO THAT ENTITY AND ALL THE USERS ACCESSING THE SOFTWARE BY, THROUGH OR ON BEHALF OF THAT ENTITY. IF USER DOES NOT AGREE WITH ALL OF THE TERMS OF THIS AGREEMENT, DO NOT INSTALL, COPY OR OTHERWISE USE THE SOFTWARE OR ITS DOCUMENTATION. 1. DEFINITIONS. 1. “User System” means User’s website(s), computers, servers and other equipment and software upon and with which the Software is run. 2. “Documentation” means the user instructions and help files made available by Licensor for use with the Software, as may be updated from time to time by Licensor. 3. “Intellectual Property Rights” means all intellectual property rights or similar proprietary rights, including (a) patent rights and utility models, (b) copyrights and database rights, (c) trademarks, trade names, domain names and trade dress and the goodwill associated therewith, (d) trade secrets, (e) mask works, and (f) industrial design rights; in each case, including any registrations of, applications to register, and renewals and extensions of, any of the foregoing in any jurisdiction in the world. 4. “Open Source Software” means all software that is available under the GNU Affero General Public License (AGPL), GNU General Public License (GPL), GNU Lesser General Public License (LGPL), Mozilla Public License (MPL), Apache License, BSD licenses, or any other license that is approved by or similar to those approved by the Open Source Initiative (www.opensource.org). 2. LICENSES AND RESTRICTIONS. 1. License. 
Subject to User’s compliance with the terms and conditions of this Agreement, Licensor hereby grants to User, during the term of this Agreement, a limited, non-exclusive, non-transferable and non-sublicensable right to (i) install and execute one (1) copy of the Software in accordance with the Documentation, solely in binary form, and not for the benefit of any other person or entity, and (ii) access and use the Documentation, solely for User’s own internally purposes in support of End User’s permitted use of the Software. 2. Restrictions. User shall not, directly or indirectly, or permit any User or third party to: (a) reverse engineer, decompile, disassemble or otherwise attempt to discover the source code or underlying ideas or algorithms of the Software; (b) modify, translate, or create derivative works based on any element of the Software or any related Documentation (except to the extent applicable laws specifically prohibit such restriction for interoperability purposes, in which case you agree to first contact Licensor and provide Licensor an opportunity to create such changes as are needed for interoperability purposes); (c) use, rent, lease, distribute, sell, resell, assign, or otherwise transfer the Software or any copy thereof; (d) use the Software for timesharing purposes or otherwise for the benefit of any person or entity other than for the benefit of User and Users; (e) remove any proprietary notices from the Software or the Documentation or attempt to defeat any copy protection device included with the Software; or (f) use the Software for any purpose other than its intended purpose. 3. Reservation of Rights. Nothing in this Agreement shall be construed to give User a right to use or otherwise obtain access to any source code from which the Software is compiled or interpreted. Except as expressly granted in this Agreement, there are no other licenses granted to User, express, implied or by way of estoppel. All rights not granted in this Agreement are reserved by Licensor. 4. Open Source Software. Notwithstanding the foregoing, certain items of software included with the Software are Open Source Software and remains subject Open Source Software licenses. Such Open Source Software is not subject to the terms and conditions of this Agreement. Instead, each such item of Open Source Software is licensed under the terms of the end user license that accompanies such Open Source Software and nothing in this Agreement limits your rights under, or grants you rights that supersede, the terms and conditions of any applicable end user license for such Open Source Software. If required by any license for particular Open Source Software, Licensor makes such Open Source Software, and any Licensor modifications to that Open Source Software, available as further described in the Documentation. USE OF THE SOFTWARE IN ANY MANNER OTHER THAN AS PROVIDED IN THIS AGREEMENT IS STRICTLY PROHIBITED AND MAY INFRINGE ON THE INTELLECTUAL PROPERTY RIGHTS OF LICENSOR AND/OR ITS LICENSOR(S), SUBJECTING USER TO CIVIL AND CRIMINAL PENALTIES, INCLUDING WITHOUT LIMITATION MONETARY DAMAGES AND IMPRISONMENT FOR COPYRIGHT INFRINGEMENT. 3. USER OBLIGATIONS. 1. User System. User is responsible for (a) obtaining, deploying and maintaining the User System, and all computer hardware, software, modems, routers and other communications equipment necessary for User and its Users to install and use the Software; and (b) paying all third party fees and access charges incurred in connection with the foregoing. 
Licensor shall not be responsible for supplying any hardware, software or other equipment to User under this Agreement. 2. Compliance with Laws. User agrees to use the Software in compliance with all applicable laws, including local laws of the country or region in which User resides, and in compliance with all United States export laws and regulations. User shall not use the Software for any purpose prohibited by applicable law. 3. Trademarks and Tradenames. With regard to all copies of the Software permitted herein, User shall reproduce on such copies all Licensor copyright notices, and other proprietary notices appearing on and in the original copy of the software received from Licensor. Except as set forth in the foregoing sentence, User will not, during the term of this Agreement or thereafter, use any trademark of Licensor, or any word and/or symbol likely to be confused with any Licensor trademark, either alone or in any combination with other words and/or symbols. 4. SUPPORT AND MAINTENANCE. 1. Support. Licensor is not responsible for maintenance or support of the Software, or the equipment on which the Software resides or is used, under this Agreement. By accepting the license granted under this Agreement, User agrees that Licensor will be under no obligation to provide any support, maintenance or service in connection with the Software or such equipment. 2. Upgrades and Updates. Licensor may from time to time in its sole discretion develop and provide updates for the Software, which may include upgrades, bug fixes, patches, other error corrections, and/or new features (collectively, including related documentation, “Updates”). Updates may also modify or delete in their entirety certain features and functionality. You agree that Licensor has no obligation to provide any Updates or to continue to provide or enable any particular features or functionality. 5. REPRESENTATIONS AND WARRANTIES; DISCLAIMER. 1. Mutual Representations and Warranties. Each party represents, warrants and covenants that: (a) it has the full power and authority to enter into this Agreement and to perform its obligations hereunder, without the need for any consents, approvals or immunities not yet obtained; and (b) its acceptance of and performance under this Agreement shall not breach any oral or written agreement with any third party or any obligation owed by it to any third party to keep any information or materials in confidence or in trust. 2. Disclaimer. EXCEPT FOR THE WARRANTIES SET FORTH IN THIS SECTION 5, THE SOFTWARE AND THE DOCUMENTATION ARE PROVIDED ON AN AS-IS BASIS. USER’S USE OF THE SOFTWARE AND THE DOCUMENTATION IS AT USER’S OWN RISK. LICENSOR DOES NOT MAKE, AND HEREBY DISCLAIMS, ANY AND ALL OTHER EXPRESS, STATUTORY AND IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT AND TITLE, QUALITY, SUITABILITY, OPERABILITY, CONDITION, SYSTEM INTEGRATION, NON-INTERFERENCE, WORKMANSHIP, TRUTH, ACCURACY (OF DATA OR ANY OTHER INFORMATION OR CONTENT), ABSENCE OF DEFECTS, WHETHER LATENT OR PATENT, AND ANY WARRANTIES ARISING FROM A COURSE OF DEALING, USAGE, OR TRADE PRACTICE. LICENSOR ALSO DOES NOT WARRANT THAT THE FUNCTIONS CONTAINED IN, PERFORMED AND/OR PROVIDED BY THE SOFTWARE WILL MEET USER’S REQUIREMENTS, THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR ERROR-FREE, THAT THE SOFTWARE WILL BE COMPATIBLE OR WORK WITH ANY THIRD-PARTY SOFTWARE, APPLICATIONS OR DEVICES, OR THAT DEFECTS IN THE SOFTWARE WILL BE CORRECTED. 
USER EXPRESSLY ACKNOWLEDGES AND AGREES THAT, TO THE EXTENT PERMITTED BY APPLICABLE LAW, ITS USE OF THE SOFTWARE IS AT ITS SOLE RISK AND THAT THE ENTIRE RISK AS TO SATISFACTORY QUALITY, PERFORMANCE, ACCURACY AND EFFORT IS WITH USER. USER FURTHER ACKNOWLEDGES AND AGREES THAT THE SOFTWARE IS NOT INTENDED OR SUITABLE FOR USE IN SITUATIONS OR ENVIRONMENTS WHERE THE FAILURE OR TIME DELAYS OF, OR ERRORS OR INACCURACIES IN THE CONTENT, DATA OR INFORMATION PROVIDED BY THE SOFTWARE COULD LEAD TO DEATH, PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE. ANY WARRANTIES MADE BY LICENSOR ARE FOR THE BENEFIT OF USER ONLY AND NOT FOR THE BENEFIT OF ANY THIRD PARTY. THE SOFTWARE AND THE DOCUMENTATION ARE LICENSED AND NOT SOLD. NO AGENT OF LICENSOR IS AUTHORIZED TO ALTER OR EXPAND THE WARRANTIES OF LICENSOR AS SET FORTH HEREIN. 6. INDEMNIFICATION. User shall defend Licensor and its licensors and their respective officers, directors and employees (“Licensor Indemnified Parties”) from and against any and all Third-Party Claims which arise out of or relate to: (a) User’s use or alleged use of the Software other than as permitted under this Agreement; or (b) arising out of or relating to any violation of Section 2.2, or any violation of applicable laws. User shall pay all damages, costs and expenses, including attorneys’ fees and costs (whether by settlement or award of by a final judicial judgment) incurred by the Licensor Indemnified Parties from any such Third-Party Claim. In no event shall Licensor settle any claim without User’s prior written approval. Licensor may, at its own expense, engage separate counsel to advise Licensor regarding a Third-Party Claim and to participate in the defense of the claim, subject to User’s right to control the defense and settlement. If you are a New Jersey resident, this indemnification clause is to be only as broad and inclusive as is permitted by the law of the state of New Jersey. 7. CONFIDENTIALITY. 1. Confidential Information. User acknowledges that the Software contains valuable proprietary information and trade secrets and that unauthorized or improper use of the Software will result in irreparable harm to Licensor for which monetary damages would be inadequate and for which Licensor may be entitled to immediate injunctive relief. Accordingly, you will maintain the confidentiality of the proprietary information and not sell, license, publish, display, distribute, disclose or otherwise make available such proprietary information to any third party, nor use such information except as authorized by this Agreement. 2. Injunctive Relief. User agrees that any unauthorized disclosure of confidential information may cause immediate and irreparable injury to Licensor and that, in the event of such breach, Licensor will be entitled, in addition to any other available remedies, to seek immediate injunctive and other equitable relief, without bond and without the necessity of showing actual monetary damages. 8. PROPRIETARY RIGHTS. 1. Licensor. As between Licensor and User, all right, title and interest in the Software, the Documentation, and any other Licensor materials furnished or made available hereunder, and all modifications and enhancements thereof, and all suggestions, ideas and feedback proposed by User regarding the Software, including all copyright rights, patent rights and other Intellectual Property Rights in each of the foregoing, belong to and are retained solely by Licensor or Licensor’s licensors and providers, as applicable. 
User hereby does and will irrevocably assign to Licensor all evaluations, ideas, feedback and suggestions made by User to Licensor regarding the Software or the Documentation (collectively, “Feedback”) and all Intellectual Property Rights in the Feedback. 9. LIMITATION OF LIABILITY. 1. No Consequential Damages. NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR ANY INDIRECT, INCIDENTAL, SPECIAL, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR ANY DAMAGES FOR LOST DATA, BUSINESS INTERRUPTION, LOST PROFITS, LOST REVENUE OR LOST BUSINESS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT, EVEN IF LICENSOR OR ITS LICENSORS OR USER HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES, INCLUDING WITHOUT LIMITATION, ANY SUCH DAMAGES ARISING OUT OF THE LICENSING, PROVISION OR USE OF THE SOFTWARE OR THE RESULTS OF THE USE OF THE SOFTWARE. LICENSOR WILL NOT BE LIABLE FOR THE COST OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES. 2. LIMITS ON LIABILITY. NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR CUMULATIVE, AGGREGATE DAMAGES GREATER THAN FIVE HUNDRED DOLLARS (US $500). 3. ESSENTIAL PURPOSE. USER ACKNOWLEDGES THAT THE TERMS IN THIS SECTION 9 (LIMITATION OF LIABILITY) SHALL APPLY TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW AND SHALL APPLY EVEN IF AN EXCLUSIVE OR LIMITED REMEDY STATED HEREIN FAILS OF ITS ESSENTIAL PURPOSE. SOME JURISDICTIONS DO NOT ALLOW CERTAIN LIMITATIONS OF LIABILITY, SO SOME OR ALL OF THE ABOVE LIMITATIONS OF LIABILITY MAY NOT APPLY TO YOU. IF YOU ARE A NEW JERSEY RESIDENT, THIS LIMITATION OF LIABILITY SECTION IS TO BE ONLY AS BROAD AND INCLUSIVE AS IS PERMITTED BY THE LAW OF THE STATE OF NEW JERSEY. 10. TERM AND TERMINATION. 1. Term. This Agreement and User’s right to use the Software commences on earlier of the date that User: (a) installs the Software, (b) begins using the Software or (c) otherwise demonstrates assent to this Agreement. User’s right to use the Software shall continue until such time as this Agreement is terminated (the “Term”). 2. Termination for Cause. A party may terminate this Agreement, upon written notice to the other party in the event the other party files a petition for bankruptcy or has a petition for bankruptcy filed against it that is not dismissed within sixty (60) calendar days after filing or admits its inability to pay its debts as they mature, makes an assignment for the benefit of its creditors or ceases to function as a going concern or to conduct its operations in the normal course of business and such termination shall occur immediately upon notice. Licensor may terminate this Agreement at any time without notice if it ceases to support the Software, which Licensor may do in its sole discretion. In addition, this Agreement will terminate immediately and automatically without any notice if User breaches any of its terms and conditions. 3. Termination for Convenience. Either party may terminate this Agreement for convenience on at least thirty (30) calendar days prior written notice to the other party. User may also terminate this Agreement by ceasing all use of the Software. 4. Effects of Termination. Upon expiration or termination of this Agreement, User’s shall cease all use of the Software and the Documentation and shall destroy all copies of the Software in User’s possession or control. 5. Survival. 
This Section and Sections 1, 2.2 (Restrictions), 2.3 (Reservation of Rights), 2.4 (Open Source Software), 5.2 (Disclaimer), 6 (Indemnification), 7 (Confidentiality), 8 (Proprietary Rights), 9 (Limitation of Liability), 10.4 (Effects of Termination) and 11 (Miscellaneous) shall survive any termination or expiration of this Agreement. 11. MISCELLANEOUS. 1. Notices. Licensor may give notice to User by means of a general notice through electronic mail to User’s e-mail address, or by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service to User’s address on record with Licensor. User may give notice to Licensor by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service addressed to Licensor, Lightbend Inc., 580 California, #1231, San Francisco, CA 94104, Attention: User Support. Notice shall be deemed to have been given upon receipt or, if earlier, two (2) business days after mailing, as applicable. All communications and notices to be made or given pursuant to this Agreement shall be in the English language. 2. Governing Law. This Agreement and the rights and obligations of the parties to and under this agreement shall be governed by and construed under the laws of the United States and the State of California as applied to agreements entered into and to be performed in such State without giving effect to conflicts of laws rules or principles. The parties agree that the United Nations Convention on Contracts for the International Sale of Goods is specifically excluded from application to this Agreement and that the application of the Uniform Computer Information Transactions Act (UCITA) is specifically disclaimed. Any dispute arising out of or in connection with this Agreement, including but not limited to any question regarding its existence, interpretation, validity, performance, or termination, or any dispute between the parties arising from the parties' relationship created by this Agreement, shall be referred to and finally resolved by arbitration administered by the American Arbitration Association under its rules. The number of arbitrators shall be one (1). The parties shall endeavor to agree upon the sole arbitrator and jointly nominate the arbitrator. If the parties cannot agree upon the sole arbitrator within a time prescribed by AAA, the parties shall request the AAA to propose five (5) arbitrators and each party shall rank the proposed arbitrators. The AAA shall appoint an arbitrator from the list of five (5), based upon the parties' rankings. 3. U.S. Government Users. If User is a Federal Government entity, Licensor provides the Software and the Documentation, including related software and technology, for ultimate Federal Government end use solely in accordance with the following: Government technical data rights include only those rights customarily provided to the public with a commercial item or process and Government software rights related to the Software and the Documentation include only those rights customarily provided to the public, as defined in this Agreement. The technical data rights and customary commercial software license is provided in accordance with FAR 12.211 (Technical Data) and FAR 12.212 (Software) and, for Department of Defense transactions, DFAR 252.227-7015 (Technical Data – Commercial Items) and DFAR 227.7202-3 (Rights in Commercial Computer Software or Computer Software Documentation). 
If greater rights are needed, a mutually acceptable written addendum specifically conveying such rights must be included in this Agreement. 4. Export. The Software utilizes software and technology that may be subject to United States and foreign export controls. User acknowledges and agrees that the Software shall not be used, and none of the underlying information, software, or technology may be transferred or otherwise exported or re-exported to countries as to which the United States maintains an embargo (collectively, “Embargoed Countries”), or to or by a national or resident thereof, or any person or entity on the U.S. Department of Treasury’s List of Specially Designated Nationals or the U.S. Department of Commerce’s Table of Denial Orders (collectively, “Designated Nationals”). The lists of Embargoed Countries and Designated Nationals are subject to change without notice. By using the Software, User represents and warrants that it is not located in, under the control of, or a national or resident of an Embargoed Country or Designated National. The Software may use encryption technology that is subject to licensing requirements under the U.S. Export Administration Regulations, 15 C.F.R. Parts 730-774 and Council Regulation (EC) No. 1334/2000. User agrees to comply strictly with all applicable export laws and assume sole responsibility for obtaining licenses to export or re-export as may be required. Licensor and its licensors make no representation that the Software is appropriate or available for use in other locations. By using the Software, User represents and warrants that it is not located in any such country or on any such list. 5. General. User shall not assign its rights hereunder, or delegate the performance of any of its duties or obligations hereunder, whether by merger, acquisition, sale of assets, operation of law, or otherwise, without the prior written consent of Licensor. Any purported assignment in violation of the preceding sentence is null and void. Subject to the foregoing, this Agreement shall be binding upon, and inure to the benefit of, the successors and assigns of the parties thereto. Except as otherwise specified in this Agreement, this Agreement may be amended or supplemented only by a writing that refers explicitly to this Agreement and that is signed on behalf of both parties. No waiver will be implied from conduct or failure to enforce rights. No waiver will be effective unless in a writing signed on behalf of the party against whom the waiver is asserted. If any of this Agreement is found invalid or unenforceable that term will be enforced to the maximum extent permitted by law and the remainder of this Agreement will remain in full force. Nothing contained herein shall be construed as creating an agency, partnership, or other form of joint enterprise between the parties. This Agreement constitutes the entire agreement between the parties relating to this subject matter and supersedes all prior or simultaneous understandings, representations, discussions, negotiations, and agreements, whether written or oral. 
Neither party shall be liable to the other party or any third party for failure or delay in performing its obligations under this Agreement when such failure or delay is due to any cause beyond the control of the party concerned, including, without limitation, force majeure, governmental orders or restrictions, fire, or flood, provided that upon cessation of such events such party shall thereupon promptly perform or complete the performance of its obligations hereunder.

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/AllPersistenceIdsTest.scala
================================================
package akka.persistence.jdbc.integration

import akka.persistence.jdbc.query.{ AllPersistenceIdsTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }

class PostgresScalaAllPersistenceIdsTest extends AllPersistenceIdsTest("postgres-application.conf") with PostgresCleaner

class MySQLScalaAllPersistenceIdsTest extends AllPersistenceIdsTest("mysql-application.conf") with MysqlCleaner

class OracleScalaAllPersistenceIdsTest extends AllPersistenceIdsTest("oracle-application.conf") with OracleCleaner

class SqlServerScalaAllPersistenceIdsTest extends AllPersistenceIdsTest("sqlserver-application.conf") with SqlServerCleaner

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/CurrentEventsByPersistenceIdTest.scala
================================================
package akka.persistence.jdbc.integration

import akka.persistence.jdbc.query.{ CurrentEventsByPersistenceIdTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }

// Note: these tests use the shared-db configs, the test for all (so not only current) events use the regular db config

class PostgresScalaCurrentEventsByPersistenceIdTest extends CurrentEventsByPersistenceIdTest("postgres-shared-db-application.conf") with PostgresCleaner

class MySQLScalaCurrentEventsByPersistenceIdTest extends CurrentEventsByPersistenceIdTest("mysql-shared-db-application.conf") with MysqlCleaner

class OracleScalaCurrentEventsByPersistenceIdTest extends CurrentEventsByPersistenceIdTest("oracle-shared-db-application.conf") with OracleCleaner

class SqlServerScalaCurrentEventsByPersistenceIdTest extends CurrentEventsByPersistenceIdTest("sqlserver-shared-db-application.conf") with SqlServerCleaner

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/CurrentEventsByTagTest.scala
================================================
package akka.persistence.jdbc.integration

import akka.persistence.jdbc.query.{ CurrentEventsByTagTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }

// Note: these tests use the shared-db configs, the test for all (so not only current) events use the regular db config

class PostgresScalaCurrentEventsByTagTest extends CurrentEventsByTagTest("postgres-shared-db-application.conf") with PostgresCleaner

class MySQLScalaCurrentEventsByTagTest extends CurrentEventsByTagTest("mysql-shared-db-application.conf") with MysqlCleaner

class OracleScalaCurrentEventsByTagTest extends CurrentEventsByTagTest("oracle-shared-db-application.conf") with OracleCleaner

class SqlServerScalaCurrentEventsByTagTest extends CurrentEventsByTagTest("sqlserver-shared-db-application.conf") with SqlServerCleaner
================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/CurrentPersistenceIdsTest.scala
================================================
package akka.persistence.jdbc.integration

import akka.persistence.jdbc.query.{ CurrentPersistenceIdsTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }

// Note: these tests use the shared-db configs, the test for all persistence ids use the regular db config

class PostgresScalaCurrentPersistenceIdsTest extends CurrentPersistenceIdsTest("postgres-shared-db-application.conf") with PostgresCleaner

class MySQLScalaCurrentPersistenceIdsTest extends CurrentPersistenceIdsTest("mysql-shared-db-application.conf") with MysqlCleaner

class OracleScalaCurrentPersistenceIdsTest extends CurrentPersistenceIdsTest("oracle-shared-db-application.conf") with OracleCleaner

class SqlServerScalaCurrentPersistenceIdsTest extends CurrentPersistenceIdsTest("sqlserver-application.conf") with SqlServerCleaner

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/EventAdapterTest.scala
================================================
package akka.persistence.jdbc.integration

import akka.persistence.jdbc.query.{ EventAdapterTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }

class PostgresScalaEventAdapterTest extends EventAdapterTest("postgres-application.conf") with PostgresCleaner

class MySQLScalaEventAdapterTest extends EventAdapterTest("mysql-application.conf") with MysqlCleaner

class OracleScalaEventAdapterTest extends EventAdapterTest("oracle-application.conf") with OracleCleaner

class SqlServerScalaEventAdapterTest extends EventAdapterTest("sqlserver-application.conf") with SqlServerCleaner

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/EventSourcedCleanupTest.scala
================================================
/*
 * Copyright (C) 2014 - 2019 Dennis Vriend
 * Copyright (C) 2019 - 2024 Lightbend Inc.
 */

package akka.persistence.jdbc.integration

import akka.persistence.jdbc.cleanup.scaladsl.EventSourcedCleanupTest
import akka.persistence.jdbc.query.{ MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }

// Note: these tests use the shared-db configs, the test for all (so not only current) events use the regular db config

class PostgresEventSourcedCleanupTest extends EventSourcedCleanupTest("postgres-shared-db-application.conf") with PostgresCleaner

class MySQLEventSourcedCleanupTest extends EventSourcedCleanupTest("mysql-shared-db-application.conf") with MysqlCleaner

class OracleEventSourcedCleanupTest extends EventSourcedCleanupTest("oracle-shared-db-application.conf") with OracleCleaner

class SqlServerEventSourcedCleanupTest extends EventSourcedCleanupTest("sqlserver-shared-db-application.conf") with SqlServerCleaner

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/EventsByPersistenceIdTest.scala
================================================
package akka.persistence.jdbc.integration

import akka.persistence.jdbc.query.{ EventsByPersistenceIdTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }

class PostgresScalaEventsByPersistenceIdTest extends EventsByPersistenceIdTest("postgres-application.conf") with PostgresCleaner

class MySQLScalaEventsByPersistenceIdTest extends EventsByPersistenceIdTest("mysql-application.conf") with MysqlCleaner

class OracleScalaEventsByPersistenceIdTest extends EventsByPersistenceIdTest("oracle-application.conf") with OracleCleaner

class SqlServerScalaEventsByPersistenceIdTest extends EventsByPersistenceIdTest("sqlserver-application.conf") with SqlServerCleaner

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/EventsByTagMigrationTest.scala
================================================
/*
 * Copyright (C) 2014 - 2019 Dennis Vriend
 * Copyright (C) 2019 - 2024 Lightbend Inc.
 */

package akka.persistence.jdbc.integration

import akka.persistence.jdbc.query.{ EventsByTagMigrationTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }

class PostgresScalaEventsByTagMigrationTest extends EventsByTagMigrationTest("postgres-application.conf") with PostgresCleaner {}

class MySQLScalaEventByTagMigrationTest extends EventsByTagMigrationTest("mysql-application.conf") with MysqlCleaner {
  override def dropLegacyFKConstraint(): Unit =
    dropConstraint(constraintType = "FOREIGN KEY", constraintDialect = "FOREIGN KEY")

  override def dropLegacyPKConstraint(): Unit =
    dropConstraint(constraintType = "PRIMARY KEY", constraintDialect = "", constraintNameDialect = "KEY")

  override def addNewPKConstraint(): Unit =
    addPKConstraint(constraintNameDialect = "")

  override def addNewFKConstraint(): Unit =
    addFKConstraint()

  override def migrateLegacyRows(): Unit =
    fillNewColumn(
      joinDialect = joinSQL,
      pidSetDialect =
        s"${tagTableCfg.tableName}.${tagTableCfg.columnNames.persistenceId} = ${journalTableName}.${journalTableCfg.columnNames.persistenceId}",
      seqNrSetDialect =
        s"${tagTableCfg.tableName}.${tagTableCfg.columnNames.sequenceNumber} = ${journalTableName}.${journalTableCfg.columnNames.sequenceNumber}")
}

class OracleScalaEventByTagMigrationTest extends EventsByTagMigrationTest("oracle-application.conf") with OracleCleaner {
  override def addNewColumn(): Unit = {
    // mock event_id not null, in order to change it to null later
    alterColumn(alterDialect = "MODIFY", changeToDialect = "NOT NULL")
  }

  override def dropLegacyFKConstraint(): Unit =
    dropConstraint(constraintTableName = "USER_CONSTRAINTS", constraintType = "R")

  override def dropLegacyPKConstraint(): Unit =
    dropConstraint(constraintTableName = "USER_CONSTRAINTS", constraintType = "P")

  override def migrateLegacyRows(): Unit =
    withStatement { stmt =>
      stmt.execute(s"""UPDATE ${tagTableCfg.tableName}
                      |SET (${tagTableCfg.columnNames.persistenceId}, ${tagTableCfg.columnNames.sequenceNumber}) = (
                      |  SELECT ${journalTableCfg.columnNames.persistenceId}, ${journalTableCfg.columnNames.sequenceNumber}
                      |  ${fromSQL}
                      |)
                      |WHERE EXISTS (
                      |  SELECT 1
                      |  ${fromSQL}
                      |)""".stripMargin)
    }
}

class SqlServerScalaEventByTagMigrationTest extends EventsByTagMigrationTest("sqlserver-application.conf") with SqlServerCleaner {
  override def addNewPKConstraint(): Unit = {
    // Change new column not null
    alterColumn(columnName = tagTableCfg.columnNames.persistenceId, changeToDialect = "NVARCHAR(255) NOT NULL")
    alterColumn(columnName = tagTableCfg.columnNames.sequenceNumber, changeToDialect = "NUMERIC(10,0) NOT NULL")
    super.addNewPKConstraint()
  }
}
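The overrides above adapt the event-tag migration DDL and row back-fill to each database dialect, but the intent of migrateLegacyRows is the same everywhere: copy the persistence id and sequence number of every tagged event from the journal table into the tag table, joining on the legacy event_id / ordering key. As a rough sketch only (not part of the repository), and assuming the plugin's default table and column names (event_tag, event_journal, event_id, ordering, persistence_id, sequence_number), the Postgres flavour of that back-fill is approximately:

// Illustrative only; table and column names are the assumed defaults, not taken from this test.
val fillTagColumnsSql: String =
  """UPDATE event_tag
    |SET persistence_id = event_journal.persistence_id,
    |    sequence_number = event_journal.sequence_number
    |FROM event_journal
    |WHERE event_tag.event_id = event_journal.ordering""".stripMargin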
================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/EventsByTagTest.scala
================================================
package akka.persistence.jdbc.integration

import akka.persistence.jdbc.query.{ EventsByTagTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }

class PostgresScalaEventsByTagTest extends EventsByTagTest("postgres-application.conf") with PostgresCleaner

class MySQLScalaEventByTagTest extends EventsByTagTest("mysql-application.conf") with MysqlCleaner

class OracleScalaEventByTagTest extends EventsByTagTest("oracle-application.conf") with OracleCleaner

class SqlServerScalaEventByTagTest extends EventsByTagTest("sqlserver-application.conf") with SqlServerCleaner

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/HardDeleteQueryTest.scala
================================================
package akka.persistence.jdbc.integration

import akka.persistence.jdbc.query.{ HardDeleteQueryTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }

class PostgresHardDeleteQueryTest extends HardDeleteQueryTest("postgres-application.conf") with PostgresCleaner

class MySQLHardDeleteQueryTest extends HardDeleteQueryTest("mysql-application.conf") with MysqlCleaner

class OracleHardDeleteQueryTest extends HardDeleteQueryTest("oracle-application.conf") with OracleCleaner

class SqlServerHardDeleteQueryTest extends HardDeleteQueryTest("sqlserver-application.conf") with SqlServerCleaner

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/JdbcJournalPerfSpec.scala
================================================
package akka.persistence.jdbc.integration

import akka.persistence.jdbc.journal.JdbcJournalPerfSpec
import akka.persistence.jdbc.testkit.internal.MySQL
import akka.persistence.jdbc.testkit.internal.Oracle
import akka.persistence.jdbc.testkit.internal.Postgres
import akka.persistence.jdbc.testkit.internal.SqlServer
import com.typesafe.config.ConfigFactory

class PostgresJournalPerfSpec extends JdbcJournalPerfSpec(ConfigFactory.load("postgres-application.conf"), Postgres) {
  override def eventsCount: Int = 100
}

class PostgresJournalPerfSpecSharedDb extends JdbcJournalPerfSpec(ConfigFactory.load("postgres-shared-db-application.conf"), Postgres) {
  override def eventsCount: Int = 100
}

class MySQLJournalPerfSpec extends JdbcJournalPerfSpec(ConfigFactory.load("mysql-application.conf"), MySQL) {
  override def eventsCount: Int = 100
}

class MySQLJournalPerfSpecSharedDb extends JdbcJournalPerfSpec(ConfigFactory.load("mysql-shared-db-application.conf"), MySQL) {
  override def eventsCount: Int = 100
}

class OracleJournalPerfSpec extends JdbcJournalPerfSpec(ConfigFactory.load("oracle-application.conf"), Oracle) {
  override def eventsCount: Int = 100
}

class OracleJournalPerfSpecSharedDb extends JdbcJournalPerfSpec(ConfigFactory.load("oracle-shared-db-application.conf"), Oracle) {
  override def eventsCount: Int = 100
}

class SqlServerJournalPerfSpec extends JdbcJournalPerfSpec(ConfigFactory.load("sqlserver-application.conf"), SqlServer) {
  override def eventsCount: Int = 100
}

class SqlServerJournalPerfSpecSharedDb extends JdbcJournalPerfSpec(ConfigFactory.load("sqlserver-shared-db-application.conf"), SqlServer) {
  override def eventsCount: Int = 100
}

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/JdbcJournalSpec.scala
================================================
package akka.persistence.jdbc.integration

import akka.persistence.jdbc.journal.JdbcJournalSpec
import akka.persistence.jdbc.testkit.internal.{ MySQL, Oracle, Postgres, SqlServer }
import com.typesafe.config.ConfigFactory

class PostgresJournalSpec extends JdbcJournalSpec(ConfigFactory.load("postgres-application.conf"), Postgres)

class PostgresJournalSpecSharedDb extends JdbcJournalSpec(ConfigFactory.load("postgres-shared-db-application.conf"), Postgres)

class MySQLJournalSpec extends JdbcJournalSpec(ConfigFactory.load("mysql-application.conf"), MySQL)

class MySQLJournalSpecSharedDb extends JdbcJournalSpec(ConfigFactory.load("mysql-shared-db-application.conf"), MySQL)

class OracleJournalSpec extends JdbcJournalSpec(ConfigFactory.load("oracle-application.conf"), Oracle)
class OracleJournalSpecSharedDb extends JdbcJournalSpec(ConfigFactory.load("oracle-shared-db-application.conf"), Oracle)

class SqlServerJournalSpec extends JdbcJournalSpec(ConfigFactory.load("sqlserver-application.conf"), SqlServer)

class SqlServerJournalSpecSharedDb extends JdbcJournalSpec(ConfigFactory.load("sqlserver-shared-db-application.conf"), SqlServer)

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/JdbcSnapshotStoreSpec.scala
================================================
package akka.persistence.jdbc.integration

import akka.persistence.jdbc.snapshot.JdbcSnapshotStoreSpec
import akka.persistence.jdbc.testkit.internal.MySQL
import akka.persistence.jdbc.testkit.internal.Oracle
import akka.persistence.jdbc.testkit.internal.Postgres
import akka.persistence.jdbc.testkit.internal.SqlServer
import com.typesafe.config.ConfigFactory

class PostgresSnapshotStoreSpec extends JdbcSnapshotStoreSpec(ConfigFactory.load("postgres-application.conf"), Postgres)

class MySQLSnapshotStoreSpec extends JdbcSnapshotStoreSpec(ConfigFactory.load("mysql-application.conf"), MySQL)

class OracleSnapshotStoreSpec extends JdbcSnapshotStoreSpec(ConfigFactory.load("oracle-application.conf"), Oracle)

class SqlServerSnapshotStoreSpec extends JdbcSnapshotStoreSpec(ConfigFactory.load("sqlserver-application.conf"), SqlServer)

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/JournalDaoStreamMessagesMemoryTest.scala
================================================
package akka.persistence.jdbc.integration

import akka.persistence.jdbc.query.{ JournalDaoStreamMessagesMemoryTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }

class PostgresJournalDaoStreamMessagesMemoryTest extends JournalDaoStreamMessagesMemoryTest("postgres-application.conf") with PostgresCleaner

class MySQLJournalDaoStreamMessagesMemoryTest extends JournalDaoStreamMessagesMemoryTest("mysql-application.conf") with MysqlCleaner

class OracleJournalDaoStreamMessagesMemoryTest extends JournalDaoStreamMessagesMemoryTest("oracle-application.conf") with OracleCleaner

class SqlServerJournalDaoStreamMessagesMemoryTest extends JournalDaoStreamMessagesMemoryTest("sqlserver-application.conf") with SqlServerCleaner

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/JournalSequenceActorTest.scala
================================================
package akka.persistence.jdbc.integration

import akka.persistence.jdbc.query.{ JournalSequenceActorTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }

class PostgresJournalSequenceActorTest extends JournalSequenceActorTest("postgres-application.conf", isOracle = false) with PostgresCleaner

class MySQLJournalSequenceActorTest extends JournalSequenceActorTest("mysql-application.conf", isOracle = false) with MysqlCleaner

class OracleJournalSequenceActorTest extends JournalSequenceActorTest("oracle-application.conf", isOracle = true) with OracleCleaner

class SqlServerJournalSequenceActorTest extends JournalSequenceActorTest("sqlserver-application.conf", isOracle = false) with SqlServerCleaner

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/PostgresDurableStateStorePluginSpec.scala
================================================
package akka.persistence.jdbc.integration

import com.typesafe.config.ConfigFactory
import slick.jdbc.PostgresProfile
import akka.persistence.jdbc.state.scaladsl.DurableStateStorePluginSpec

class PostgresDurableStateStorePluginSpec extends DurableStateStorePluginSpec(ConfigFactory.load("postgres-shared-db-application.conf"), PostgresProfile) {}

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/PostgresScalaJdbcDurableStateChangesByTagTest.scala
================================================
package akka.persistence.jdbc.integration

import com.typesafe.config.ConfigFactory
import akka.actor.ActorSystem
import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateSpec
import akka.persistence.jdbc.testkit.internal.Postgres

class PostgresScalaJdbcDurableStateStoreQueryTest extends JdbcDurableStateSpec(ConfigFactory.load("postgres-shared-db-application.conf"), Postgres) {
  implicit lazy val system: ActorSystem = ActorSystem("JdbcDurableStateSpec", config.withFallback(customSerializers))
}

================================================
FILE: integration/src/test/scala/akka/persistence/jdbc/integration/StoreOnlySerializableMessagesTest.scala
================================================
package akka.persistence.jdbc.integration

import akka.persistence.jdbc.serialization.StoreOnlySerializableMessagesTest
import akka.persistence.jdbc.testkit.internal.MySQL
import akka.persistence.jdbc.testkit.internal.Oracle
import akka.persistence.jdbc.testkit.internal.Postgres
import akka.persistence.jdbc.testkit.internal.SqlServer

class PostgresStoreOnlySerializableMessagesTest extends StoreOnlySerializableMessagesTest("postgres-application.conf", Postgres)

class MySQLStoreOnlySerializableMessagesTest extends StoreOnlySerializableMessagesTest("mysql-application.conf", MySQL)

class OracleStoreOnlySerializableMessagesTest extends StoreOnlySerializableMessagesTest("oracle-application.conf", Oracle)

class SqlServerStoreOnlySerializableMessagesTest extends StoreOnlySerializableMessagesTest("sqlserver-application.conf", SqlServer)

================================================
FILE: migrator/src/main/scala/akka/persistence/jdbc/migrator/JournalMigrator.scala
================================================
/*
 * Copyright (C) 2014 - 2019 Dennis Vriend
 * Copyright (C) 2019 - 2025 Lightbend Inc.
 */

package akka.persistence.jdbc.migrator

import akka.Done
import akka.actor.ActorSystem
import akka.persistence.PersistentRepr
import akka.persistence.jdbc.AkkaSerialization
import akka.persistence.jdbc.config.{ JournalConfig, ReadJournalConfig }
import akka.persistence.jdbc.db.SlickExtension
import akka.persistence.jdbc.journal.dao.JournalQueries
import akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalSerializer
import akka.persistence.jdbc.journal.dao.JournalTables.{ JournalAkkaSerializationRow, TagRow }
import akka.persistence.jdbc.query.dao.legacy.ReadJournalQueries
import akka.serialization.{ Serialization, SerializationExtension }
import akka.stream.scaladsl.Source
import org.slf4j.{ Logger, LoggerFactory }
import slick.jdbc._

import scala.concurrent.{ ExecutionContextExecutor, Future }
import scala.util.{ Failure, Success }

/**
 * This will help migrate the legacy journal data onto the new journal schema with the
 * appropriate serialization
 *
 * @param system the actor system
 */
final case class JournalMigrator(profile: JdbcProfile)(implicit system: ActorSystem) {
  implicit val ec: ExecutionContextExecutor = system.dispatcher

  import profile.api._

  val log: Logger = LoggerFactory.getLogger(getClass)

  // get the various configurations
  private val journalConfig: JournalConfig = new JournalConfig(
    system.settings.config.getConfig(JournalMigrator.JournalConfig))
  private val readJournalConfig: ReadJournalConfig = new ReadJournalConfig(
    system.settings.config.getConfig(JournalMigrator.ReadJournalConfig))

  // the journal database
  private val journalDB: JdbcBackend.Database =
    SlickExtension(system).database(system.settings.config.getConfig(JournalMigrator.ReadJournalConfig)).database

  // get an instance of the new journal queries
  private val newJournalQueries: JournalQueries =
    new JournalQueries(profile, journalConfig.eventJournalTableConfiguration, journalConfig.eventTagTableConfiguration)

  // let us get the journal reader
  private val serialization: Serialization = SerializationExtension(system)
  private val legacyJournalQueries: ReadJournalQueries = new ReadJournalQueries(profile, readJournalConfig)
  private val serializer: ByteArrayJournalSerializer =
    new ByteArrayJournalSerializer(serialization, readJournalConfig.pluginConfig.tagSeparator)

  private val bufferSize: Int = journalConfig.daoConfig.bufferSize

  private val query = legacyJournalQueries.JournalTable.result
    .withStatementParameters(
      rsType = ResultSetType.ForwardOnly,
      rsConcurrency = ResultSetConcurrency.ReadOnly,
      fetchSize = bufferSize)
    .transactionally

  /**
   * write all legacy events into the new journal tables applying the proper serialization
   */
  def migrate(): Future[Done] = Source
    .fromPublisher(journalDB.stream(query))
    .via(serializer.deserializeFlow)
    .map {
      case Success((repr, tags, ordering)) => (repr, tags, ordering)
      case Failure(exception)              => throw exception // blow-up on failure
    }
    .map { case (repr, tags, ordering) => serialize(repr, tags, ordering) }
    // get pages of many records at once
    .grouped(bufferSize)
    .mapAsync(1)(records => {
      val stmt: DBIO[Unit] = records
        // get all the sql statements for this record as an option
        .map { case (newRepr, newTags) =>
          log.debug(s"migrating event for PersistenceID: ${newRepr.persistenceId} with tags ${newTags.mkString(",")}")
          writeJournalRowsStatements(newRepr, newTags)
        }
        // reduce to 1 statement
        .foldLeft[DBIO[Unit]](DBIO.successful[Unit] {})((priorStmt, nextStmt) => {
          priorStmt.andThen(nextStmt)
        })

      journalDB.run(stmt)
    })
    .run()
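  // Descriptive note added for this extract, not in the original source: deserializeFlow emits one
  // Try per legacy row and any failure aborts the whole stream. Rows are then grouped into pages of
  // `bufferSize`; each page is folded into a single DBIO executed with one journalDB.run call, and
  // mapAsync(1) keeps those batches strictly sequential. Because the legacy `ordering` value is
  // force-inserted into the new journal table, the original event order is preserved in the new schema.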
  /**
   * serialize the PersistentRepr and construct a JournalAkkaSerializationRow and set of matching tags
   *
   * @param repr the PersistentRepr
   * @param tags the tags
   * @param ordering the ordering of the PersistentRepr
   * @return the tuple of JournalAkkaSerializationRow and set of tags
   */
  private def serialize(
      repr: PersistentRepr,
      tags: Set[String],
      ordering: Long): (JournalAkkaSerializationRow, Set[String]) = {
    val serializedPayload: AkkaSerialization.AkkaSerialized =
      AkkaSerialization.serialize(serialization, repr.payload).get

    val serializedMetadata: Option[AkkaSerialization.AkkaSerialized] =
      repr.metadata.flatMap(m => AkkaSerialization.serialize(serialization, m).toOption)

    val row: JournalAkkaSerializationRow = JournalAkkaSerializationRow(
      ordering,
      repr.deleted,
      repr.persistenceId,
      repr.sequenceNr,
      repr.writerUuid,
      repr.timestamp,
      repr.manifest,
      serializedPayload.payload,
      serializedPayload.serId,
      serializedPayload.serManifest,
      serializedMetadata.map(_.payload),
      serializedMetadata.map(_.serId),
      serializedMetadata.map(_.serManifest))

    (row, tags)
  }

  private def writeJournalRowsStatements(
      journalSerializedRow: JournalAkkaSerializationRow,
      tags: Set[String]): DBIO[Unit] = {
    val journalInsert: DBIO[Long] = newJournalQueries.JournalTable
      .returning(newJournalQueries.JournalTable.map(_.ordering))
      .forceInsert(journalSerializedRow)

    val tagInserts = newJournalQueries.TagTable ++= tags
      .map(tag =>
        TagRow(
          Some(journalSerializedRow.ordering), // legacy tag key enabled by default.
          Some(journalSerializedRow.persistenceId),
          Some(journalSerializedRow.sequenceNumber),
          tag))
      .toSeq

    journalInsert.flatMap(_ => tagInserts.asInstanceOf[DBIO[Unit]])
  }
}

case object JournalMigrator {
  final val JournalConfig: String = "jdbc-journal"
  final val ReadJournalConfig: String = "jdbc-read-journal"
}
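JournalMigrator streams the legacy journal rows through the legacy serializer, re-serializes every event with Akka serialization and batch-inserts the result into the new journal and tag tables; the SnapshotMigrator in the next file is driven the same way. A minimal sketch of how the two might be run together, assuming a standalone migration process whose configuration contains the usual jdbc-journal, jdbc-read-journal and jdbc-snapshot-store sections for the target database; the object name, actor system name and timeouts below are illustrative, not part of the repository:

import akka.actor.ActorSystem
import akka.persistence.jdbc.migrator.{ JournalMigrator, SnapshotMigrator }
import slick.jdbc.PostgresProfile

import scala.concurrent.Await
import scala.concurrent.duration._

object MigrateToNewSchemaApp extends App {
  // Both migrators read the plugin config sections of this ActorSystem ("jdbc-journal",
  // "jdbc-read-journal", "jdbc-snapshot-store"), so the legacy and new tables must be
  // reachable through that configuration.
  implicit val system: ActorSystem = ActorSystem("akka-persistence-jdbc-migration")

  // migrate the events, then the snapshots; timeouts are illustrative
  Await.result(JournalMigrator(PostgresProfile).migrate(), 2.hours)
  Await.result(SnapshotMigrator(PostgresProfile).migrateLatest(), 1.hour) // or migrateAll() for every legacy snapshot row

  system.terminate()
}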
================================================
FILE: migrator/src/main/scala/akka/persistence/jdbc/migrator/SnapshotMigrator.scala
================================================
/*
 * Copyright (C) 2014 - 2019 Dennis Vriend
 * Copyright (C) 2019 - 2025 Lightbend Inc.
 */

package akka.persistence.jdbc.migrator

import akka.actor.ActorSystem
import akka.persistence.SnapshotMetadata
import akka.persistence.jdbc.config.{ ReadJournalConfig, SnapshotConfig }
import akka.persistence.jdbc.db.SlickExtension
import akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao
import akka.persistence.jdbc.snapshot.dao.DefaultSnapshotDao
import akka.persistence.jdbc.snapshot.dao.legacy.{ ByteArraySnapshotSerializer, SnapshotQueries }
import akka.persistence.jdbc.snapshot.dao.legacy.SnapshotTables.SnapshotRow
import akka.serialization.{ Serialization, SerializationExtension }
import akka.stream.scaladsl.{ Sink, Source }
import akka.Done
import akka.persistence.jdbc.migrator.SnapshotMigrator.{ NoParallelism, SnapshotStoreConfig }
import org.slf4j.{ Logger, LoggerFactory }
import slick.jdbc
import slick.jdbc.{ JdbcBackend, JdbcProfile }

import scala.concurrent.Future

/**
 * This will help migrate the legacy snapshot data onto the new snapshot schema with the
 * appropriate serialization
 *
 * @param system the actor system
 */
case class SnapshotMigrator(profile: JdbcProfile)(implicit system: ActorSystem) {
  val log: Logger = LoggerFactory.getLogger(getClass)

  import system.dispatcher
  import profile.api._

  private val snapshotConfig: SnapshotConfig = new SnapshotConfig(system.settings.config.getConfig(SnapshotStoreConfig))
  private val readJournalConfig: ReadJournalConfig = new ReadJournalConfig(
    system.settings.config.getConfig(JournalMigrator.ReadJournalConfig))

  private val snapshotDB: jdbc.JdbcBackend.Database =
    SlickExtension(system).database(system.settings.config.getConfig(SnapshotStoreConfig)).database
  private val journalDB: JdbcBackend.Database =
    SlickExtension(system).database(system.settings.config.getConfig(JournalMigrator.ReadJournalConfig)).database

  private val serialization: Serialization = SerializationExtension(system)
  private val queries: SnapshotQueries = new SnapshotQueries(profile, snapshotConfig.legacySnapshotTableConfiguration)
  private val serializer: ByteArraySnapshotSerializer = new ByteArraySnapshotSerializer(serialization)

  // get the instance of the default snapshot dao
  private val defaultSnapshotDao: DefaultSnapshotDao =
    new DefaultSnapshotDao(snapshotDB, profile, snapshotConfig, serialization)

  // get the instance of the legacy journal DAO
  private val legacyJournalDao: ByteArrayReadJournalDao =
    new ByteArrayReadJournalDao(journalDB, profile, readJournalConfig, SerializationExtension(system))

  private def toSnapshotData(row: SnapshotRow): (SnapshotMetadata, Any) = serializer.deserialize(row).get

  /**
   * migrate the latest snapshot data
   */
  def migrateLatest(): Future[Done] = {
    legacyJournalDao
      .allPersistenceIdsSource(Long.MaxValue)
      .mapAsync(NoParallelism) { persistenceId =>
        // let us fetch the latest snapshot for each persistenceId
        snapshotDB.run(queries.selectLatestByPersistenceId(persistenceId).result).map { rows =>
          rows.headOption.map(toSnapshotData).map { case (metadata, value) =>
            log.debug(s"migrating snapshot for ${metadata.toString}")
            defaultSnapshotDao.save(metadata, value)
          }
        }
      }
      .runWith(Sink.ignore)
  }

  /**
   * migrate all the legacy snapshot schema data into the new snapshot schema
   */
  def migrateAll(): Future[Done] = Source
    .fromPublisher(snapshotDB.stream(queries.SnapshotTable.result))
    .mapAsync(NoParallelism) { record =>
      val (metadata, value) = toSnapshotData(record)
      log.debug(s"migrating snapshot for ${metadata.toString}")
      defaultSnapshotDao.save(metadata, value)
    }
    .run()
}

case object SnapshotMigrator {
  final val SnapshotStoreConfig: String =
"jdbc-snapshot-store" final val NoParallelism: Int = 1 } ================================================ FILE: migrator/src/test/LICENSE ================================================ LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT THIS LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT (THIS "AGREEMENT") IS A LEGAL AGREEMENT BETWEEN YOU ("USER") AND LIGHTBEND, INC. ("LICENSOR"). BY CLICKING THE "I ACCEPT" BUTTON, OR INSTALLING, COPYING OR OTHERWISE USING LIGHTBEND’S SOFTWARE (THE “SOFTWARE”) AND ITS ASSOCIATED USER DOCUMENTATION, USER ACKNOWLEDGES THAT USER HAS REVIEWED AND ACCEPTS THIS AGREEMENT AND AGREES TO BE BOUND BY ALL OF ITS TERMS. IF YOU ARE AGREEING TO THIS AGREEMENT AS AN INDIVIDUAL, “USER” REFERS TO YOU INDIVIDUALLY. IF YOU ARE AGREEING TO THIS AGREEMENT AS A REPRESENTATIVE OF AN ENTITY, YOU REPRESENT THAT YOU HAVE THE AUTHORITY TO BIND THAT ENTITY AND “USER” REFERS TO THAT ENTITY AND ALL THE USERS ACCESSING THE SOFTWARE BY, THROUGH OR ON BEHALF OF THAT ENTITY. IF USER DOES NOT AGREE WITH ALL OF THE TERMS OF THIS AGREEMENT, DO NOT INSTALL, COPY OR OTHERWISE USE THE SOFTWARE OR ITS DOCUMENTATION. 1. DEFINITIONS. 1. “User System” means User’s website(s), computers, servers and other equipment and software upon and with which the Software is run. 2. “Documentation” means the user instructions and help files made available by Licensor for use with the Software, as may be updated from time to time by Licensor. 3. “Intellectual Property Rights” means all intellectual property rights or similar proprietary rights, including (a) patent rights and utility models, (b) copyrights and database rights, (c) trademarks, trade names, domain names and trade dress and the goodwill associated therewith, (d) trade secrets, (e) mask works, and (f) industrial design rights; in each case, including any registrations of, applications to register, and renewals and extensions of, any of the foregoing in any jurisdiction in the world. 4. “Open Source Software” means all software that is available under the GNU Affero General Public License (AGPL), GNU General Public License (GPL), GNU Lesser General Public License (LGPL), Mozilla Public License (MPL), Apache License, BSD licenses, or any other license that is approved by or similar to those approved by the Open Source Initiative (www.opensource.org). 2. LICENSES AND RESTRICTIONS. 1. License. Subject to User’s compliance with the terms and conditions of this Agreement, Licensor hereby grants to User, during the term of this Agreement, a limited, non-exclusive, non-transferable and non-sublicensable right to (i) install and execute one (1) copy of the Software in accordance with the Documentation, solely in binary form, and not for the benefit of any other person or entity, and (ii) access and use the Documentation, solely for User’s own internally purposes in support of End User’s permitted use of the Software. 2. Restrictions. 
User shall not, directly or indirectly, or permit any User or third party to: (a) reverse engineer, decompile, disassemble or otherwise attempt to discover the source code or underlying ideas or algorithms of the Software; (b) modify, translate, or create derivative works based on any element of the Software or any related Documentation (except to the extent applicable laws specifically prohibit such restriction for interoperability purposes, in which case you agree to first contact Licensor and provide Licensor an opportunity to create such changes as are needed for interoperability purposes); (c) use, rent, lease, distribute, sell, resell, assign, or otherwise transfer the Software or any copy thereof; (d) use the Software for timesharing purposes or otherwise for the benefit of any person or entity other than for the benefit of User and Users; (e) remove any proprietary notices from the Software or the Documentation or attempt to defeat any copy protection device included with the Software; or (f) use the Software for any purpose other than its intended purpose. 3. Reservation of Rights. Nothing in this Agreement shall be construed to give User a right to use or otherwise obtain access to any source code from which the Software is compiled or interpreted. Except as expressly granted in this Agreement, there are no other licenses granted to User, express, implied or by way of estoppel. All rights not granted in this Agreement are reserved by Licensor. 4. Open Source Software. Notwithstanding the foregoing, certain items of software included with the Software are Open Source Software and remains subject Open Source Software licenses. Such Open Source Software is not subject to the terms and conditions of this Agreement. Instead, each such item of Open Source Software is licensed under the terms of the end user license that accompanies such Open Source Software and nothing in this Agreement limits your rights under, or grants you rights that supersede, the terms and conditions of any applicable end user license for such Open Source Software. If required by any license for particular Open Source Software, Licensor makes such Open Source Software, and any Licensor modifications to that Open Source Software, available as further described in the Documentation. USE OF THE SOFTWARE IN ANY MANNER OTHER THAN AS PROVIDED IN THIS AGREEMENT IS STRICTLY PROHIBITED AND MAY INFRINGE ON THE INTELLECTUAL PROPERTY RIGHTS OF LICENSOR AND/OR ITS LICENSOR(S), SUBJECTING USER TO CIVIL AND CRIMINAL PENALTIES, INCLUDING WITHOUT LIMITATION MONETARY DAMAGES AND IMPRISONMENT FOR COPYRIGHT INFRINGEMENT. 3. USER OBLIGATIONS. 1. User System. User is responsible for (a) obtaining, deploying and maintaining the User System, and all computer hardware, software, modems, routers and other communications equipment necessary for User and its Users to install and use the Software; and (b) paying all third party fees and access charges incurred in connection with the foregoing. Licensor shall not be responsible for supplying any hardware, software or other equipment to User under this Agreement. 2. Compliance with Laws. User agrees to use the Software in compliance with all applicable laws, including local laws of the country or region in which User resides, and in compliance with all United States export laws and regulations. User shall not use the Software for any purpose prohibited by applicable law. 3. Trademarks and Tradenames. 
With regard to all copies of the Software permitted herein, User shall reproduce on such copies all Licensor copyright notices, and other proprietary notices appearing on and in the original copy of the software received from Licensor. Except as set forth in the foregoing sentence, User will not, during the term of this Agreement or thereafter, use any trademark of Licensor, or any word and/or symbol likely to be confused with any Licensor trademark, either alone or in any combination with other words and/or symbols. 4. SUPPORT AND MAINTENANCE. 1. Support. Licensor is not responsible for maintenance or support of the Software, or the equipment on which the Software resides or is used, under this Agreement. By accepting the license granted under this Agreement, User agrees that Licensor will be under no obligation to provide any support, maintenance or service in connection with the Software or such equipment. 2. Upgrades and Updates. Licensor may from time to time in its sole discretion develop and provide updates for the Software, which may include upgrades, bug fixes, patches, other error corrections, and/or new features (collectively, including related documentation, “Updates”). Updates may also modify or delete in their entirety certain features and functionality. You agree that Licensor has no obligation to provide any Updates or to continue to provide or enable any particular features or functionality. 5. REPRESENTATIONS AND WARRANTIES; DISCLAIMER. 1. Mutual Representations and Warranties. Each party represents, warrants and covenants that: (a) it has the full power and authority to enter into this Agreement and to perform its obligations hereunder, without the need for any consents, approvals or immunities not yet obtained; and (b) its acceptance of and performance under this Agreement shall not breach any oral or written agreement with any third party or any obligation owed by it to any third party to keep any information or materials in confidence or in trust. 2. Disclaimer. EXCEPT FOR THE WARRANTIES SET FORTH IN THIS SECTION 5, THE SOFTWARE AND THE DOCUMENTATION ARE PROVIDED ON AN AS-IS BASIS. USER’S USE OF THE SOFTWARE AND THE DOCUMENTATION IS AT USER’S OWN RISK. LICENSOR DOES NOT MAKE, AND HEREBY DISCLAIMS, ANY AND ALL OTHER EXPRESS, STATUTORY AND IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT AND TITLE, QUALITY, SUITABILITY, OPERABILITY, CONDITION, SYSTEM INTEGRATION, NON-INTERFERENCE, WORKMANSHIP, TRUTH, ACCURACY (OF DATA OR ANY OTHER INFORMATION OR CONTENT), ABSENCE OF DEFECTS, WHETHER LATENT OR PATENT, AND ANY WARRANTIES ARISING FROM A COURSE OF DEALING, USAGE, OR TRADE PRACTICE. LICENSOR ALSO DOES NOT WARRANT THAT THE FUNCTIONS CONTAINED IN, PERFORMED AND/OR PROVIDED BY THE SOFTWARE WILL MEET USER’S REQUIREMENTS, THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR ERROR-FREE, THAT THE SOFTWARE WILL BE COMPATIBLE OR WORK WITH ANY THIRD-PARTY SOFTWARE, APPLICATIONS OR DEVICES, OR THAT DEFECTS IN THE SOFTWARE WILL BE CORRECTED. USER EXPRESSLY ACKNOWLEDGES AND AGREES THAT, TO THE EXTENT PERMITTED BY APPLICABLE LAW, ITS USE OF THE SOFTWARE IS AT ITS SOLE RISK AND THAT THE ENTIRE RISK AS TO SATISFACTORY QUALITY, PERFORMANCE, ACCURACY AND EFFORT IS WITH USER. 
USER FURTHER ACKNOWLEDGES AND AGREES THAT THE SOFTWARE IS NOT INTENDED OR SUITABLE FOR USE IN SITUATIONS OR ENVIRONMENTS WHERE THE FAILURE OR TIME DELAYS OF, OR ERRORS OR INACCURACIES IN THE CONTENT, DATA OR INFORMATION PROVIDED BY THE SOFTWARE COULD LEAD TO DEATH, PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE. ANY WARRANTIES MADE BY LICENSOR ARE FOR THE BENEFIT OF USER ONLY AND NOT FOR THE BENEFIT OF ANY THIRD PARTY. THE SOFTWARE AND THE DOCUMENTATION ARE LICENSED AND NOT SOLD. NO AGENT OF LICENSOR IS AUTHORIZED TO ALTER OR EXPAND THE WARRANTIES OF LICENSOR AS SET FORTH HEREIN. 6. INDEMNIFICATION. User shall defend Licensor and its licensors and their respective officers, directors and employees (“Licensor Indemnified Parties”) from and against any and all Third-Party Claims which arise out of or relate to: (a) User’s use or alleged use of the Software other than as permitted under this Agreement; or (b) arising out of or relating to any violation of Section 2.2, or any violation of applicable laws. User shall pay all damages, costs and expenses, including attorneys’ fees and costs (whether by settlement or award of by a final judicial judgment) incurred by the Licensor Indemnified Parties from any such Third-Party Claim. In no event shall Licensor settle any claim without User’s prior written approval. Licensor may, at its own expense, engage separate counsel to advise Licensor regarding a Third-Party Claim and to participate in the defense of the claim, subject to User’s right to control the defense and settlement. If you are a New Jersey resident, this indemnification clause is to be only as broad and inclusive as is permitted by the law of the state of New Jersey. 7. CONFIDENTIALITY. 1. Confidential Information. User acknowledges that the Software contains valuable proprietary information and trade secrets and that unauthorized or improper use of the Software will result in irreparable harm to Licensor for which monetary damages would be inadequate and for which Licensor may be entitled to immediate injunctive relief. Accordingly, you will maintain the confidentiality of the proprietary information and not sell, license, publish, display, distribute, disclose or otherwise make available such proprietary information to any third party, nor use such information except as authorized by this Agreement. 2. Injunctive Relief. User agrees that any unauthorized disclosure of confidential information may cause immediate and irreparable injury to Licensor and that, in the event of such breach, Licensor will be entitled, in addition to any other available remedies, to seek immediate injunctive and other equitable relief, without bond and without the necessity of showing actual monetary damages. 8. PROPRIETARY RIGHTS. 1. Licensor. As between Licensor and User, all right, title and interest in the Software, the Documentation, and any other Licensor materials furnished or made available hereunder, and all modifications and enhancements thereof, and all suggestions, ideas and feedback proposed by User regarding the Software, including all copyright rights, patent rights and other Intellectual Property Rights in each of the foregoing, belong to and are retained solely by Licensor or Licensor’s licensors and providers, as applicable. User hereby does and will irrevocably assign to Licensor all evaluations, ideas, feedback and suggestions made by User to Licensor regarding the Software or the Documentation (collectively, “Feedback”) and all Intellectual Property Rights in the Feedback. 9. 
LIMITATION OF LIABILITY. 1. No Consequential Damages. NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR ANY INDIRECT, INCIDENTAL, SPECIAL, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR ANY DAMAGES FOR LOST DATA, BUSINESS INTERRUPTION, LOST PROFITS, LOST REVENUE OR LOST BUSINESS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT, EVEN IF LICENSOR OR ITS LICENSORS OR USER HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES, INCLUDING WITHOUT LIMITATION, ANY SUCH DAMAGES ARISING OUT OF THE LICENSING, PROVISION OR USE OF THE SOFTWARE OR THE RESULTS OF THE USE OF THE SOFTWARE. LICENSOR WILL NOT BE LIABLE FOR THE COST OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES. 2. LIMITS ON LIABILITY. NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR CUMULATIVE, AGGREGATE DAMAGES GREATER THAN FIVE HUNDRED DOLLARS (US $500). 3. ESSENTIAL PURPOSE. USER ACKNOWLEDGES THAT THE TERMS IN THIS SECTION 9 (LIMITATION OF LIABILITY) SHALL APPLY TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW AND SHALL APPLY EVEN IF AN EXCLUSIVE OR LIMITED REMEDY STATED HEREIN FAILS OF ITS ESSENTIAL PURPOSE. SOME JURISDICTIONS DO NOT ALLOW CERTAIN LIMITATIONS OF LIABILITY, SO SOME OR ALL OF THE ABOVE LIMITATIONS OF LIABILITY MAY NOT APPLY TO YOU. IF YOU ARE A NEW JERSEY RESIDENT, THIS LIMITATION OF LIABILITY SECTION IS TO BE ONLY AS BROAD AND INCLUSIVE AS IS PERMITTED BY THE LAW OF THE STATE OF NEW JERSEY. 10. TERM AND TERMINATION. 1. Term. This Agreement and User’s right to use the Software commences on earlier of the date that User: (a) installs the Software, (b) begins using the Software or (c) otherwise demonstrates assent to this Agreement. User’s right to use the Software shall continue until such time as this Agreement is terminated (the “Term”). 2. Termination for Cause. A party may terminate this Agreement, upon written notice to the other party in the event the other party files a petition for bankruptcy or has a petition for bankruptcy filed against it that is not dismissed within sixty (60) calendar days after filing or admits its inability to pay its debts as they mature, makes an assignment for the benefit of its creditors or ceases to function as a going concern or to conduct its operations in the normal course of business and such termination shall occur immediately upon notice. Licensor may terminate this Agreement at any time without notice if it ceases to support the Software, which Licensor may do in its sole discretion. In addition, this Agreement will terminate immediately and automatically without any notice if User breaches any of its terms and conditions. 3. Termination for Convenience. Either party may terminate this Agreement for convenience on at least thirty (30) calendar days prior written notice to the other party. User may also terminate this Agreement by ceasing all use of the Software. 4. Effects of Termination. Upon expiration or termination of this Agreement, User’s shall cease all use of the Software and the Documentation and shall destroy all copies of the Software in User’s possession or control. 5. Survival. This Section and Sections 1, 2.2 (Restrictions), 2.3 (Reservation of Rights), 2.4 (Open Source Software), 5.2 (Disclaimer), 6 (Indemnification), 7 (Confidentiality), 8 (Proprietary Rights), 9 (Limitation of Liability), 10.4 (Effects of Termination) and 11 (Miscellaneous) shall survive any termination or expiration of this Agreement. 11. MISCELLANEOUS. 1. Notices. 
Licensor may give notice to User by means of a general notice through electronic mail to User’s e-mail address, or by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service to User’s address on record with Licensor. User may give notice to Licensor by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service addressed to Licensor, Lightbend Inc., 580 California, #1231, San Francisco, CA 94104, Attention: User Support. Notice shall be deemed to have been given upon receipt or, if earlier, two (2) business days after mailing, as applicable. All communications and notices to be made or given pursuant to this Agreement shall be in the English language. 2. Governing Law. This Agreement and the rights and obligations of the parties to and under this agreement shall be governed by and construed under the laws of the United States and the State of California as applied to agreements entered into and to be performed in such State without giving effect to conflicts of laws rules or principles. The parties agree that the United Nations Convention on Contracts for the International Sale of Goods is specifically excluded from application to this Agreement and that the application of the Uniform Computer Information Transactions Act (UCITA) is specifically disclaimed. Any dispute arising out of or in connection with this Agreement, including but not limited to any question regarding its existence, interpretation, validity, performance, or termination, or any dispute between the parties arising from the parties' relationship created by this Agreement, shall be referred to and finally resolved by arbitration administered by the American Arbitration Association under its rules. The number of arbitrators shall be one (1). The parties shall endeavor to agree upon the sole arbitrator and jointly nominate the arbitrator. If the parties cannot agree upon the sole arbitrator within a time prescribed by AAA, the parties shall request the AAA to propose five (5) arbitrators and each party shall rank the proposed arbitrators. The AAA shall appoint an arbitrator from the list of five (5), based upon the parties' rankings. 3. U.S. Government Users. If User is a Federal Government entity, Licensor provides the Software and the Documentation, including related software and technology, for ultimate Federal Government end use solely in accordance with the following: Government technical data rights include only those rights customarily provided to the public with a commercial item or process and Government software rights related to the Software and the Documentation include only those rights customarily provided to the public, as defined in this Agreement. The technical data rights and customary commercial software license is provided in accordance with FAR 12.211 (Technical Data) and FAR 12.212 (Software) and, for Department of Defense transactions, DFAR 252.227-7015 (Technical Data – Commercial Items) and DFAR 227.7202-3 (Rights in Commercial Computer Software or Computer Software Documentation). If greater rights are needed, a mutually acceptable written addendum specifically conveying such rights must be included in this Agreement. 4. Export. The Software utilizes software and technology that may be subject to United States and foreign export controls. 
User acknowledges and agrees that the Software shall not be used, and none of the underlying information, software, or technology may be transferred or otherwise exported or re-exported to countries as to which the United States maintains an embargo (collectively, “Embargoed Countries”), or to or by a national or resident thereof, or any person or entity on the U.S. Department of Treasury’s List of Specially Designated Nationals or the U.S. Department of Commerce’s Table of Denial Orders (collectively, “Designated Nationals”). The lists of Embargoed Countries and Designated Nationals are subject to change without notice. By using the Software, User represents and warrants that it is not located in, under the control of, or a national or resident of an Embargoed Country or Designated National. The Software may use encryption technology that is subject to licensing requirements under the U.S. Export Administration Regulations, 15 C.F.R. Parts 730-774 and Council Regulation (EC) No. 1334/2000. User agrees to comply strictly with all applicable export laws and assume sole responsibility for obtaining licenses to export or re-export as may be required. Licensor and its licensors make no representation that the Software is appropriate or available for use in other locations. By using the Software, User represents and warrants that it is not located in any such country or on any such list. 5. General. User shall not assign its rights hereunder, or delegate the performance of any of its duties or obligations hereunder, whether by merger, acquisition, sale of assets, operation of law, or otherwise, without the prior written consent of Licensor. Any purported assignment in violation of the preceding sentence is null and void. Subject to the foregoing, this Agreement shall be binding upon, and inure to the benefit of, the successors and assigns of the parties thereto. Except as otherwise specified in this Agreement, this Agreement may be amended or supplemented only by a writing that refers explicitly to this Agreement and that is signed on behalf of both parties. No waiver will be implied from conduct or failure to enforce rights. No waiver will be effective unless in a writing signed on behalf of the party against whom the waiver is asserted. If any of this Agreement is found invalid or unenforceable that term will be enforced to the maximum extent permitted by law and the remainder of this Agreement will remain in full force. Nothing contained herein shall be construed as creating an agency, partnership, or other form of joint enterprise between the parties. This Agreement constitutes the entire agreement between the parties relating to this subject matter and supersedes all prior or simultaneous understandings, representations, discussions, negotiations, and agreements, whether written or oral. Neither party shall be liable to the other party or any third party for failure or delay in performing its obligations under this Agreement when such failure or delay is due to any cause beyond the control of the party concerned, including, without limitation, force majeure, governmental orders or restrictions, fire, or flood, provided that upon cessation of such events such party shall thereupon promptly perform or complete the performance of its obligations hereunder. ================================================ FILE: migrator/src/test/resources/general.conf ================================================ # Copyright (C) 2019 - 2022 Lightbend Inc. 
# // This file contains the general settings which are shared in all akka-persistence-jdbc tests

akka {
  stdout-loglevel = off // defaults to WARNING, can be disabled with off. The stdout-loglevel is only in effect during system startup and shutdown
  log-dead-letters-during-shutdown = on
  loglevel = debug
  log-dead-letters = on
  log-config-on-start = off // Log the complete configuration at INFO level when the actor system is started
  loggers = ["akka.event.slf4j.Slf4jLogger"]
  logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"

  actor {
    // Required until https://github.com/akka/akka/pull/28333 is available
    allow-java-serialization = on
    debug {
      receive = on // log all messages sent to an actor if that actor's receive method is a LoggingReceive
      autoreceive = off // log all special messages like Kill, PoisonPill etc. sent to all actors
      lifecycle = off // log all actor lifecycle events of all actors
      fsm = off // enable logging of all events, transitions and timers of FSM Actors that extend LoggingFSM
      event-stream = off // enable logging of subscriptions (subscribe/unsubscribe) on the ActorSystem.eventStream
    }
  }
}

docker {
  host = "localhost"
  host = ${?VM_HOST}
}

jdbc-journal {
  event-adapters {
    event-adapter = "akka.persistence.jdbc.migrator.MigratorSpec$AccountEventAdapter"
  }
  event-adapter-bindings {
    "akka.persistence.jdbc.migrator.MigratorSpec$AccountEvent" = event-adapter
  }
}

// Default configurations of the legacy and non-legacy snapshot tables are both set with the same name (tableName = "snapshot"),
// so we have to distinguish them with a different name
jdbc-snapshot-store.tables.legacy_snapshot.tableName = "legacy_snapshot"

slick.db.idleTimeout = 10000 // 10 seconds

================================================ FILE: migrator/src/test/resources/h2-application.conf ================================================

# Copyright (C) 2019 - 2022 Lightbend Inc.

// general.conf is included only for shared settings used for the akka-persistence-jdbc tests
include "general.conf"

akka {
  persistence {
    journal {
      plugin = "jdbc-journal"
    }
    snapshot-store {
      plugin = "jdbc-snapshot-store"
    }
  }
}

jdbc-journal {
  slick = ${slick}
}

# the akka-persistence-snapshot-store in use
jdbc-snapshot-store {
  slick = ${slick}
}

# the akka-persistence-query provider in use
jdbc-read-journal {
  slick = ${slick}
}

slick {
  profile = "slick.jdbc.H2Profile$"
  db {
    url = "jdbc:h2:mem:test-database;DATABASE_TO_UPPER=false;"
    user = "root"
    password = "root"
    driver = "org.h2.Driver"
    numThreads = 5
    maxConnections = 5
    minConnections = 1
  }
}

================================================ FILE: migrator/src/test/resources/mysql-application.conf ================================================

# Copyright (C) 2019 - 2022 Lightbend Inc.
// general.conf is included only for shared settings used for the akka-persistence-jdbc tests
include "general.conf"

akka {
  persistence {
    journal {
      plugin = "jdbc-journal"
      // Enable the line below to automatically start the journal when the actorsystem is started
      // auto-start-journals = ["jdbc-journal"]
    }
    snapshot-store {
      plugin = "jdbc-snapshot-store"
      // Enable the line below to automatically start the snapshot-store when the actorsystem is started
      // auto-start-snapshot-stores = ["jdbc-snapshot-store"]
    }
  }
}

jdbc-journal {
  slick = ${slick}
}

# the akka-persistence-snapshot-store in use
jdbc-snapshot-store {
  slick = ${slick}
}

# the akka-persistence-query provider in use
jdbc-read-journal {
  slick = ${slick}
}

slick {
  profile = "slick.jdbc.MySQLProfile$"
  db {
    host = ${docker.host}
    host = ${?DB_HOST}
    url = "jdbc:mysql://"${slick.db.host}":3306/docker?cachePrepStmts=true&cacheCallableStmts=true&cacheServerConfiguration=true&useLocalSessionState=true&elideSetAutoCommits=true&alwaysSendSetIsolation=false&enableQueryTimeouts=false&connectionAttributes=none&verifyServerCertificate=false&useSSL=false&allowPublicKeyRetrieval=true&useUnicode=true&useLegacyDatetimeCode=false&serverTimezone=UTC&rewriteBatchedStatements=true"
    user = "root"
    password = "root"
    driver = "com.mysql.cj.jdbc.Driver"
    numThreads = 5
    maxConnections = 5
    minConnections = 1
  }
}

================================================ FILE: migrator/src/test/resources/oracle-application.conf ================================================

# Copyright (C) 2019 - 2022 Lightbend Inc.

// general.conf is included only for shared settings used for the akka-persistence-jdbc tests
include "general.conf"
include "oracle-schema-overrides.conf"

akka {
  persistence {
    journal {
      plugin = "jdbc-journal"
      // Enable the line below to automatically start the journal when the actorsystem is started
      // auto-start-journals = ["jdbc-journal"]
    }
    snapshot-store {
      plugin = "jdbc-snapshot-store"
      // Enable the line below to automatically start the snapshot-store when the actorsystem is started
      // auto-start-snapshot-stores = ["jdbc-snapshot-store"]
    }
  }
}

jdbc-journal {
  slick = ${slick}
}

# the akka-persistence-snapshot-store in use
jdbc-snapshot-store {
  slick = ${slick}
}

# the akka-persistence-query provider in use
jdbc-read-journal {
  slick = ${slick}
}

slick {
  profile = "slick.jdbc.OracleProfile$"
  db {
    host = ${docker.host}
    host = ${?DB_HOST}
    url = "jdbc:oracle:thin:@//"${slick.db.host}":1521/FREEPDB1"
    user = "system"
    password = "oracle"
    driver = "oracle.jdbc.OracleDriver"
    numThreads = 5
    maxConnections = 5
    minConnections = 1
  }
}

================================================ FILE: migrator/src/test/resources/postgres-application.conf ================================================

# Copyright (C) 2019 - 2022 Lightbend Inc.
// general.conf is included only for shared settings used for the akka-persistence-jdbc tests
include "general.conf"

akka {
  persistence {
    journal {
      plugin = "jdbc-journal"
      // Enable the line below to automatically start the journal when the actorsystem is started
      // auto-start-journals = ["jdbc-journal"]
    }
    snapshot-store {
      plugin = "jdbc-snapshot-store"
      // Enable the line below to automatically start the snapshot-store when the actorsystem is started
      // auto-start-snapshot-stores = ["jdbc-snapshot-store"]
    }
  }
}

jdbc-journal {
  slick = ${slick}
}

# the akka-persistence-snapshot-store in use
jdbc-snapshot-store {
  slick = ${slick}
}

# the akka-persistence-query provider in use
jdbc-read-journal {
  slick = ${slick}
}

slick {
  profile = "slick.jdbc.PostgresProfile$"
  db {
    host = "localhost"
    host = ${?DB_HOST}
    url = "jdbc:postgresql://"${slick.db.host}":5432/docker?reWriteBatchedInserts=true"
    user = "docker"
    password = "docker"
    driver = "org.postgresql.Driver"
    numThreads = 5
    maxConnections = 5
    minConnections = 1
  }
}

================================================ FILE: migrator/src/test/resources/schema/h2/h2-create-schema-legacy.sql ================================================

CREATE TABLE IF NOT EXISTS PUBLIC."journal" ( "ordering" BIGINT AUTO_INCREMENT, "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" BIGINT NOT NULL, "deleted" BOOLEAN DEFAULT FALSE NOT NULL, "tags" VARCHAR(255) DEFAULT NULL, "message" BYTEA NOT NULL, PRIMARY KEY("persistence_id", "sequence_number") );

CREATE UNIQUE INDEX IF NOT EXISTS "journal_ordering_idx" ON PUBLIC."journal"("ordering");

CREATE TABLE IF NOT EXISTS PUBLIC."legacy_snapshot" ( "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" BIGINT NOT NULL, "created" BIGINT NOT NULL, "snapshot" BYTEA NOT NULL, PRIMARY KEY("persistence_id", "sequence_number") );

CREATE TABLE IF NOT EXISTS "durable_state" ( "global_offset" BIGINT NOT NULL AUTO_INCREMENT, "persistence_id" VARCHAR(255) NOT NULL, "revision" BIGINT NOT NULL, "state_payload" BLOB NOT NULL, "state_serial_id" INTEGER NOT NULL, "state_serial_manifest" VARCHAR, "tag" VARCHAR, "state_timestamp" BIGINT NOT NULL, PRIMARY KEY("persistence_id") );

CREATE INDEX "state_tag_idx" on "durable_state" ("tag");
CREATE INDEX "state_global_offset_idx" on "durable_state" ("global_offset");

================================================ FILE: migrator/src/test/resources/schema/h2/h2-create-schema.sql ================================================

CREATE TABLE IF NOT EXISTS "event_journal" ( "ordering" BIGINT UNIQUE NOT NULL AUTO_INCREMENT, "deleted" BOOLEAN DEFAULT false NOT NULL, "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" BIGINT NOT NULL, "writer" VARCHAR NOT NULL, "write_timestamp" BIGINT NOT NULL, "adapter_manifest" VARCHAR NOT NULL, "event_payload" BLOB NOT NULL, "event_ser_id" INTEGER NOT NULL, "event_ser_manifest" VARCHAR NOT NULL, "meta_payload" BLOB, "meta_ser_id" INTEGER, "meta_ser_manifest" VARCHAR, PRIMARY KEY("persistence_id","sequence_number") );

CREATE UNIQUE INDEX "event_journal_ordering_idx" on "event_journal" ("ordering");

CREATE TABLE IF NOT EXISTS "event_tag" ( "event_id" BIGINT, "persistence_id" VARCHAR(255), "sequence_number" BIGINT, "tag" VARCHAR NOT NULL, PRIMARY KEY("persistence_id", "sequence_number", "tag"), CONSTRAINT fk_event_journal FOREIGN KEY("persistence_id", "sequence_number") REFERENCES "event_journal"("persistence_id", "sequence_number") ON DELETE CASCADE );

CREATE TABLE IF NOT EXISTS "snapshot" ( "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" BIGINT NOT NULL, "created" BIGINT NOT NULL, "snapshot_ser_id" INTEGER NOT NULL, "snapshot_ser_manifest" VARCHAR NOT NULL, "snapshot_payload" BLOB NOT NULL, "meta_ser_id" INTEGER, "meta_ser_manifest" VARCHAR, "meta_payload" BLOB, PRIMARY KEY("persistence_id","sequence_number") );

CREATE SEQUENCE IF NOT EXISTS "global_offset_seq";

CREATE TABLE IF NOT EXISTS "durable_state" ( "global_offset" BIGINT DEFAULT NEXT VALUE FOR "global_offset_seq", "persistence_id" VARCHAR(255) NOT NULL, "revision" BIGINT NOT NULL, "state_payload" BLOB NOT NULL, "state_serial_id" INTEGER NOT NULL, "state_serial_manifest" VARCHAR, "tag" VARCHAR, "state_timestamp" BIGINT NOT NULL, PRIMARY KEY("persistence_id") );

CREATE INDEX IF NOT EXISTS "state_tag_idx" on "durable_state" ("tag");
CREATE INDEX IF NOT EXISTS "state_global_offset_idx" on "durable_state" ("global_offset");

================================================ FILE: migrator/src/test/resources/schema/h2/h2-drop-schema-legacy.sql ================================================

DROP TABLE IF EXISTS PUBLIC."journal";
DROP TABLE IF EXISTS PUBLIC."legacy_snapshot";
DROP TABLE IF EXISTS PUBLIC."durable_state";

================================================ FILE: migrator/src/test/resources/schema/h2/h2-drop-schema.sql ================================================

DROP TABLE IF EXISTS PUBLIC."event_tag";
DROP TABLE IF EXISTS PUBLIC."event_journal";
DROP TABLE IF EXISTS PUBLIC."snapshot";
DROP TABLE IF EXISTS PUBLIC."durable_state";
DROP SEQUENCE IF EXISTS PUBLIC."global_offset_seq";

================================================ FILE: migrator/src/test/resources/schema/mysql/mysql-create-schema-legacy.sql ================================================

CREATE TABLE IF NOT EXISTS journal ( ordering SERIAL, persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, deleted BOOLEAN DEFAULT FALSE NOT NULL, tags VARCHAR(255) DEFAULT NULL, message BLOB NOT NULL, PRIMARY KEY(persistence_id, sequence_number) );

CREATE UNIQUE INDEX journal_ordering_idx ON journal(ordering);

CREATE TABLE IF NOT EXISTS legacy_snapshot ( persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, created BIGINT NOT NULL, snapshot BLOB NOT NULL, PRIMARY KEY (persistence_id, sequence_number) );

================================================ FILE: migrator/src/test/resources/schema/mysql/mysql-create-schema.sql ================================================

CREATE TABLE IF NOT EXISTS event_journal ( ordering SERIAL, deleted BOOLEAN DEFAULT false NOT NULL, persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, writer TEXT NOT NULL, write_timestamp BIGINT NOT NULL, adapter_manifest TEXT NOT NULL, event_payload BLOB NOT NULL, event_ser_id INTEGER NOT NULL, event_ser_manifest TEXT NOT NULL, meta_payload BLOB, meta_ser_id INTEGER, meta_ser_manifest TEXT, PRIMARY KEY(persistence_id, sequence_number) );

CREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering);

CREATE TABLE IF NOT EXISTS event_tag ( event_id BIGINT UNSIGNED, persistence_id VARCHAR(255), sequence_number BIGINT, tag VARCHAR(255) NOT NULL, PRIMARY KEY(persistence_id, sequence_number, tag), FOREIGN KEY (persistence_id, sequence_number) REFERENCES event_journal(persistence_id, sequence_number) ON DELETE CASCADE );

CREATE TABLE IF NOT EXISTS snapshot ( persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, created BIGINT NOT NULL, snapshot_ser_id INTEGER NOT NULL, snapshot_ser_manifest TEXT NOT NULL, snapshot_payload BLOB NOT NULL, meta_ser_id INTEGER, meta_ser_manifest TEXT, meta_payload BLOB, PRIMARY KEY (persistence_id, sequence_number) );

================================================ FILE: migrator/src/test/resources/schema/mysql/mysql-drop-schema-legacy.sql ================================================

DROP TABLE IF EXISTS journal;
DROP TABLE IF EXISTS legacy_snapshot;

================================================ FILE: migrator/src/test/resources/schema/mysql/mysql-drop-schema.sql ================================================

DROP TABLE IF EXISTS event_tag;
DROP TABLE IF EXISTS event_journal;
DROP TABLE IF EXISTS snapshot;

================================================ FILE: migrator/src/test/resources/schema/oracle/oracle-create-schema-legacy.sql ================================================

CREATE SEQUENCE "ordering_seq" START WITH 1 INCREMENT BY 1 NOMAXVALUE
/
CREATE TABLE "journal" ( "ordering" NUMERIC, "deleted" char check ("deleted" in (0,1)) NOT NULL, "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" NUMERIC NOT NULL, "tags" VARCHAR(255) DEFAULT NULL, "message" BLOB NOT NULL, PRIMARY KEY("persistence_id", "sequence_number") )
/
CREATE UNIQUE INDEX "journal_ordering_idx" ON "journal"("ordering")
/
CREATE OR REPLACE TRIGGER "ordering_seq_trigger" BEFORE INSERT ON "journal" FOR EACH ROW BEGIN SELECT "ordering_seq".NEXTVAL INTO :NEW."ordering" FROM DUAL; END;
/
CREATE OR REPLACE PROCEDURE "reset_sequence" IS l_value NUMBER; BEGIN EXECUTE IMMEDIATE 'SELECT "ordering_seq".nextval FROM dual' INTO l_value; EXECUTE IMMEDIATE 'ALTER SEQUENCE "ordering_seq" INCREMENT BY -' || l_value || ' MINVALUE 0'; EXECUTE IMMEDIATE 'SELECT "ordering_seq".nextval FROM dual' INTO l_value; EXECUTE IMMEDIATE 'ALTER SEQUENCE "ordering_seq" INCREMENT BY 1 MINVALUE 0'; END;
/
CREATE TABLE "legacy_snapshot" ( "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" NUMERIC NOT NULL, "created" NUMERIC NOT NULL, "snapshot" BLOB NOT NULL, PRIMARY KEY ("persistence_id", "sequence_number") )
/

================================================ FILE: migrator/src/test/resources/schema/oracle/oracle-create-schema.sql ================================================

CREATE SEQUENCE EVENT_JOURNAL__ORDERING_SEQ START WITH 1 INCREMENT BY 1 NOMAXVALUE
/
CREATE TABLE EVENT_JOURNAL ( ORDERING NUMERIC UNIQUE, DELETED CHAR(1) DEFAULT 0 NOT NULL check (DELETED in (0, 1)), PERSISTENCE_ID VARCHAR(255) NOT NULL, SEQUENCE_NUMBER NUMERIC NOT NULL, WRITER VARCHAR(255) NOT NULL, WRITE_TIMESTAMP NUMBER(19) NOT NULL, ADAPTER_MANIFEST VARCHAR(255), EVENT_PAYLOAD BLOB NOT NULL, EVENT_SER_ID NUMBER(10) NOT NULL, EVENT_SER_MANIFEST VARCHAR(255), META_PAYLOAD BLOB, META_SER_ID NUMBER(10), META_SER_MANIFEST VARCHAR(255), PRIMARY KEY(PERSISTENCE_ID, SEQUENCE_NUMBER) )
/
CREATE OR REPLACE TRIGGER EVENT_JOURNAL__ORDERING_TRG before insert on EVENT_JOURNAL REFERENCING NEW AS NEW FOR EACH ROW WHEN (new.ORDERING is null) begin select EVENT_JOURNAL__ORDERING_seq.nextval into :new.ORDERING from sys.dual; end;
/
CREATE TABLE EVENT_TAG ( EVENT_ID NUMERIC, PERSISTENCE_ID VARCHAR(255), SEQUENCE_NUMBER NUMERIC, TAG VARCHAR(255) NOT NULL, PRIMARY KEY(PERSISTENCE_ID, SEQUENCE_NUMBER, TAG), FOREIGN KEY(PERSISTENCE_ID, SEQUENCE_NUMBER) REFERENCES EVENT_JOURNAL(PERSISTENCE_ID, SEQUENCE_NUMBER) ON DELETE CASCADE )
/
CREATE TABLE SNAPSHOT ( PERSISTENCE_ID VARCHAR(255) NOT NULL, SEQUENCE_NUMBER NUMERIC NOT NULL, CREATED NUMERIC NOT NULL, SNAPSHOT_SER_ID NUMBER(10) NOT NULL, SNAPSHOT_SER_MANIFEST VARCHAR(255), SNAPSHOT_PAYLOAD BLOB NOT NULL, META_SER_ID NUMBER(10), META_SER_MANIFEST VARCHAR(255), META_PAYLOAD BLOB, PRIMARY KEY(PERSISTENCE_ID,SEQUENCE_NUMBER) )
/
CREATE OR REPLACE PROCEDURE "reset_sequence" IS l_value NUMBER; BEGIN EXECUTE IMMEDIATE 'SELECT EVENT_JOURNAL__ORDERING_SEQ.nextval FROM dual' INTO l_value; EXECUTE IMMEDIATE 'ALTER SEQUENCE EVENT_JOURNAL__ORDERING_SEQ INCREMENT BY -' || l_value || ' MINVALUE 0'; EXECUTE IMMEDIATE 'SELECT EVENT_JOURNAL__ORDERING_SEQ.nextval FROM dual' INTO l_value; EXECUTE IMMEDIATE 'ALTER SEQUENCE EVENT_JOURNAL__ORDERING_SEQ INCREMENT BY 1 MINVALUE 0'; END;
/

================================================ FILE: migrator/src/test/resources/schema/oracle/oracle-drop-schema-legacy.sql ================================================

-- (ddl lock timeout in seconds) this allows tests which are still writing to the db to finish gracefully
ALTER SESSION SET ddl_lock_timeout = 150
/
DROP TABLE "journal" CASCADE CONSTRAINT
/
DROP TABLE "legacy_snapshot" CASCADE CONSTRAINT
/
DROP TABLE "deleted_to" CASCADE CONSTRAINT
/
DROP TRIGGER "ordering_seq_trigger"
/
DROP PROCEDURE "reset_sequence"
/
DROP SEQUENCE "ordering_seq"
/

================================================ FILE: migrator/src/test/resources/schema/oracle/oracle-drop-schema.sql ================================================

ALTER SESSION SET ddl_lock_timeout = 15
/
DROP TABLE EVENT_TAG CASCADE CONSTRAINT
/
DROP TABLE EVENT_JOURNAL CASCADE CONSTRAINT
/
DROP TABLE SNAPSHOT CASCADE CONSTRAINT
/
DROP TABLE SNAPSHOT CASCADE CONSTRAINT
/
DROP SEQUENCE EVENT_JOURNAL__ORDERING_SEQ
/
DROP TRIGGER EVENT_JOURNAL__ORDERING_TRG
/

================================================ FILE: migrator/src/test/resources/schema/postgres/postgres-create-schema-legacy.sql ================================================

CREATE TABLE IF NOT EXISTS public.journal ( ordering BIGSERIAL, persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, deleted BOOLEAN DEFAULT FALSE NOT NULL, tags VARCHAR(255) DEFAULT NULL, message BYTEA NOT NULL, PRIMARY KEY(persistence_id, sequence_number) );

CREATE UNIQUE INDEX IF NOT EXISTS journal_ordering_idx ON public.journal(ordering);

CREATE TABLE IF NOT EXISTS public.legacy_snapshot ( persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, created BIGINT NOT NULL, snapshot BYTEA NOT NULL, PRIMARY KEY(persistence_id, sequence_number) );

CREATE TABLE IF NOT EXISTS public.durable_state ( global_offset BIGSERIAL, persistence_id VARCHAR(255) NOT NULL, revision BIGINT NOT NULL, state_payload BYTEA NOT NULL, state_serial_id INTEGER NOT NULL, state_serial_manifest VARCHAR(255), tag VARCHAR, state_timestamp BIGINT NOT NULL, PRIMARY KEY(persistence_id) );

CREATE INDEX CONCURRENTLY state_tag_idx on public.durable_state (tag);
CREATE INDEX CONCURRENTLY state_global_offset_idx on public.durable_state (global_offset);

================================================ FILE: migrator/src/test/resources/schema/postgres/postgres-create-schema.sql ================================================

CREATE TABLE IF NOT EXISTS public.event_journal( ordering BIGSERIAL, persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, deleted BOOLEAN DEFAULT FALSE NOT NULL, writer VARCHAR(255) NOT NULL, write_timestamp BIGINT, adapter_manifest VARCHAR(255), event_ser_id INTEGER NOT NULL, event_ser_manifest VARCHAR(255) NOT NULL, event_payload BYTEA NOT NULL, meta_ser_id INTEGER, meta_ser_manifest VARCHAR(255), meta_payload BYTEA, PRIMARY KEY(persistence_id, sequence_number) );

CREATE UNIQUE INDEX event_journal_ordering_idx ON public.event_journal(ordering);

CREATE TABLE IF NOT EXISTS public.event_tag( event_id BIGINT, persistence_id VARCHAR(255), sequence_number BIGINT, tag VARCHAR(256), PRIMARY KEY(persistence_id, sequence_number, tag), CONSTRAINT fk_event_journal FOREIGN KEY(persistence_id, sequence_number) REFERENCES event_journal(persistence_id, sequence_number) ON DELETE CASCADE );

CREATE TABLE IF NOT EXISTS public.snapshot ( persistence_id VARCHAR(255) NOT NULL, sequence_number BIGINT NOT NULL, created BIGINT NOT NULL, snapshot_ser_id INTEGER NOT NULL, snapshot_ser_manifest VARCHAR(255) NOT NULL, snapshot_payload BYTEA NOT NULL, meta_ser_id INTEGER, meta_ser_manifest VARCHAR(255), meta_payload BYTEA, PRIMARY KEY(persistence_id, sequence_number) );

CREATE TABLE IF NOT EXISTS public.durable_state ( global_offset BIGSERIAL, persistence_id VARCHAR(255) NOT NULL, revision BIGINT NOT NULL, state_payload BYTEA NOT NULL, state_serial_id INTEGER NOT NULL, state_serial_manifest VARCHAR(255), tag VARCHAR, state_timestamp BIGINT NOT NULL, PRIMARY KEY(persistence_id) );

CREATE INDEX CONCURRENTLY state_tag_idx on public.durable_state (tag);
CREATE INDEX CONCURRENTLY state_global_offset_idx on public.durable_state (global_offset);

================================================ FILE: migrator/src/test/resources/schema/postgres/postgres-drop-schema-legacy.sql ================================================

DROP TABLE IF EXISTS public.journal;
DROP TABLE IF EXISTS public.legacy_snapshot;
DROP TABLE IF EXISTS public.durable_state;

================================================ FILE: migrator/src/test/resources/schema/postgres/postgres-drop-schema.sql ================================================

DROP TABLE IF EXISTS public.event_tag;
DROP TABLE IF EXISTS public.event_journal;
DROP TABLE IF EXISTS public.snapshot;
DROP TABLE IF EXISTS public.durable_state;

================================================ FILE: migrator/src/test/resources/schema/sqlserver/sqlserver-create-schema-legacy.sql ================================================

IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID(N'"journal"') AND type in (N'U'))
begin
CREATE TABLE journal ( "ordering" BIGINT IDENTITY(1,1) NOT NULL, "deleted" BIT DEFAULT 0 NOT NULL, "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" NUMERIC(10,0) NOT NULL, "tags" VARCHAR(255) NULL DEFAULT NULL, "message" VARBINARY(max) NOT NULL, PRIMARY KEY ("persistence_id", "sequence_number") )
CREATE UNIQUE INDEX journal_ordering_idx ON journal (ordering)
end;

IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID(N'"snapshot"') AND type in (N'U'))
CREATE TABLE legacy_snapshot ( "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" NUMERIC(10,0) NOT NULL, "created" NUMERIC NOT NULL, "snapshot" VARBINARY(max) NOT NULL, PRIMARY KEY ("persistence_id", "sequence_number") );
end;

================================================ FILE: migrator/src/test/resources/schema/sqlserver/sqlserver-create-schema.sql ================================================

CREATE TABLE event_journal( "ordering" BIGINT IDENTITY(1,1) NOT NULL, "deleted" BIT DEFAULT 0 NOT NULL, "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" NUMERIC(10,0) NOT NULL, "writer" VARCHAR(255) NOT NULL, "write_timestamp" BIGINT NOT NULL, "adapter_manifest" VARCHAR(MAX) NOT NULL, "event_payload" VARBINARY(MAX) NOT NULL, "event_ser_id" INTEGER NOT NULL, "event_ser_manifest" VARCHAR(MAX) NOT NULL, "meta_payload" VARBINARY(MAX), "meta_ser_id" INTEGER, "meta_ser_manifest" VARCHAR(MAX), PRIMARY KEY ("persistence_id", "sequence_number") );
CREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering);

CREATE TABLE event_tag ( "event_id" BIGINT, "persistence_id" VARCHAR(255), "sequence_number" NUMERIC(10,0), "tag" VARCHAR(255) NOT NULL, PRIMARY KEY ("event_id", "tag"), constraint "fk_event_journal" foreign key("event_id") references "dbo"."event_journal"("ordering") on delete CASCADE );

CREATE TABLE "snapshot" ( "persistence_id" VARCHAR(255) NOT NULL, "sequence_number" NUMERIC(10,0) NOT NULL, "created" BIGINT NOT NULL, "snapshot_ser_id" INTEGER NOT NULL, "snapshot_ser_manifest" VARCHAR(255) NOT NULL, "snapshot_payload" VARBINARY(MAX) NOT NULL, "meta_ser_id" INTEGER, "meta_ser_manifest" VARCHAR(255), "meta_payload" VARBINARY(MAX), PRIMARY KEY ("persistence_id", "sequence_number") )

================================================ FILE: migrator/src/test/resources/schema/sqlserver/sqlserver-drop-schema-legacy.sql ================================================

DROP TABLE IF EXISTS journal;
DROP TABLE IF EXISTS legacy_snapshot;

================================================ FILE: migrator/src/test/resources/schema/sqlserver/sqlserver-drop-schema.sql ================================================

DROP TABLE IF EXISTS event_tag;
DROP TABLE IF EXISTS event_journal;
DROP TABLE IF EXISTS snapshot;

================================================ FILE: migrator/src/test/resources/sqlserver-application.conf ================================================

# Copyright (C) 2019 - 2022 Lightbend Inc.

include "general.conf"

akka {
  persistence {
    journal {
      plugin = "jdbc-journal"
      // Enable the line below to automatically start the journal when the actorsystem is started
      // auto-start-journals = ["jdbc-journal"]
    }
    snapshot-store {
      plugin = "jdbc-snapshot-store"
      // Enable the line below to automatically start the snapshot-store when the actorsystem is started
      // auto-start-snapshot-stores = ["jdbc-snapshot-store"]
    }
  }
}

jdbc-journal {
  tables {
    journal {
      schemaName = "dbo"
    }
  }
  slick = ${slick}
}

# the akka-persistence-snapshot-store in use
jdbc-snapshot-store {
  tables {
    snapshot {
      schemaName = "dbo"
    }
  }
  slick = ${slick}
}

# the akka-persistence-query provider in use
jdbc-read-journal {
  tables {
    journal {
      schemaName = "dbo"
    }
  }
  slick = ${slick}
}

slick {
  profile = "slick.jdbc.SQLServerProfile$"
  db {
    host = ${docker.host}
    host = ${?DB_HOST}
    url = "jdbc:sqlserver://"${slick.db.host}":1433;databaseName=docker;integratedSecurity=false"
    user = "sa"
    password = "docker123abc#"
    driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
    numThreads = 5
    maxConnections = 5
    minConnections = 1
  }
}

================================================ FILE: migrator/src/test/scala/akka/persistence/jdbc/migrator/JournalMigratorTest.scala ================================================

/* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.migrator import akka.Done import akka.pattern.ask import akka.persistence.jdbc.db.SlickDatabase import akka.persistence.jdbc.migrator.MigratorSpec._ abstract class JournalMigratorTest(configName: String) extends MigratorSpec(configName) { it should "migrate the event journal" in { withLegacyActorSystem { implicit systemLegacy => withReadJournal { implicit readJournal => withTestActors() { (actorA1, actorA2, actorA3) => eventually { countJournal().futureValue shouldBe 0 (actorA1 ? CreateAccount(1)).futureValue // balance 1 (actorA2 ? CreateAccount(2)).futureValue // balance 2 (actorA3 ? CreateAccount(3)).futureValue // balance 3 (actorA1 ?
Deposit(3)).futureValue // balance 4 (actorA2 ? Deposit(2)).futureValue // balance 4 (actorA3 ? Deposit(1)).futureValue // balance 4 (actorA1 ? Withdraw(3)).futureValue // balance 1 (actorA2 ? Withdraw(2)).futureValue // balance 1 (actorA3 ? Withdraw(1)).futureValue // balance 1 (actorA1 ? State).mapTo[Int].futureValue shouldBe 1 (actorA2 ? State).mapTo[Int].futureValue shouldBe 2 (actorA3 ? State).mapTo[Int].futureValue shouldBe 3 countJournal().futureValue shouldBe 9 } } } } // legacy persistence withActorSystem { implicit systemNew => withReadJournal { implicit readJournal => eventually { countJournal().futureValue shouldBe 0 // before migration JournalMigrator(SlickDatabase.profile(config, "slick")).migrate().futureValue shouldBe Done countJournal().futureValue shouldBe 9 // after migration } withTestActors() { (actorB1, actorB2, actorB3) => eventually { (actorB1 ? State).mapTo[Int].futureValue shouldBe 1 (actorB2 ? State).mapTo[Int].futureValue shouldBe 2 (actorB3 ? State).mapTo[Int].futureValue shouldBe 3 } } } } // new persistence } it should "migrate the event journal preserving the order of events" in { withLegacyActorSystem { implicit systemLegacy => withReadJournal { implicit readJournal => withTestActors() { (actorA1, actorA2, actorA3) => (actorA1 ? CreateAccount(0)).futureValue (actorA2 ? CreateAccount(0)).futureValue (actorA3 ? CreateAccount(0)).futureValue for (i <- 1 to 999) { (actorA1 ? Deposit(i)).futureValue (actorA2 ? Deposit(i)).futureValue (actorA3 ? Deposit(i)).futureValue } eventually { countJournal().futureValue shouldBe 3000 } } } } // legacy persistence withActorSystem { implicit systemNew => withReadJournal { implicit readJournal => eventually { countJournal().futureValue shouldBe 0 // before migration JournalMigrator(SlickDatabase.profile(config, "slick")).migrate().futureValue shouldBe Done countJournal().futureValue shouldBe 3000 // after migration val allEvents: Seq[Seq[AccountEvent]] = events().futureValue allEvents.size shouldBe 3 val seq1: Seq[Int] = allEvents.head.map(_.amount) val seq2: Seq[Int] = allEvents(1).map(_.amount) val seq3: Seq[Int] = allEvents(2).map(_.amount) val expectedResult: Seq[Int] = 0 to 999 seq1 shouldBe expectedResult seq2 shouldBe expectedResult seq3 shouldBe expectedResult } } } // new persistence } it should "migrate the event journal preserving tags" in { withLegacyActorSystem { implicit systemLegacy => withReadJournal { implicit readJournal => withTestActors() { (actorA1, actorA2, actorA3) => (actorA1 ? CreateAccount(0)).futureValue (actorA2 ? CreateAccount(0)).futureValue (actorA3 ? CreateAccount(0)).futureValue for (i <- 1 to 999) { (actorA1 ? Deposit(i)).futureValue (actorA2 ? Deposit(i)).futureValue (actorA3 ? 
Deposit(i)).futureValue } eventually { countJournal().futureValue shouldBe 3000 } } } } // legacy persistence withActorSystem { implicit systemNew => withReadJournal { implicit readJournal => eventually { countJournal().futureValue shouldBe 0 // before migration JournalMigrator(SlickDatabase.profile(config, "slick")).migrate().futureValue shouldBe Done countJournal().futureValue shouldBe 3000 // after migration val evenEvents: Seq[AccountEvent] = eventsByTag(MigratorSpec.Even).futureValue evenEvents.size shouldBe 1500 evenEvents.forall(e => e.amount % 2 == 0) shouldBe true val oddEvents: Seq[AccountEvent] = eventsByTag(MigratorSpec.Odd).futureValue oddEvents.size shouldBe 1500 oddEvents.forall(e => e.amount % 2 == 1) shouldBe true } } } // new persistence } } class H2JournalMigratorTest extends JournalMigratorTest("h2-application.conf") with MigratorSpec.H2Cleaner ================================================ FILE: migrator/src/test/scala/akka/persistence/jdbc/migrator/MigratorSpec.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.migrator import akka.actor.{ ActorRef, ActorSystem, Props, Stash } import akka.event.LoggingReceive import akka.pattern.ask import akka.persistence.jdbc.SimpleSpec import akka.persistence.jdbc.config.{ JournalConfig, SlickConfiguration } import akka.persistence.jdbc.db.SlickDatabase import akka.persistence.jdbc.migrator.MigratorSpec._ import akka.persistence.jdbc.query.scaladsl.JdbcReadJournal import akka.persistence.jdbc.testkit.internal._ import akka.persistence.journal.EventSeq.single import akka.persistence.journal.{ EventAdapter, EventSeq, Tagged } import akka.persistence.query.PersistenceQuery import akka.persistence.{ PersistentActor, SaveSnapshotSuccess, SnapshotMetadata, SnapshotOffer } import akka.stream.Materializer import akka.stream.scaladsl.Sink import akka.util.Timeout import com.typesafe.config.{ Config, ConfigFactory, ConfigValue, ConfigValueFactory } import org.scalatest.BeforeAndAfterEach import org.slf4j.{ Logger, LoggerFactory } import slick.jdbc.JdbcBackend.{ Database, Session } import java.sql.Statement import scala.concurrent.duration.DurationInt import scala.concurrent.{ ExecutionContextExecutor, Future } abstract class MigratorSpec(val config: Config) extends SimpleSpec with BeforeAndAfterEach { // The db is initialized in the before and after each bocks var dbOpt: Option[Database] = None implicit val pc: PatienceConfig = PatienceConfig(timeout = 10.seconds) implicit val timeout: Timeout = Timeout(1.minute) private val logger: Logger = LoggerFactory.getLogger(this.getClass) private val cfg: Config = config.getConfig("jdbc-journal") private val journalConfig: JournalConfig = new JournalConfig(cfg) protected val newJournalTableName: String = journalConfig.eventJournalTableConfiguration.tableName protected val legacyJournalTableName: String = journalConfig.journalTableConfiguration.tableName protected val newTables: Seq[String] = List(journalConfig.eventTagTableConfiguration.tableName, journalConfig.eventJournalTableConfiguration.tableName) protected val legacyTables: Seq[String] = List(journalConfig.journalTableConfiguration.tableName) protected val tables: Seq[String] = legacyTables ++ newTables def this(config: String = "postgres-application.conf", configOverrides: Map[String, ConfigValue] = Map.empty) = this(configOverrides.foldLeft(ConfigFactory.load(config)) { case (conf, (path, configValue)) => 
conf.withValue(path, configValue) }) def db: Database = dbOpt.getOrElse { val db = SlickDatabase.database(cfg, new SlickConfiguration(cfg.getConfig("slick")), "slick.db") dbOpt = Some(db) db } protected def dropAndCreate(schemaType: SchemaType): Unit = { // blocking calls, usually done in our before test methods // legacy SchemaUtilsImpl.dropWithSlick(schemaType, logger, db, legacy = true) SchemaUtilsImpl.createWithSlick(schemaType, logger, db, legacy = true) // new SchemaUtilsImpl.dropWithSlick(schemaType, logger, db, legacy = false) SchemaUtilsImpl.createWithSlick(schemaType, logger, db, legacy = false) } def withSession[A](f: Session => A)(db: Database): A = { val session = db.createSession() try f(session) finally session.close() } def withStatement[A](f: Statement => A)(db: Database): A = withSession(session => session.withStatement()(f))(db) def closeDb(): Unit = { dbOpt.foreach(_.close()) dbOpt = None } override protected def afterEach(): Unit = { super.afterEach() closeDb() } override protected def afterAll(): Unit = { super.afterAll() closeDb() } protected def setupEmpty(persistenceId: Int)(implicit system: ActorSystem): ActorRef = system.actorOf(Props(new TestAccountActor(persistenceId))) def withTestActors(seq: Int = 1)(f: (ActorRef, ActorRef, ActorRef) => Unit)(implicit system: ActorSystem): Unit = { implicit val ec: ExecutionContextExecutor = system.dispatcher val refs = (seq until seq + 3).map(setupEmpty).toList try { // make sure we notice early if the actors failed to start (because of issues with journal) makes debugging // failing tests easier as we know it is not the actual interaction from the test that is the problem Future.sequence(refs.map(_ ? State)).futureValue f(refs.head, refs.drop(1).head, refs.drop(2).head) } finally killActors(refs: _*) } def withActorSystem(f: ActorSystem => Unit): Unit = { implicit val system: ActorSystem = ActorSystem("migrator-test", config) f(system) system.terminate().futureValue } def withLegacyActorSystem(f: ActorSystem => Unit): Unit = { val configOverrides: Map[String, ConfigValue] = Map( "jdbc-journal.dao" -> ConfigValueFactory.fromAnyRef( "akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao"), "jdbc-snapshot-store.dao" -> ConfigValueFactory.fromAnyRef( "akka.persistence.jdbc.snapshot.dao.legacy.ByteArraySnapshotDao"), "jdbc-read-journal.dao" -> ConfigValueFactory.fromAnyRef( "akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao")) val legacyDAOConfig = configOverrides.foldLeft(ConfigFactory.load(config)) { case (conf, (path, configValue)) => conf.withValue(path, configValue) } implicit val system: ActorSystem = ActorSystem("migrator-test", legacyDAOConfig) f(system) system.terminate().futureValue } def withReadJournal(f: JdbcReadJournal => Unit)(implicit system: ActorSystem): Unit = { val readJournal: JdbcReadJournal = PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier) f(readJournal) } def countJournal(filterPid: String => Boolean = _ => true)( implicit system: ActorSystem, mat: Materializer, readJournal: JdbcReadJournal): Future[Long] = readJournal .currentPersistenceIds() .filter(filterPid(_)) .mapAsync(1) { pid => readJournal .currentEventsByPersistenceId(pid, 0, Long.MaxValue) .map(_ => 1L) .runWith(Sink.seq) .map(_.sum)(system.dispatcher) } .runWith(Sink.seq) .map(_.sum)(system.dispatcher) def eventsByTag(tag: String)(implicit mat: Materializer, readJournal: JdbcReadJournal): Future[Seq[AccountEvent]] = readJournal .currentEventsByTag(tag, offset = 0) .map(_.event) .collect 
{ case e: AccountEvent => e } .runWith(Sink.seq) def events(filterPid: String => Boolean = _ => true)( implicit mat: Materializer, readJournal: JdbcReadJournal): Future[Seq[Seq[AccountEvent]]] = readJournal .currentPersistenceIds() .filter(filterPid(_)) .mapAsync(1) { pid => readJournal .currentEventsByPersistenceId(pid, fromSequenceNr = 0, toSequenceNr = Long.MaxValue) .map(e => e.event) .collect { case e: AccountEvent => e } .runWith(Sink.seq) } .runWith(Sink.seq) } object MigratorSpec { private final val Zero: Int = 0 private final val SnapshotInterval: Int = 10 val Even: String = "EVEN" val Odd: String = "ODD" /** Commands */ sealed trait AccountCommand extends Serializable final case class CreateAccount(amount: Int) extends AccountCommand final case class Deposit(amount: Int) extends AccountCommand final case class Withdraw(amount: Int) extends AccountCommand object State extends AccountCommand /** Events */ sealed trait AccountEvent extends Serializable { val amount: Int } final case class AccountCreated(override val amount: Int) extends AccountEvent final case class Deposited(override val amount: Int) extends AccountEvent final case class Withdrawn(override val amount: Int) extends AccountEvent /** Reply */ final case class CurrentBalance(balance: Int) class AccountEventAdapter extends EventAdapter { override def manifest(event: Any): String = event.getClass.getSimpleName def fromJournal(event: Any, manifest: String): EventSeq = event match { case event: AccountEvent => single(event) case _ => sys.error(s"Unexpected case '${event.getClass.getName}'") } def toJournal(event: Any): Any = event match { case event: AccountEvent => val tag: String = if (event.amount % 2 == 0) Even else Odd Tagged(event, Set(tag)) case _ => sys.error(s"Unexpected case '${event.getClass.getName}'") } } /** Actor */ class TestAccountActor(id: Int) extends PersistentActor with Stash { override val persistenceId: String = s"test-account-$id" var state: Int = Zero private def saveSnapshot(): Unit = { if (state % SnapshotInterval == 0) { saveSnapshot(state) } } override def receiveCommand: Receive = LoggingReceive { case SaveSnapshotSuccess(_: SnapshotMetadata) => () case CreateAccount(balance) => persist(AccountCreated(balance)) { (event: AccountCreated) => updateState(event) saveSnapshot() sender() ! akka.actor.Status.Success(event) } case Deposit(balance) => persist(Deposited(balance)) { (event: Deposited) => updateState(event) saveSnapshot() sender() ! akka.actor.Status.Success(event) } case Withdraw(balance) => persist(Withdrawn(balance)) { (event: Withdrawn) => updateState(event) saveSnapshot() sender() ! akka.actor.Status.Success(event) } case State => sender() ! 
akka.actor.Status.Success(state) } def updateState(event: AccountEvent): Unit = event match { case AccountCreated(amount) => state = state + amount case Deposited(amount) => state = state + amount case Withdrawn(amount) => state = state - amount } override def receiveRecover: Receive = LoggingReceive { case SnapshotOffer(_, snapshot: Int) => state = snapshot case event: AccountEvent => updateState(event) } } trait PostgresCleaner extends MigratorSpec { def clearPostgres(): Unit = { tables.foreach { name => withStatement(stmt => stmt.executeUpdate(s"DELETE FROM $name"))(db) } } override def beforeAll(): Unit = { dropAndCreate(Postgres) super.beforeAll() } override def beforeEach(): Unit = { dropAndCreate(Postgres) super.beforeEach() } } trait MysqlCleaner extends MigratorSpec { def clearMySQL(): Unit = { withStatement { stmt => stmt.execute("SET FOREIGN_KEY_CHECKS = 0") tables.foreach { name => stmt.executeUpdate(s"TRUNCATE $name") } stmt.execute("SET FOREIGN_KEY_CHECKS = 1") }(db) } override def beforeAll(): Unit = { dropAndCreate(MySQL) super.beforeAll() } override def beforeEach(): Unit = { clearMySQL() super.beforeEach() } } trait OracleCleaner extends MigratorSpec { def clearOracle(): Unit = { tables.foreach { name => withStatement(stmt => stmt.executeUpdate(s"""DELETE FROM "$name" """))(db) } withStatement(stmt => stmt.executeUpdate("""BEGIN "reset_sequence"; END; """))(db) } override def beforeAll(): Unit = { dropAndCreate(Oracle) super.beforeAll() } override def beforeEach(): Unit = { clearOracle() super.beforeEach() } } trait SqlServerCleaner extends MigratorSpec { var initial = true def clearSqlServer(): Unit = { val reset = if (initial) { initial = false 1 } else { 0 } withStatement { stmt => tables.foreach { name => stmt.executeUpdate(s"DELETE FROM $name") } stmt.executeUpdate(s"DBCC CHECKIDENT('$legacyJournalTableName', RESEED, $reset)") stmt.executeUpdate(s"DBCC CHECKIDENT('$newJournalTableName', RESEED, $reset)") }(db) } override def beforeAll(): Unit = { dropAndCreate(SqlServer) super.beforeAll() } override def afterAll(): Unit = { dropAndCreate(SqlServer) super.afterAll() } override def beforeEach(): Unit = { clearSqlServer() super.beforeEach() } } trait H2Cleaner extends MigratorSpec { def clearH2(): Unit = { tables.foreach { name => withStatement(stmt => stmt.executeUpdate(s"DELETE FROM $name"))(db) } } override def beforeEach(): Unit = { dropAndCreate(H2) super.beforeEach() } } } ================================================ FILE: migrator/src/test/scala/akka/persistence/jdbc/migrator/SnapshotMigratorTest.scala ================================================ /* * Copyright (C) 2014 - 2019 Dennis Vriend * Copyright (C) 2019 - 2025 Lightbend Inc. */ package akka.persistence.jdbc.migrator import akka.Done import akka.pattern.ask import akka.persistence.jdbc.db.SlickDatabase import akka.persistence.jdbc.migrator.MigratorSpec._ abstract class SnapshotMigratorTest(configName: String) extends MigratorSpec(configName) { it should "migrate snapshots" in { withLegacyActorSystem { implicit systemLegacy => withReadJournal { implicit readJournal => withTestActors() { (actorA1, actorA2, actorA3) => (actorA1 ? CreateAccount(1)).futureValue (actorA2 ? CreateAccount(1)).futureValue (actorA3 ? CreateAccount(1)).futureValue for (_ <- 1 to 99) { (actorA1 ? Deposit(1)).futureValue (actorA2 ? Deposit(1)).futureValue (actorA3 ? Deposit(1)).futureValue } eventually { (actorA1 ? State).mapTo[Int].futureValue shouldBe 100 (actorA2 ? State).mapTo[Int].futureValue shouldBe 100 (actorA3 ? 
State).mapTo[Int].futureValue shouldBe 100 countJournal().futureValue shouldBe 300 } } } } // legacy persistence withActorSystem { implicit systemNew => withReadJournal { implicit readJournal => eventually { countJournal().futureValue shouldBe 0 // before migration SnapshotMigrator(SlickDatabase.profile(config, "slick")).migrateAll().futureValue shouldBe Done countJournal().futureValue shouldBe 0 // after migration } withTestActors() { (actorB1, actorB2, actorB3) => eventually { (actorB1 ? State).mapTo[Int].futureValue shouldBe 100 (actorB2 ? State).mapTo[Int].futureValue shouldBe 100 (actorB3 ? State).mapTo[Int].futureValue shouldBe 100 } } } } // new persistence } } class H2SnapshotMigratorTest extends SnapshotMigratorTest("h2-application.conf") with MigratorSpec.H2Cleaner ================================================ FILE: migrator-integration/LICENSE ================================================ LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT THIS LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT (THIS "AGREEMENT") IS A LEGAL AGREEMENT BETWEEN YOU ("USER") AND LIGHTBEND, INC. ("LICENSOR"). BY CLICKING THE "I ACCEPT" BUTTON, OR INSTALLING, COPYING OR OTHERWISE USING LIGHTBEND’S SOFTWARE (THE “SOFTWARE”) AND ITS ASSOCIATED USER DOCUMENTATION, USER ACKNOWLEDGES THAT USER HAS REVIEWED AND ACCEPTS THIS AGREEMENT AND AGREES TO BE BOUND BY ALL OF ITS TERMS. IF YOU ARE AGREEING TO THIS AGREEMENT AS AN INDIVIDUAL, “USER” REFERS TO YOU INDIVIDUALLY. IF YOU ARE AGREEING TO THIS AGREEMENT AS A REPRESENTATIVE OF AN ENTITY, YOU REPRESENT THAT YOU HAVE THE AUTHORITY TO BIND THAT ENTITY AND “USER” REFERS TO THAT ENTITY AND ALL THE USERS ACCESSING THE SOFTWARE BY, THROUGH OR ON BEHALF OF THAT ENTITY. IF USER DOES NOT AGREE WITH ALL OF THE TERMS OF THIS AGREEMENT, DO NOT INSTALL, COPY OR OTHERWISE USE THE SOFTWARE OR ITS DOCUMENTATION. 1. DEFINITIONS. 1. “User System” means User’s website(s), computers, servers and other equipment and software upon and with which the Software is run. 2. “Documentation” means the user instructions and help files made available by Licensor for use with the Software, as may be updated from time to time by Licensor. 3. “Intellectual Property Rights” means all intellectual property rights or similar proprietary rights, including (a) patent rights and utility models, (b) copyrights and database rights, (c) trademarks, trade names, domain names and trade dress and the goodwill associated therewith, (d) trade secrets, (e) mask works, and (f) industrial design rights; in each case, including any registrations of, applications to register, and renewals and extensions of, any of the foregoing in any jurisdiction in the world. 4. “Open Source Software” means all software that is available under the GNU Affero General Public License (AGPL), GNU General Public License (GPL), GNU Lesser General Public License (LGPL), Mozilla Public License (MPL), Apache License, BSD licenses, or any other license that is approved by or similar to those approved by the Open Source Initiative (www.opensource.org). 2. LICENSES AND RESTRICTIONS. 1. License. 
Subject to User’s compliance with the terms and conditions of this Agreement, Licensor hereby grants to User, during the term of this Agreement, a limited, non-exclusive, non-transferable and non-sublicensable right to (i) install and execute one (1) copy of the Software in accordance with the Documentation, solely in binary form, and not for the benefit of any other person or entity, and (ii) access and use the Documentation, solely for User’s own internally purposes in support of End User’s permitted use of the Software. 2. Restrictions. User shall not, directly or indirectly, or permit any User or third party to: (a) reverse engineer, decompile, disassemble or otherwise attempt to discover the source code or underlying ideas or algorithms of the Software; (b) modify, translate, or create derivative works based on any element of the Software or any related Documentation (except to the extent applicable laws specifically prohibit such restriction for interoperability purposes, in which case you agree to first contact Licensor and provide Licensor an opportunity to create such changes as are needed for interoperability purposes); (c) use, rent, lease, distribute, sell, resell, assign, or otherwise transfer the Software or any copy thereof; (d) use the Software for timesharing purposes or otherwise for the benefit of any person or entity other than for the benefit of User and Users; (e) remove any proprietary notices from the Software or the Documentation or attempt to defeat any copy protection device included with the Software; or (f) use the Software for any purpose other than its intended purpose. 3. Reservation of Rights. Nothing in this Agreement shall be construed to give User a right to use or otherwise obtain access to any source code from which the Software is compiled or interpreted. Except as expressly granted in this Agreement, there are no other licenses granted to User, express, implied or by way of estoppel. All rights not granted in this Agreement are reserved by Licensor. 4. Open Source Software. Notwithstanding the foregoing, certain items of software included with the Software are Open Source Software and remains subject Open Source Software licenses. Such Open Source Software is not subject to the terms and conditions of this Agreement. Instead, each such item of Open Source Software is licensed under the terms of the end user license that accompanies such Open Source Software and nothing in this Agreement limits your rights under, or grants you rights that supersede, the terms and conditions of any applicable end user license for such Open Source Software. If required by any license for particular Open Source Software, Licensor makes such Open Source Software, and any Licensor modifications to that Open Source Software, available as further described in the Documentation. USE OF THE SOFTWARE IN ANY MANNER OTHER THAN AS PROVIDED IN THIS AGREEMENT IS STRICTLY PROHIBITED AND MAY INFRINGE ON THE INTELLECTUAL PROPERTY RIGHTS OF LICENSOR AND/OR ITS LICENSOR(S), SUBJECTING USER TO CIVIL AND CRIMINAL PENALTIES, INCLUDING WITHOUT LIMITATION MONETARY DAMAGES AND IMPRISONMENT FOR COPYRIGHT INFRINGEMENT. 3. USER OBLIGATIONS. 1. User System. User is responsible for (a) obtaining, deploying and maintaining the User System, and all computer hardware, software, modems, routers and other communications equipment necessary for User and its Users to install and use the Software; and (b) paying all third party fees and access charges incurred in connection with the foregoing. 
Licensor shall not be responsible for supplying any hardware, software or other equipment to User under this Agreement. 2. Compliance with Laws. User agrees to use the Software in compliance with all applicable laws, including local laws of the country or region in which User resides, and in compliance with all United States export laws and regulations. User shall not use the Software for any purpose prohibited by applicable law. 3. Trademarks and Tradenames. With regard to all copies of the Software permitted herein, User shall reproduce on such copies all Licensor copyright notices, and other proprietary notices appearing on and in the original copy of the software received from Licensor. Except as set forth in the foregoing sentence, User will not, during the term of this Agreement or thereafter, use any trademark of Licensor, or any word and/or symbol likely to be confused with any Licensor trademark, either alone or in any combination with other words and/or symbols. 4. SUPPORT AND MAINTENANCE. 1. Support. Licensor is not responsible for maintenance or support of the Software, or the equipment on which the Software resides or is used, under this Agreement. By accepting the license granted under this Agreement, User agrees that Licensor will be under no obligation to provide any support, maintenance or service in connection with the Software or such equipment. 2. Upgrades and Updates. Licensor may from time to time in its sole discretion develop and provide updates for the Software, which may include upgrades, bug fixes, patches, other error corrections, and/or new features (collectively, including related documentation, “Updates”). Updates may also modify or delete in their entirety certain features and functionality. You agree that Licensor has no obligation to provide any Updates or to continue to provide or enable any particular features or functionality. 5. REPRESENTATIONS AND WARRANTIES; DISCLAIMER. 1. Mutual Representations and Warranties. Each party represents, warrants and covenants that: (a) it has the full power and authority to enter into this Agreement and to perform its obligations hereunder, without the need for any consents, approvals or immunities not yet obtained; and (b) its acceptance of and performance under this Agreement shall not breach any oral or written agreement with any third party or any obligation owed by it to any third party to keep any information or materials in confidence or in trust. 2. Disclaimer. EXCEPT FOR THE WARRANTIES SET FORTH IN THIS SECTION 5, THE SOFTWARE AND THE DOCUMENTATION ARE PROVIDED ON AN AS-IS BASIS. USER’S USE OF THE SOFTWARE AND THE DOCUMENTATION IS AT USER’S OWN RISK. LICENSOR DOES NOT MAKE, AND HEREBY DISCLAIMS, ANY AND ALL OTHER EXPRESS, STATUTORY AND IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT AND TITLE, QUALITY, SUITABILITY, OPERABILITY, CONDITION, SYSTEM INTEGRATION, NON-INTERFERENCE, WORKMANSHIP, TRUTH, ACCURACY (OF DATA OR ANY OTHER INFORMATION OR CONTENT), ABSENCE OF DEFECTS, WHETHER LATENT OR PATENT, AND ANY WARRANTIES ARISING FROM A COURSE OF DEALING, USAGE, OR TRADE PRACTICE. LICENSOR ALSO DOES NOT WARRANT THAT THE FUNCTIONS CONTAINED IN, PERFORMED AND/OR PROVIDED BY THE SOFTWARE WILL MEET USER’S REQUIREMENTS, THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR ERROR-FREE, THAT THE SOFTWARE WILL BE COMPATIBLE OR WORK WITH ANY THIRD-PARTY SOFTWARE, APPLICATIONS OR DEVICES, OR THAT DEFECTS IN THE SOFTWARE WILL BE CORRECTED. 
USER EXPRESSLY ACKNOWLEDGES AND AGREES THAT, TO THE EXTENT PERMITTED BY APPLICABLE LAW, ITS USE OF THE SOFTWARE IS AT ITS SOLE RISK AND THAT THE ENTIRE RISK AS TO SATISFACTORY QUALITY, PERFORMANCE, ACCURACY AND EFFORT IS WITH USER. USER FURTHER ACKNOWLEDGES AND AGREES THAT THE SOFTWARE IS NOT INTENDED OR SUITABLE FOR USE IN SITUATIONS OR ENVIRONMENTS WHERE THE FAILURE OR TIME DELAYS OF, OR ERRORS OR INACCURACIES IN THE CONTENT, DATA OR INFORMATION PROVIDED BY THE SOFTWARE COULD LEAD TO DEATH, PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE. ANY WARRANTIES MADE BY LICENSOR ARE FOR THE BENEFIT OF USER ONLY AND NOT FOR THE BENEFIT OF ANY THIRD PARTY. THE SOFTWARE AND THE DOCUMENTATION ARE LICENSED AND NOT SOLD. NO AGENT OF LICENSOR IS AUTHORIZED TO ALTER OR EXPAND THE WARRANTIES OF LICENSOR AS SET FORTH HEREIN. 6. INDEMNIFICATION. User shall defend Licensor and its licensors and their respective officers, directors and employees (“Licensor Indemnified Parties”) from and against any and all Third-Party Claims which arise out of or relate to: (a) User’s use or alleged use of the Software other than as permitted under this Agreement; or (b) any violation of Section 2.2, or any violation of applicable laws. User shall pay all damages, costs and expenses, including attorneys’ fees and costs (whether by settlement or award of a final judicial judgment) incurred by the Licensor Indemnified Parties from any such Third-Party Claim. In no event shall Licensor settle any claim without User’s prior written approval. Licensor may, at its own expense, engage separate counsel to advise Licensor regarding a Third-Party Claim and to participate in the defense of the claim, subject to User’s right to control the defense and settlement. If you are a New Jersey resident, this indemnification clause is to be only as broad and inclusive as is permitted by the law of the state of New Jersey. 7. CONFIDENTIALITY. 1. Confidential Information. User acknowledges that the Software contains valuable proprietary information and trade secrets and that unauthorized or improper use of the Software will result in irreparable harm to Licensor for which monetary damages would be inadequate and for which Licensor may be entitled to immediate injunctive relief. Accordingly, you will maintain the confidentiality of the proprietary information and not sell, license, publish, display, distribute, disclose or otherwise make available such proprietary information to any third party, nor use such information except as authorized by this Agreement. 2. Injunctive Relief. User agrees that any unauthorized disclosure of confidential information may cause immediate and irreparable injury to Licensor and that, in the event of such breach, Licensor will be entitled, in addition to any other available remedies, to seek immediate injunctive and other equitable relief, without bond and without the necessity of showing actual monetary damages. 8. PROPRIETARY RIGHTS. 1. Licensor. As between Licensor and User, all right, title and interest in the Software, the Documentation, and any other Licensor materials furnished or made available hereunder, and all modifications and enhancements thereof, and all suggestions, ideas and feedback proposed by User regarding the Software, including all copyright rights, patent rights and other Intellectual Property Rights in each of the foregoing, belong to and are retained solely by Licensor or Licensor’s licensors and providers, as applicable. 
User hereby does and will irrevocably assign to Licensor all evaluations, ideas, feedback and suggestions made by User to Licensor regarding the Software or the Documentation (collectively, “Feedback”) and all Intellectual Property Rights in the Feedback. 9. LIMITATION OF LIABILITY. 1. No Consequential Damages. NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR ANY INDIRECT, INCIDENTAL, SPECIAL, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR ANY DAMAGES FOR LOST DATA, BUSINESS INTERRUPTION, LOST PROFITS, LOST REVENUE OR LOST BUSINESS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT, EVEN IF LICENSOR OR ITS LICENSORS OR USER HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES, INCLUDING WITHOUT LIMITATION, ANY SUCH DAMAGES ARISING OUT OF THE LICENSING, PROVISION OR USE OF THE SOFTWARE OR THE RESULTS OF THE USE OF THE SOFTWARE. LICENSOR WILL NOT BE LIABLE FOR THE COST OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES. 2. LIMITS ON LIABILITY. NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR CUMULATIVE, AGGREGATE DAMAGES GREATER THAN FIVE HUNDRED DOLLARS (US $500). 3. ESSENTIAL PURPOSE. USER ACKNOWLEDGES THAT THE TERMS IN THIS SECTION 9 (LIMITATION OF LIABILITY) SHALL APPLY TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW AND SHALL APPLY EVEN IF AN EXCLUSIVE OR LIMITED REMEDY STATED HEREIN FAILS OF ITS ESSENTIAL PURPOSE. SOME JURISDICTIONS DO NOT ALLOW CERTAIN LIMITATIONS OF LIABILITY, SO SOME OR ALL OF THE ABOVE LIMITATIONS OF LIABILITY MAY NOT APPLY TO YOU. IF YOU ARE A NEW JERSEY RESIDENT, THIS LIMITATION OF LIABILITY SECTION IS TO BE ONLY AS BROAD AND INCLUSIVE AS IS PERMITTED BY THE LAW OF THE STATE OF NEW JERSEY. 10. TERM AND TERMINATION. 1. Term. This Agreement and User’s right to use the Software commence on the earlier of the date that User: (a) installs the Software, (b) begins using the Software or (c) otherwise demonstrates assent to this Agreement. User’s right to use the Software shall continue until such time as this Agreement is terminated (the “Term”). 2. Termination for Cause. A party may terminate this Agreement upon written notice to the other party in the event the other party files a petition for bankruptcy or has a petition for bankruptcy filed against it that is not dismissed within sixty (60) calendar days after filing or admits its inability to pay its debts as they mature, makes an assignment for the benefit of its creditors or ceases to function as a going concern or to conduct its operations in the normal course of business, and such termination shall occur immediately upon notice. Licensor may terminate this Agreement at any time without notice if it ceases to support the Software, which Licensor may do in its sole discretion. In addition, this Agreement will terminate immediately and automatically without any notice if User breaches any of its terms and conditions. 3. Termination for Convenience. Either party may terminate this Agreement for convenience on at least thirty (30) calendar days prior written notice to the other party. User may also terminate this Agreement by ceasing all use of the Software. 4. Effects of Termination. Upon expiration or termination of this Agreement, User shall cease all use of the Software and the Documentation and shall destroy all copies of the Software in User’s possession or control. 5. Survival. 
This Section and Sections 1, 2.2 (Restrictions), 2.3 (Reservation of Rights), 2.4 (Open Source Software), 5.2 (Disclaimer), 6 (Indemnification), 7 (Confidentiality), 8 (Proprietary Rights), 9 (Limitation of Liability), 10.4 (Effects of Termination) and 11 (Miscellaneous) shall survive any termination or expiration of this Agreement. 11. MISCELLANEOUS. 1. Notices. Licensor may give notice to User by means of a general notice through electronic mail to User’s e-mail address, or by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service to User’s address on record with Licensor. User may give notice to Licensor by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service addressed to Licensor, Lightbend Inc., 580 California, #1231, San Francisco, CA 94104, Attention: User Support. Notice shall be deemed to have been given upon receipt or, if earlier, two (2) business days after mailing, as applicable. All communications and notices to be made or given pursuant to this Agreement shall be in the English language. 2. Governing Law. This Agreement and the rights and obligations of the parties to and under this agreement shall be governed by and construed under the laws of the United States and the State of California as applied to agreements entered into and to be performed in such State without giving effect to conflicts of laws rules or principles. The parties agree that the United Nations Convention on Contracts for the International Sale of Goods is specifically excluded from application to this Agreement and that the application of the Uniform Computer Information Transactions Act (UCITA) is specifically disclaimed. Any dispute arising out of or in connection with this Agreement, including but not limited to any question regarding its existence, interpretation, validity, performance, or termination, or any dispute between the parties arising from the parties' relationship created by this Agreement, shall be referred to and finally resolved by arbitration administered by the American Arbitration Association under its rules. The number of arbitrators shall be one (1). The parties shall endeavor to agree upon the sole arbitrator and jointly nominate the arbitrator. If the parties cannot agree upon the sole arbitrator within a time prescribed by AAA, the parties shall request the AAA to propose five (5) arbitrators and each party shall rank the proposed arbitrators. The AAA shall appoint an arbitrator from the list of five (5), based upon the parties' rankings. 3. U.S. Government Users. If User is a Federal Government entity, Licensor provides the Software and the Documentation, including related software and technology, for ultimate Federal Government end use solely in accordance with the following: Government technical data rights include only those rights customarily provided to the public with a commercial item or process and Government software rights related to the Software and the Documentation include only those rights customarily provided to the public, as defined in this Agreement. The technical data rights and customary commercial software license is provided in accordance with FAR 12.211 (Technical Data) and FAR 12.212 (Software) and, for Department of Defense transactions, DFAR 252.227-7015 (Technical Data – Commercial Items) and DFAR 227.7202-3 (Rights in Commercial Computer Software or Computer Software Documentation). 
If greater rights are needed, a mutually acceptable written addendum specifically conveying such rights must be included in this Agreement. 4. Export. The Software utilizes software and technology that may be subject to United States and foreign export controls. User acknowledges and agrees that the Software shall not be used, and none of the underlying information, software, or technology may be transferred or otherwise exported or re-exported to countries as to which the United States maintains an embargo (collectively, “Embargoed Countries”), or to or by a national or resident thereof, or any person or entity on the U.S. Department of Treasury’s List of Specially Designated Nationals or the U.S. Department of Commerce’s Table of Denial Orders (collectively, “Designated Nationals”). The lists of Embargoed Countries and Designated Nationals are subject to change without notice. By using the Software, User represents and warrants that it is not located in, under the control of, or a national or resident of an Embargoed Country or Designated National. The Software may use encryption technology that is subject to licensing requirements under the U.S. Export Administration Regulations, 15 C.F.R. Parts 730-774 and Council Regulation (EC) No. 1334/2000. User agrees to comply strictly with all applicable export laws and assume sole responsibility for obtaining licenses to export or re-export as may be required. Licensor and its licensors make no representation that the Software is appropriate or available for use in other locations. By using the Software, User represents and warrants that it is not located in any such country or on any such list. 5. General. User shall not assign its rights hereunder, or delegate the performance of any of its duties or obligations hereunder, whether by merger, acquisition, sale of assets, operation of law, or otherwise, without the prior written consent of Licensor. Any purported assignment in violation of the preceding sentence is null and void. Subject to the foregoing, this Agreement shall be binding upon, and inure to the benefit of, the successors and assigns of the parties thereto. Except as otherwise specified in this Agreement, this Agreement may be amended or supplemented only by a writing that refers explicitly to this Agreement and that is signed on behalf of both parties. No waiver will be implied from conduct or failure to enforce rights. No waiver will be effective unless in a writing signed on behalf of the party against whom the waiver is asserted. If any term of this Agreement is found invalid or unenforceable, that term will be enforced to the maximum extent permitted by law and the remainder of this Agreement will remain in full force. Nothing contained herein shall be construed as creating an agency, partnership, or other form of joint enterprise between the parties. This Agreement constitutes the entire agreement between the parties relating to this subject matter and supersedes all prior or simultaneous understandings, representations, discussions, negotiations, and agreements, whether written or oral. 
Neither party shall be liable to the other party or any third party for failure or delay in performing its obligations under this Agreement when such failure or delay is due to any cause beyond the control of the party concerned, including, without limitation, force majeure, governmental orders or restrictions, fire, or flood, provided that upon cessation of such events such party shall thereupon promptly perform or complete the performance of its obligations hereunder.

================================================
FILE: migrator-integration/src/test/scala/akka/persistence/jdbc/migrator/integration/JournalMigratorTest.scala
================================================
package akka.persistence.jdbc.migrator.integration

import akka.persistence.jdbc.migrator.MigratorSpec._
import akka.persistence.jdbc.migrator.JournalMigratorTest

class PostgresJournalMigratorTest extends JournalMigratorTest("postgres-application.conf") with PostgresCleaner

class MySQLJournalMigratorTest extends JournalMigratorTest("mysql-application.conf") with MysqlCleaner

class OracleJournalMigratorTest extends JournalMigratorTest("oracle-application.conf") with OracleCleaner

class SqlServerJournalMigratorTest extends JournalMigratorTest("sqlserver-application.conf") with SqlServerCleaner

================================================
FILE: migrator-integration/src/test/scala/akka/persistence/jdbc/migrator/integration/SnapshotMigratorTest.scala
================================================
package akka.persistence.jdbc.migrator.integration

import akka.persistence.jdbc.migrator.MigratorSpec._
import akka.persistence.jdbc.migrator.SnapshotMigratorTest

class PostgresSnapshotMigratorTest extends SnapshotMigratorTest("postgres-application.conf") with PostgresCleaner

class MySQLSnapshotMigratorTest extends SnapshotMigratorTest("mysql-application.conf") with MysqlCleaner

class OracleSnapshotMigratorTest extends SnapshotMigratorTest("oracle-application.conf") with OracleCleaner

class SqlServerSnapshotMigratorTest extends SnapshotMigratorTest("sqlserver-application.conf") with SqlServerCleaner

================================================
FILE: project/AutomaticModuleName.scala
================================================
/**
 * Copyright (C) 2009-2018 Lightbend Inc.
 */

import sbt.Keys._
import sbt.{ Def, _ }

/**
 * Helper to set Automatic-Module-Name in projects.
 *
 * !! DO NOT BE TEMPTED INTO AUTOMATICALLY DERIVING THE NAMES FROM PROJECT NAMES !!
 *
 * The names carry a lot of implications and DO NOT have to always align 1:1 with the group ids or package names,
 * though there should be of course a strong relationship between them.
 */
object AutomaticModuleName {
  private val AutomaticModuleName = "Automatic-Module-Name"

  def settings(name: String): Seq[Def.Setting[Task[Seq[PackageOption]]]] =
    Seq(Compile / packageBin / packageOptions += Package.ManifestAttributes(AutomaticModuleName -> name))
}

================================================
FILE: project/Dependencies.scala
================================================
import sbt._

object Dependencies {
  // Java Platform version for JavaDoc creation
  lazy val JavaDocLinkVersion = scala.util.Properties.javaSpecVersion

  val Scala213 = "2.13.17"
  val Scala3 = "3.3.7"
  val ScalaVersions = Seq(Scala213, Scala3)

  val AkkaVersion = "2.10.11"
  val AkkaBinaryVersion = VersionNumber(AkkaVersion).numbers match {
    case Seq(major, minor, _*) => s"$major.$minor"
  }

  val SlickVersion = "3.6.1"
  val ScalaTestVersion = "3.2.19"

  val JdbcDrivers = Seq(
    "org.postgresql" % "postgresql" % "42.7.7",
    "com.h2database" % "h2" % "2.3.232",
    "com.mysql" % "mysql-connector-j" % "9.4.0",
    "com.microsoft.sqlserver" % "mssql-jdbc" % "7.4.1.jre8")

  val Libraries: Seq[ModuleID] = Seq(
    "com.typesafe.akka" %% "akka-persistence-query" % AkkaVersion,
    "com.typesafe.slick" %% "slick" % SlickVersion,
    "org.slf4j" % "slf4j-api" % "2.0.17",
    "com.typesafe.slick" %% "slick-hikaricp" % SlickVersion,
    "ch.qos.logback" % "logback-classic" % "1.5.18" % Test,
    "com.typesafe.akka" %% "akka-slf4j" % AkkaVersion % Test,
    "com.typesafe.akka" %% "akka-persistence-tck" % AkkaVersion % Test,
    "com.typesafe.akka" %% "akka-stream-testkit" % AkkaVersion % Test,
    "com.typesafe.akka" %% "akka-testkit" % AkkaVersion % Test,
    "org.scalatest" %% "scalatest" % ScalaTestVersion % Test) ++ JdbcDrivers.map(_ % Test)

  val Migration: Seq[ModuleID] = Seq(
    "com.typesafe" % "config" % "1.4.5",
    "ch.qos.logback" % "logback-classic" % "1.5.18",
    "org.testcontainers" % "postgresql" % "1.21.3" % Test,
    "org.scalatest" %% "scalatest" % ScalaTestVersion % Test) ++ JdbcDrivers.map(_ % Provided)
}

================================================
FILE: project/IntegrationTests.scala
================================================
import de.heikoseeberger.sbtheader.HeaderPlugin.autoImport.headerSettings
import sbt._
import sbt.Keys._

object IntegrationTests {

  def settings: Seq[Def.Setting[_]] =
    Seq(publish / skip := true, doc / sources := Seq.empty, Test / fork := true)
}

================================================
FILE: project/ProjectAutoPlugin.scala
================================================
import com.geirsson.CiReleasePlugin
import de.heikoseeberger.sbtheader.HeaderPlugin
import de.heikoseeberger.sbtheader.HeaderPlugin.autoImport.{ headerLicense, HeaderLicense }
import sbt.Keys._
import sbt._
import sbt.plugins.JvmPlugin
import sbtdynver.DynVerPlugin.autoImport.dynverSonatypeSnapshots

object ProjectAutoPlugin extends AutoPlugin {
  object autoImport {}

  override val requires = JvmPlugin && HeaderPlugin

  override def globalSettings = Seq(
    organization := "com.lightbend.akka",
    organizationName := "Lightbend Inc.",
    organizationHomepage := Some(url("https://akka.io")),
    homepage := Some(url("https://doc.akka.io/libraries/akka-persistence-jdbc/current/")),
    scmInfo := Some(
      ScmInfo(url("https://github.com/akka/akka-persistence-jdbc"), "git@github.com:akka/akka-persistence-jdbc.git")),
    developers += Developer(
      "contributors",
      "Contributors",
      "akka.official@gmail.com",
      url("https://github.com/akka/akka-persistence-jdbc/graphs/contributors")),
    releaseNotesURL := (
      if ((ThisBuild / isSnapshot).value) None
      else
Some(url(s"https://github.com/akka/akka-persistence-jdbc/releases/tag/v${version.value}")) ), licenses := { val tagOrBranch = if (version.value.endsWith("SNAPSHOT")) "master" else "v" + version.value Seq(("BUSL-1.1", url(s"https://raw.githubusercontent.com/akka/akka-persistence-jdbc/${tagOrBranch}/LICENSE"))) }, description := "A plugin for storing events in an event journal akka-persistence-jdbc", startYear := Some(2014)) override val trigger: PluginTrigger = allRequirements override val projectSettings: Seq[Setting[_]] = Seq( crossVersion := CrossVersion.binary, crossScalaVersions := Dependencies.ScalaVersions, scalaVersion := Dependencies.Scala213, // append -SNAPSHOT to version when isSnapshot ThisBuild / dynverSonatypeSnapshots := true, Test / fork := false, Test / parallelExecution := false, Test / logBuffered := true, javacOptions ++= Seq("--release", "11"), scalacOptions ++= (CrossVersion.partialVersion(scalaVersion.value) match { case Some((2, _)) => Seq( "-encoding", "UTF-8", "-unchecked", "-Xlog-reflective-calls", "-language:higherKinds", "-language:implicitConversions", "-Ydelambdafy:method", "-release", "11") case Some((3, _)) => Seq( "-encoding", "UTF-8", "-unchecked", "-language:higherKinds", "-language:implicitConversions", "-release", "11") case _ => Seq.empty }), Compile / scalacOptions ++= (CrossVersion.partialVersion(scalaVersion.value) match { case Some((2, 13)) => disciplineScalacOptions case _ => Nil }).toSeq, Compile / doc / scalacOptions := scalacOptions.value ++ (CrossVersion.partialVersion(scalaVersion.value) match { case Some((2, _)) => Seq( "-doc-title", "Akka Persistence JDBC", "-doc-version", version.value, "-sourcepath", (ThisBuild / baseDirectory).value.toString, "-skip-packages", "akka.pattern", // for some reason Scaladoc creates this "-doc-source-url", { val branch = if (isSnapshot.value) "master" else s"v${version.value}" s"https://github.com/akka/akka-persistence-jdbc/tree/${branch}€{FILE_PATH_EXT}#L€{FILE_LINE}" }, "-doc-canonical-base-url", "https://doc.akka.io/api/akka-persistence-jdbc/current/", "-jdk-api-doc-base", s"https://docs.oracle.com/en/java/javase/${Dependencies.JavaDocLinkVersion}/docs/api") case Some((3, _)) => Seq( "-doc-title", "Akka Persistence JDBC", "-doc-version", version.value, "-sourcepath", (ThisBuild / baseDirectory).value.toString, "-doc-source-url", { val branch = if (isSnapshot.value) "master" else s"v${version.value}" s"https://github.com/akka/akka-persistence-jdbc/tree/${branch}€{FILE_PATH_EXT}#L€{FILE_LINE}" }, "-doc-canonical-base-url", "https://doc.akka.io/api/akka-persistence-jdbc/current/", s"-external-mappings:https://docs.oracle.com/en/java/javase/${Dependencies.JavaDocLinkVersion}/docs/api") case _ => throw new IllegalArgumentException("Unsupported Major Scala Version") }), // show full stack traces and test case durations Test / testOptions += Tests.Argument("-oDF"), headerLicense := Some(HeaderLicense.Custom("""|Copyright (C) 2014 - 2019 Dennis Vriend |Copyright (C) 2019 - 2025 Lightbend Inc. |""".stripMargin)), resolvers += Resolver.jcenterRepo) val disciplineScalacOptions = Set( // "-Xfatal-warnings", "-feature", "-deprecation", "-Xlint", "-Ywarn-dead-code", "-Ywarn-unused:_", "-Ywarn-extra-implicit") } ================================================ FILE: project/Publish.scala ================================================ /* * Copyright (C) 2023 Lightbend Inc. 
 */

import java.util.concurrent.atomic.AtomicBoolean

import scala.language.postfixOps

import sbt.{ Def, _ }
import Keys._
import com.geirsson.CiReleasePlugin
import com.jsuereth.sbtpgp.PgpKeys.publishSigned
import xerial.sbt.Sonatype.autoImport.sonatypeProfileName

/**
 * For projects that are not published.
 */
object NoPublish extends AutoPlugin {
  override def requires = plugins.JvmPlugin

  override def projectSettings =
    Seq(publish / skip := true, publishArtifact := false, publish := {}, publishLocal := {})
}

object Publish extends AutoPlugin {
  override def requires = plugins.JvmPlugin && ProjectAutoPlugin
  override def trigger = AllRequirements

  lazy val beforePublishTask = taskKey[Unit]("setup before publish")

  lazy val beforePublishDone = new AtomicBoolean(false)

  def beforePublish(snapshot: Boolean) = {
    if (beforePublishDone.compareAndSet(false, true)) {
      CiReleasePlugin.setupGpg()
      if (!snapshot)
        cloudsmithCredentials(validate = true)
    }
  }

  override def projectSettings: Seq[Def.Setting[_]] = Seq(
    sonatypeProfileName := "com.lightbend",
    beforePublishTask := beforePublish(isSnapshot.value),
    publishSigned := publishSigned.dependsOn(beforePublishTask).value,
    publishTo := (if (isSnapshot.value)
      Some("Cloudsmith API".at("https://maven.cloudsmith.io/lightbend/akka-snapshots/"))
    else
      Some("Cloudsmith API".at("https://maven.cloudsmith.io/lightbend/akka/"))),
    credentials ++= cloudsmithCredentials(validate = false))

  def cloudsmithCredentials(validate: Boolean): Seq[Credentials] = {
    (sys.env.get("PUBLISH_USER"), sys.env.get("PUBLISH_PASSWORD")) match {
      case (Some(user), Some(password)) =>
        Seq(Credentials("Cloudsmith API", "maven.cloudsmith.io", user, password))
      case _ =>
        if (validate)
          throw new Exception("Publishing credentials expected in `PUBLISH_USER` and `PUBLISH_PASSWORD`.")
        else
          Nil
    }
  }
}

================================================
FILE: project/build.properties
================================================
#
# Copyright 2016 Dennis Vriend
# Copyright (C) 2019 - 2022 Lightbend Inc.
#
sbt.version=1.11.7

================================================
FILE: project/plugins.sbt
================================================
// compliance
addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.10.0")
addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.6")
addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "1.1.4")
addSbtPlugin("com.lightbend.sbt" % "sbt-java-formatter" % "0.8.0")
// for dependency analysis
addDependencyTreePlugin
// release
addSbtPlugin("com.github.sbt" % "sbt-ci-release" % "1.9.3")
addSbtPlugin("com.github.sbt" % "sbt-dynver" % "5.1.1")
// docs
addSbtPlugin("io.akka" % "sbt-paradox-akka" % "25.10.2")
addSbtPlugin("com.github.sbt" % "sbt-site" % "1.7.0")
addSbtPlugin("com.github.sbt" % "sbt-site-paradox" % "1.7.0")
addSbtPlugin("com.github.sbt" % "sbt-unidoc" % "0.6.0")
addSbtPlugin("com.lightbend.sbt" % "sbt-publish-rsync" % "0.3")

================================================
FILE: project/project-info.conf
================================================
project-info {
  version: "current"
  scaladoc: "https://doc.akka.io/api/akka-persistence-jdbc/"${project-info.version}"/akka/persistence/jdbc/"
  scala-versions: ["2.13", "3.3"]
  shared-info {
    jdk-versions: ["Eclipse Temurin JDK 11", "Eclipse Temurin JDK 17", "Eclipse Temurin JDK 21"]
    snapshots: {
      url: "snapshots.html"
      text: "Snapshots are available"
      new-tab: false
    }
    issues: {
      url: "https://github.com/akka/akka-persistence-jdbc/issues"
      text: "GitHub issues"
    }
    release-notes: {
      url: "https://github.com/akka/akka-persistence-jdbc/releases"
      text: "GitHub releases"
    }
    forums: [
      {
        text: "Lightbend Discuss"
        url: "https://discuss.akka.io/c/akka/"
      }
    ]
  }
  core: ${project-info.shared-info} {
    title: "Akka Persistence JDBC"
    jpms-name: "akka.persistence.jdbc"
    levels: [
      {
        readiness: Supported
        since: "2022-10-05"
        since-version: "5.1.0"
      },
      {
        readiness: Supported
        since: "2021-01-21"
        since-version: "5.0.0"
      },
      {
        readiness: Supported
        since: "2020-06-09"
        since-version: "4.0.0"
      },
      {
        readiness: CommunityDriven
        since: "2014-07-04"
        since-version: "1.0.0"
      }
    ]
  }
}

================================================
FILE: scripts/cat-log.sh
================================================
#!/bin/sh
# ---------- helper script to separate log files in build
printf "\n\n\n"
ls -lh $1
printf "\n\n"
cat $1

================================================
FILE: scripts/create-release-issue.sh
================================================
#!/bin/bash
VERSION=$1
if [ -z $VERSION ]
then
  echo specify the version name to be released, eg. 1.0.0
else
  sed -e 's/\$VERSION\$/'$VERSION'/g' docs/release-train-issue-template.md > /tmp/release-$VERSION.md
  echo Created $(gh issue create -F /tmp/release-$VERSION.md --title "Release $VERSION" --milestone $VERSION --web)
fi

================================================
FILE: scripts/docker-compose.yml
================================================
services:
  postgres:
    image: postgres:latest
    container_name: postgres-test
    environment:
      - "TZ=Europe/Amsterdam"
      - "POSTGRES_USER=docker"
      - "POSTGRES_PASSWORD=docker"
    ports:
      - "5432:5432"
    # credentials (docker:docker)
  mysql:
    image: mysql:latest
    container_name: mysql-test
    environment:
      - "TZ=Europe/Amsterdam"
      - "MYSQL_ROOT_PASSWORD=root"
      - "MYSQL_DATABASE=docker"
    ports:
      - "3306:3306"
    # credentials (root:root)
  oracle:
    image: gvenzl/oracle-free:slim
    container_name: oracle-test
    environment:
      - "TZ=Europe/Amsterdam"
      - "DBCA_TOTAL_MEMORY=1024"
      - "ORACLE_PASSWORD=oracle"
    ports:
      - "1521:1521"
    # DB_CONN: credentials (system:oracle)
  sqlserver:
    image: mcr.microsoft.com/mssql/server:2019-latest
    container_name: sqlserver-test
    environment:
      - "TZ=Europe/Amsterdam"
      - "ACCEPT_EULA=Y"
      - "MSSQL_SA_PASSWORD=docker123abc#"
    ports:
      - "1433:1433"
    # credentials (sa:docker123abc#)

================================================
FILE: scripts/launch-all.sh
================================================
#!/bin/bash
#
# Copyright 2016 Dennis Vriend
# Copyright (C) 2019 - 2022 Lightbend Inc.
#

export VM_HOST="${VM_HOST:-localhost}"

# Wait for a certain service to become available
# Usage: wait 3306 Mysql
wait() {
  while true; do
    if ! nc -z $VM_HOST $1
    then
      echo "$2 not available, retrying..."
      sleep 1
    else
      echo "$2 is available"
      break;
    fi
  done;
}

docker compose -f scripts/docker-compose.yml kill
docker compose -f scripts/docker-compose.yml rm -f
docker compose -f scripts/docker-compose.yml up -d

wait 3306 MySQL
wait 5432 Postgres
wait 1521 Oracle
wait 1433 SqlServer

================================================
FILE: scripts/launch-mysql.sh
================================================
#!/bin/bash
#
# Copyright 2016 Dennis Vriend
# Copyright (C) 2019 - 2022 Lightbend Inc.
#

export VM_HOST="${VM_HOST:-localhost}"

# Wait for a certain service to become available
# Usage: wait 3306 Mysql
wait() {
  while true; do
    if ! nc -z $VM_HOST $1
    then
      echo "$2 not available, retrying..."
      sleep 1
    else
      echo "$2 is available"
      break;
    fi
  done;
}

docker compose -f scripts/docker-compose.yml kill mysql
docker compose -f scripts/docker-compose.yml rm -f mysql
docker compose -f scripts/docker-compose.yml up -d mysql

wait 3306 MySQL

================================================
FILE: scripts/launch-oracle.sh
================================================
#!/bin/bash
#
# Copyright 2016 Dennis Vriend
# Copyright (C) 2019 - 2022 Lightbend Inc.
#

export VM_HOST="${VM_HOST:-localhost}"

# Wait for a certain service to become available
# Usage: wait 3306 Mysql
wait() {
  while true; do
    if ! nc -z $VM_HOST $1
    then
      echo "$2 not available, retrying..."
      sleep 1
    else
      echo "$2 is available"
      break;
    fi
  done;
}

docker compose -f scripts/docker-compose.yml kill oracle
docker compose -f scripts/docker-compose.yml rm -f oracle
docker compose -f scripts/docker-compose.yml up -d oracle

wait 1521 Oracle

================================================
FILE: scripts/launch-postgres.sh
================================================
#!/bin/bash
#
# Copyright 2016 Dennis Vriend
# Copyright (C) 2019 - 2022 Lightbend Inc.
#

export VM_HOST="${VM_HOST:-localhost}"

# Wait for a certain service to become available
# Usage: wait 3306 Mysql
wait() {
  while true; do
    if ! nc -z $VM_HOST $1
    then
      echo "$2 not available, retrying..."
      sleep 1
    else
      echo "$2 is available"
      break;
    fi
  done;
}

docker compose -f scripts/docker-compose.yml kill postgres
docker compose -f scripts/docker-compose.yml rm -f postgres
docker compose -f scripts/docker-compose.yml up -d postgres

wait 5432 Postgres

================================================
FILE: scripts/launch-sqlserver.sh
================================================
#!/bin/bash
#
# Copyright 2016 Dennis Vriend
# Copyright (C) 2019 - 2022 Lightbend Inc.
#

export VM_HOST="${VM_HOST:-localhost}"

# Wait for a certain service to become available
# Usage: wait 1433 SqlServer
wait() {
  while true; do
    if ! nc -z $VM_HOST $1
    then
      echo "$2 not available, retrying..."
      sleep 1
    else
      echo "$2 is available"
      break;
    fi
  done;
}

docker compose -f scripts/docker-compose.yml kill sqlserver
docker compose -f scripts/docker-compose.yml rm -f sqlserver
docker compose -f scripts/docker-compose.yml up -d sqlserver

wait 1433 SqlServer

docker exec sqlserver-test /opt/mssql-tools18/bin/sqlcmd -N o -S localhost -U sa -P docker123abc# -Q "create database docker"

================================================
FILE: scripts/link-validator.conf
================================================
// config for https://github.com/ennru/site-link-validator/
site-link-validator {
  root-dir = "./docs/target/site/"
  # relative to `root-dir`
  start-file = "libraries/akka-persistence-jdbc/snapshot/index.html"

  # Resolves URLs with the given prefix as local files instead
  link-mappings = [
    {
      prefix = "https://doc.akka.io/libraries/akka-persistence-jdbc/snapshot/"
      replace = "/libraries/akka-persistence-jdbc/snapshot/"
    }
    {
      prefix = "https://doc.akka.io/api/akka-persistence-jdbc/snapshot/"
      replace = "/api/akka-persistence-jdbc/snapshot/"
    }
  ]

  ignore-missing-local-files-regex = ""

  ignore-prefixes = [
    # GitHub will block with "429 Too Many Requests"
    "https://github.com/akka/akka-persistence-jdbc/"
    # MVN repository forbids access after a few requests
    "https://mvnrepository.com/artifact/",
    "https://repo.akka.io/"
  ]

  non-https-whitelist = [
    "http://logback.qos.ch/"
    "http://www.slf4j.org/"
  ]
}

================================================
FILE: scripts/mysql-cli.sh
================================================
#!/bin/bash
echo "================== Help for mysql cli ========================="
echo "================================================================="
docker exec -it mysql-test mysql --user=root --password=root mysql

================================================
FILE: scripts/oracle-cli.sh
================================================
#!/bin/bash
echo "================== Help for oracle cli ========================"
echo "================================================================="
docker exec -it oracle-test sqlplus system/oracle

================================================
FILE: scripts/psql-cli.sh
================================================
#!/bin/bash
echo "================== Help for psql ========================="
echo "\l or \list : shows all databases"
echo "\d : shows all tables, views and sequences"
echo "\dn : shows all schemas"
echo "\d table_name : describe table, view, sequence, or index"
echo "\c database_name : connect to a database"
echo "\q : quit"
echo "\? : for more commands"
echo "==================== Extensions ==========================="
echo "create extension pgcrypto; : installs cryptographic functions"
echo "==================== Some SQL ============================="
echo "select gen_random_uuid(); : returns a random uuid (pgcrypto)"
echo "select version(); : return the server version"
echo "select current_date; : returns the current date"
echo "================================================================="
docker exec -it postgres-test psql --dbname=docker --username=docker

================================================
FILE: scripts/sqlserver-cli.sh
================================================
#!/bin/bash
echo "================== Help for SqlServer cli ========================"
echo "================================================================="
docker exec -it sqlserver-test /opt/mssql-tools18/bin/sqlcmd -N o -S localhost -U sa -P docker123abc# -d docker
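
The migrator integration tests at the start of this section all follow one pattern: pair the shared JournalMigratorTest/SnapshotMigratorTest bases with a database-specific configuration file and a cleaner trait imported from MigratorSpec. As a minimal sketch of that pattern only, the classes below show how a further database could be wired in; `H2Cleaner` and "h2-application.conf" are hypothetical names used for illustration and are not files or traits present in this repository.

// Hypothetical sketch (not a file in this repository): mirrors the pattern of
// JournalMigratorTest.scala / SnapshotMigratorTest.scala shown above.
// `H2Cleaner` and "h2-application.conf" are assumed names for illustration only.
package akka.persistence.jdbc.migrator.integration

import akka.persistence.jdbc.migrator.MigratorSpec._
import akka.persistence.jdbc.migrator.{ JournalMigratorTest, SnapshotMigratorTest }

class H2JournalMigratorTest extends JournalMigratorTest("h2-application.conf") with H2Cleaner

class H2SnapshotMigratorTest extends SnapshotMigratorTest("h2-application.conf") with H2Cleaner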