Repository: dnvriend/akka-persistence-jdbc
Branch: main
Commit: e2f37890c1dc
Files: 271
Total size: 800.0 KB
Directory structure:
gitextract_t3d9jgyh/
├── .fossa.yml
├── .github/
│ └── workflows/
│ ├── checks.yml
│ ├── fossa.yml
│ ├── link-validator.yml
│ ├── release.yml
│ ├── test.yml
│ └── weekly.yml
├── .gitignore
├── .sbtopts
├── .scala-steward.conf
├── .scalafmt.conf
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── RELEASING.md
├── build.sbt
├── core/
│ ├── lib/
│ │ └── ojdbc6.jar
│ └── src/
│ ├── main/
│ │ ├── mima-filters/
│ │ │ ├── 3.5.3.backwards.excludes/
│ │ │ │ ├── issue-322-messagesWithBatch.excludes
│ │ │ │ └── issue-91-ordering-offset.excludes
│ │ │ ├── 4.x.x.backwards.excludes/
│ │ │ │ └── pr-401-highest-seq-nr.excludes
│ │ │ ├── 5.0.1.backwards.excludes/
│ │ │ │ └── pr-570-akka-serialization.excludes
│ │ │ ├── 5.0.2.backwards.excludes/
│ │ │ │ └── issue-585-performance-regression.excludes
│ │ │ ├── 5.1.0.backwards.excludes/
│ │ │ │ └── issue-557-logical-delete.excludes
│ │ │ ├── 5.4.0.backwards.excludes/
│ │ │ │ ├── issue-710-tag-fk.excludes
│ │ │ │ └── issue-775-slick-3.50.excludes
│ │ │ ├── 5.5.0.backwards.excludes/
│ │ │ │ └── issue-891-durable-store.excludes
│ │ │ └── 5.5.2.backwards.excludes/
│ │ │ └── pr-928-cleanup-tool.excludes
│ │ ├── resources/
│ │ │ ├── reference.conf
│ │ │ └── schema/
│ │ │ ├── h2/
│ │ │ │ ├── h2-create-schema-legacy.sql
│ │ │ │ ├── h2-create-schema.sql
│ │ │ │ ├── h2-drop-schema-legacy.sql
│ │ │ │ └── h2-drop-schema.sql
│ │ │ ├── mysql/
│ │ │ │ ├── mysql-create-schema-legacy.sql
│ │ │ │ ├── mysql-create-schema.sql
│ │ │ │ ├── mysql-drop-schema-legacy.sql
│ │ │ │ ├── mysql-drop-schema.sql
│ │ │ │ └── mysql-event-tag-migration.sql
│ │ │ ├── oracle/
│ │ │ │ ├── oracle-create-schema-legacy.sql
│ │ │ │ ├── oracle-create-schema.sql
│ │ │ │ ├── oracle-drop-schema-legacy.sql
│ │ │ │ ├── oracle-drop-schema.sql
│ │ │ │ └── oracle-event-tag-migration.sql
│ │ │ ├── postgres/
│ │ │ │ ├── postgres-create-schema-legacy.sql
│ │ │ │ ├── postgres-create-schema.sql
│ │ │ │ ├── postgres-drop-schema-legacy.sql
│ │ │ │ ├── postgres-drop-schema.sql
│ │ │ │ └── postgres-event-tag-migration.sql
│ │ │ └── sqlserver/
│ │ │ ├── sqlserver-create-schema-legacy.sql
│ │ │ ├── sqlserver-create-schema-varchar.sql
│ │ │ ├── sqlserver-create-schema.sql
│ │ │ ├── sqlserver-drop-schema-legacy.sql
│ │ │ ├── sqlserver-drop-schema.sql
│ │ │ └── sqlserver-event-tag-migration.sql
│ │ └── scala/
│ │ └── akka/
│ │ └── persistence/
│ │ └── jdbc/
│ │ ├── AkkaSerialization.scala
│ │ ├── JournalRow.scala
│ │ ├── cleanup/
│ │ │ ├── javadsl/
│ │ │ │ └── EventSourcedCleanup.scala
│ │ │ └── scaladsl/
│ │ │ └── EventSourcedCleanup.scala
│ │ ├── config/
│ │ │ └── AkkaPersistenceConfig.scala
│ │ ├── db/
│ │ │ ├── SlickDatabase.scala
│ │ │ └── SlickExtension.scala
│ │ ├── journal/
│ │ │ ├── JdbcAsyncWriteJournal.scala
│ │ │ └── dao/
│ │ │ ├── BaseDao.scala
│ │ │ ├── BaseJournalDaoWithReadMessages.scala
│ │ │ ├── DefaultJournalDao.scala
│ │ │ ├── FlowControl.scala
│ │ │ ├── H2Compat.scala
│ │ │ ├── JournalDao.scala
│ │ │ ├── JournalDaoInstantiation.scala
│ │ │ ├── JournalDaoWithReadMessages.scala
│ │ │ ├── JournalDaoWithUpdates.scala
│ │ │ ├── JournalQueries.scala
│ │ │ ├── JournalTables.scala
│ │ │ └── legacy/
│ │ │ ├── ByteArrayJournalDao.scala
│ │ │ ├── ByteArrayJournalSerializer.scala
│ │ │ ├── JournalQueries.scala
│ │ │ ├── JournalTables.scala
│ │ │ └── package.scala
│ │ ├── query/
│ │ │ ├── JdbcReadJournalProvider.scala
│ │ │ ├── JournalSequenceActor.scala
│ │ │ ├── dao/
│ │ │ │ ├── DefaultReadJournalDao.scala
│ │ │ │ ├── ReadJournalDao.scala
│ │ │ │ ├── ReadJournalQueries.scala
│ │ │ │ └── legacy/
│ │ │ │ ├── ByteArrayReadJournalDao.scala
│ │ │ │ └── ReadJournalQueries.scala
│ │ │ ├── javadsl/
│ │ │ │ └── JdbcReadJournal.scala
│ │ │ ├── package.scala
│ │ │ └── scaladsl/
│ │ │ └── JdbcReadJournal.scala
│ │ ├── serialization/
│ │ │ ├── PersistentReprSerializer.scala
│ │ │ └── SnapshotSerializer.scala
│ │ ├── snapshot/
│ │ │ ├── JdbcSnapshotStore.scala
│ │ │ └── dao/
│ │ │ ├── DefaultSnapshotDao.scala
│ │ │ ├── SnapshotDao.scala
│ │ │ ├── SnapshotDaoInstantiation.scala
│ │ │ ├── SnapshotQueries.scala
│ │ │ ├── SnapshotTables.scala
│ │ │ └── legacy/
│ │ │ ├── ByteArraySnapshotDao.scala
│ │ │ ├── ByteArraySnapshotSerializer.scala
│ │ │ ├── SnapshotQueries.scala
│ │ │ └── SnapshotTables.scala
│ │ ├── state/
│ │ │ ├── DurableStateQueries.scala
│ │ │ ├── DurableStateTables.scala
│ │ │ ├── JdbcDurableStateStoreProvider.scala
│ │ │ ├── OffsetOps.scala
│ │ │ ├── SequenceNextValUpdater.scala
│ │ │ ├── javadsl/
│ │ │ │ └── JdbcDurableStateStore.scala
│ │ │ └── scaladsl/
│ │ │ ├── DurableStateSequenceActor.scala
│ │ │ └── JdbcDurableStateStore.scala
│ │ ├── testkit/
│ │ │ ├── internal/
│ │ │ │ ├── SchemaType.scala
│ │ │ │ └── SchemaUtilsImpl.scala
│ │ │ ├── javadsl/
│ │ │ │ └── SchemaUtils.scala
│ │ │ └── scaladsl/
│ │ │ └── SchemaUtils.scala
│ │ └── util/
│ │ ├── BlockingOps.scala
│ │ ├── ByteArrayOps.scala
│ │ ├── ConfigOps.scala
│ │ ├── InputStreamOps.scala
│ │ ├── PluginVersionChecker.scala
│ │ ├── StringOps.scala
│ │ └── TrySeq.scala
│ └── test/
│ ├── LICENSE
│ ├── java/
│ │ └── akka/
│ │ └── persistence/
│ │ └── jdbc/
│ │ ├── JavadslSnippets.java
│ │ └── state/
│ │ └── JavadslSnippets.java
│ ├── resources/
│ │ ├── general.conf
│ │ ├── h2-application.conf
│ │ ├── h2-default-mode-application.conf
│ │ ├── h2-shared-db-application.conf
│ │ ├── h2-two-read-journals-application.conf
│ │ ├── jndi-application.conf
│ │ ├── jndi-shared-db-application.conf
│ │ ├── logback-test.xml
│ │ ├── mysql-application.conf
│ │ ├── mysql-shared-db-application.conf
│ │ ├── oracle-application.conf
│ │ ├── oracle-schema-overrides.conf
│ │ ├── oracle-shared-db-application.conf
│ │ ├── postgres-application.conf
│ │ ├── postgres-shared-db-application.conf
│ │ ├── sqlserver-application.conf
│ │ └── sqlserver-shared-db-application.conf
│ └── scala/
│ └── akka/
│ └── persistence/
│ └── jdbc/
│ ├── ScaladslSnippets.scala
│ ├── SharedActorSystemTestSpec.scala
│ ├── SimpleSpec.scala
│ ├── SingleActorSystemPerTestSpec.scala
│ ├── TablesTestSpec.scala
│ ├── cleanup/
│ │ └── scaladsl/
│ │ └── EventSourcedCleanupTest.scala
│ ├── configuration/
│ │ ├── AkkaPersistenceConfigTest.scala
│ │ ├── ConfigOpsTest.scala
│ │ └── JNDIConfigTest.scala
│ ├── journal/
│ │ ├── JdbcJournalPerfSpec.scala
│ │ ├── JdbcJournalSpec.scala
│ │ └── dao/
│ │ ├── ByteArrayJournalSerializerTest.scala
│ │ ├── JournalTablesTest.scala
│ │ ├── TagsSerializationTest.scala
│ │ └── TrySeqTest.scala
│ ├── query/
│ │ ├── AllPersistenceIdsTest.scala
│ │ ├── CurrentEventsByPersistenceIdTest.scala
│ │ ├── CurrentEventsByTagTest.scala
│ │ ├── CurrentPersistenceIdsTest.scala
│ │ ├── EventAdapterTest.scala
│ │ ├── EventsByPersistenceIdTest.scala
│ │ ├── EventsByTagMigrationTest.scala
│ │ ├── EventsByTagTest.scala
│ │ ├── EventsByUnfrequentTagTest.scala
│ │ ├── HardDeleteQueryTest.scala
│ │ ├── JournalDaoStreamMessagesMemoryTest.scala
│ │ ├── JournalSequenceActorTest.scala
│ │ ├── MultipleReadJournalTest.scala
│ │ ├── QueryTestSpec.scala
│ │ ├── TaggingEventAdapter.scala
│ │ └── dao/
│ │ ├── ReadJournalTablesTest.scala
│ │ └── TestProbeReadJournalDao.scala
│ ├── serialization/
│ │ └── StoreOnlySerializableMessagesTest.scala
│ ├── snapshot/
│ │ ├── JdbcSnapshotStoreSpec.scala
│ │ └── dao/
│ │ └── legacy/
│ │ └── SnapshotTablesTest.scala
│ ├── state/
│ │ ├── Payloads.scala
│ │ ├── ScaladslSnippets.scala
│ │ └── scaladsl/
│ │ ├── DataGenerationHelper.scala
│ │ ├── DurableStateSequenceActorTest.scala
│ │ ├── DurableStateStorePluginSpec.scala
│ │ ├── JdbcDurableStateSpec.scala
│ │ ├── StateSpecBase.scala
│ │ └── TestProbeDurableStateStoreQuery.scala
│ └── util/
│ ├── ClasspathResources.scala
│ └── DropCreate.scala
├── doc/
│ └── deadlock.md
├── docs/
│ ├── LICENSE
│ ├── release-train-issue-template.md
│ └── src/
│ └── main/
│ └── paradox/
│ ├── _template/
│ │ └── projectSpecificFooter.st
│ ├── assets/
│ │ └── js/
│ │ └── warnOldVersion.js
│ ├── configuration.md
│ ├── custom-dao.md
│ ├── durable-state-store.md
│ ├── index.md
│ ├── migration.md
│ ├── overview.md
│ ├── query.md
│ └── snapshots.md
├── integration/
│ ├── LICENSE
│ └── src/
│ └── test/
│ └── scala/
│ └── akka/
│ └── persistence/
│ └── jdbc/
│ └── integration/
│ ├── AllPersistenceIdsTest.scala
│ ├── CurrentEventsByPersistenceIdTest.scala
│ ├── CurrentEventsByTagTest.scala
│ ├── CurrentPersistenceIdsTest.scala
│ ├── EventAdapterTest.scala
│ ├── EventSourcedCleanupTest.scala
│ ├── EventsByPersistenceIdTest.scala
│ ├── EventsByTagMigrationTest.scala
│ ├── EventsByTagTest.scala
│ ├── HardDeleteQueryTest.scala
│ ├── JdbcJournalPerfSpec.scala
│ ├── JdbcJournalSpec.scala
│ ├── JdbcSnapshotStoreSpec.scala
│ ├── JournalDaoStreamMessagesMemoryTest.scala
│ ├── JournalSequenceActorTest.scala
│ ├── PostgresDurableStateStorePluginSpec.scala
│ ├── PostgresScalaJdbcDurableStateChangesByTagTest.scala
│ └── StoreOnlySerializableMessagesTest.scala
├── migrator/
│ └── src/
│ ├── main/
│ │ └── scala/
│ │ └── akka/
│ │ └── persistence/
│ │ └── jdbc/
│ │ └── migrator/
│ │ ├── JournalMigrator.scala
│ │ └── SnapshotMigrator.scala
│ └── test/
│ ├── LICENSE
│ ├── resources/
│ │ ├── general.conf
│ │ ├── h2-application.conf
│ │ ├── mysql-application.conf
│ │ ├── oracle-application.conf
│ │ ├── postgres-application.conf
│ │ ├── schema/
│ │ │ ├── h2/
│ │ │ │ ├── h2-create-schema-legacy.sql
│ │ │ │ ├── h2-create-schema.sql
│ │ │ │ ├── h2-drop-schema-legacy.sql
│ │ │ │ └── h2-drop-schema.sql
│ │ │ ├── mysql/
│ │ │ │ ├── mysql-create-schema-legacy.sql
│ │ │ │ ├── mysql-create-schema.sql
│ │ │ │ ├── mysql-drop-schema-legacy.sql
│ │ │ │ └── mysql-drop-schema.sql
│ │ │ ├── oracle/
│ │ │ │ ├── oracle-create-schema-legacy.sql
│ │ │ │ ├── oracle-create-schema.sql
│ │ │ │ ├── oracle-drop-schema-legacy.sql
│ │ │ │ └── oracle-drop-schema.sql
│ │ │ ├── postgres/
│ │ │ │ ├── postgres-create-schema-legacy.sql
│ │ │ │ ├── postgres-create-schema.sql
│ │ │ │ ├── postgres-drop-schema-legacy.sql
│ │ │ │ └── postgres-drop-schema.sql
│ │ │ └── sqlserver/
│ │ │ ├── sqlserver-create-schema-legacy.sql
│ │ │ ├── sqlserver-create-schema.sql
│ │ │ ├── sqlserver-drop-schema-legacy.sql
│ │ │ └── sqlserver-drop-schema.sql
│ │ └── sqlserver-application.conf
│ └── scala/
│ └── akka/
│ └── persistence/
│ └── jdbc/
│ └── migrator/
│ ├── JournalMigratorTest.scala
│ ├── MigratorSpec.scala
│ └── SnapshotMigratorTest.scala
├── migrator-integration/
│ ├── LICENSE
│ └── src/
│ └── test/
│ └── scala/
│ └── akka/
│ └── persistence/
│ └── jdbc/
│ └── migrator/
│ └── integration/
│ ├── JournalMigratorTest.scala
│ └── SnapshotMigratorTest.scala
├── project/
│ ├── AutomaticModuleName.scala
│ ├── Dependencies.scala
│ ├── IntegrationTests.scala
│ ├── ProjectAutoPlugin.scala
│ ├── Publish.scala
│ ├── build.properties
│ ├── plugins.sbt
│ └── project-info.conf
└── scripts/
├── cat-log.sh
├── create-release-issue.sh
├── docker-compose.yml
├── launch-all.sh
├── launch-mysql.sh
├── launch-oracle.sh
├── launch-postgres.sh
├── launch-sqlserver.sh
├── link-validator.conf
├── mysql-cli.sh
├── oracle-cli.sh
├── psql-cli.sh
└── sqlserver-cli.sh
================================================
FILE CONTENTS
================================================
================================================
FILE: .fossa.yml
================================================
version: 3
# https://github.com/fossas/fossa-cli/blob/master/docs/references/files/fossa-yml.md
paths:
exclude:
- ./integration
- ./migrator-integration
================================================
FILE: .github/workflows/checks.yml
================================================
name: Basic checks
on:
pull_request:
push:
branches:
- main
tags-ignore: [ v.* ]
permissions:
contents: read
jobs:
check-code-style:
name: Check Code Style
runs-on: Akka-Default
steps:
- name: Checkout
# https://github.com/actions/checkout/releases
uses: actions/checkout@v6
with:
# we don't know what commit the last tag was on; it's safer to get the entire repo so previousStableVersion resolves
fetch-depth: 0
- name: Checkout GitHub merge
if: github.event.pull_request
run: |-
git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch
git checkout scratch
- name: Cache Coursier cache
# https://github.com/coursier/cache-action/releases
uses: coursier/cache-action@v8.1.0
- name: Set up JDK 11
# https://github.com/coursier/setup-action/releases
uses: coursier/setup-action@v3.0.0
with:
jvm: temurin:1.11.0
- name: Run akka/github-actions-scripts
uses: akka/github-actions-scripts/setup_global_resolver@main
- name: Code style check and binary-compatibility check
# Run locally with: sbt 'verifyCodeStyle ; mimaReportBinaryIssues'
run: sbt "; verifyCodeStyle; mimaReportBinaryIssues"
check-code-compilation:
name: Check Code Compilation
runs-on: Akka-Default
steps:
- name: Checkout
# https://github.com/actions/checkout/releases
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Checkout GitHub merge
if: github.event.pull_request
run: |-
git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch
git checkout scratch
- name: Cache Coursier cache
# https://github.com/coursier/cache-action/releases
uses: coursier/cache-action@v8.1.0
- name: Set up JDK 11
# https://github.com/coursier/setup-action/releases
uses: coursier/setup-action@v3.0.0
with:
jvm: temurin:1.11.0
- name: Run akka/github-actions-scripts
uses: akka/github-actions-scripts/setup_global_resolver@main
- name: Compile all code with fatal warnings for Java 11 and Scala 2.13
# Run locally with: sbt 'clean ; +Test/compile ; +It/compile'
run: sbt "; Test/compile"
- name: Compile all code with Scala 3.3
run: sbt "++3.3; Test/compile"
check-docs:
name: Check Docs
runs-on: Akka-Default
steps:
- name: Checkout
# https://github.com/actions/checkout/releases
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Checkout GitHub merge
if: github.event.pull_request
run: |-
git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch
git checkout scratch
- name: Cache Coursier cache
# https://github.com/coursier/cache-action/releases
uses: coursier/cache-action@v8.1.0
- name: Set up JDK 11
# https://github.com/coursier/setup-action/releases
uses: coursier/setup-action@v3.0.0
with:
jvm: temurin:1.11.0
- name: Run akka/github-actions-scripts
uses: akka/github-actions-scripts/setup_global_resolver@main
- name: Create all API docs for artifacts/website and all reference docs
run: sbt docs/paradox
================================================
FILE: .github/workflows/fossa.yml
================================================
name: Dependency License Scanning
on:
workflow_dispatch:
schedule:
- cron: '0 0 * * 0' # At 00:00 on Sunday
permissions:
contents: read
jobs:
fossa:
name: Fossa
runs-on: Akka-Default
if: github.repository == 'akka/akka-persistence-jdbc'
steps:
- name: Checkout
# https://github.com/actions/checkout/releases
uses: actions/checkout@v6
with:
# we don't know what commit the last tag was on; it's safer to get the entire repo so previousStableVersion resolves
fetch-depth: 0
- name: Cache Coursier cache
# https://github.com/coursier/cache-action/releases
uses: coursier/cache-action@v8.1.0
- name: Set up JDK 11
# https://github.com/coursier/setup-action/releases
uses: coursier/setup-action@v3.0.0
with:
jvm: temurin:1.11.0
- name: Run akka/github-actions-scripts
uses: akka/github-actions-scripts/setup_global_resolver@main
- name: FOSSA policy check
run: |-
curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install-latest.sh | bash
fossa analyze && fossa test
env:
FOSSA_API_KEY: "${{secrets.FOSSA_API_KEY}}"
================================================
FILE: .github/workflows/link-validator.yml
================================================
name: Link Validator
on:
workflow_dispatch:
pull_request:
schedule:
- cron: '40 6 1 * *'
permissions:
contents: read
jobs:
validate-links:
runs-on: Akka-Default
if: github.repository == 'akka/akka-persistence-jdbc'
steps:
- name: Checkout
# https://github.com/actions/checkout/releases
uses: actions/checkout@v6
with:
# See https://github.com/actions/checkout/issues/299#issuecomment-677674415
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 100
- name: Fetch tags
run: git fetch --depth=100 origin +refs/tags/*:refs/tags/*
- name: Cache Coursier cache
# https://github.com/coursier/cache-action/releases
uses: coursier/cache-action@v8.1.0
- name: Set up JDK 25
# https://github.com/coursier/setup-action/releases
uses: coursier/setup-action@v3.0.0
with:
jvm: temurin:1.25
apps: cs
- name: Run akka/github-actions-scripts
uses: akka/github-actions-scripts/setup_global_resolver@main
- name: sbt site
run: sbt docs/makeSite
- name: Run Link Validator
run: cs launch net.runne::site-link-validator:0.2.3 -- scripts/link-validator.conf
================================================
FILE: .github/workflows/release.yml
================================================
name: Release
on:
push:
branches:
- main
tags: ["v*"]
permissions:
contents: read
jobs:
release:
# runs on main repo only
if: github.event.repository.fork == false
name: Release
# the release environment provides access to secrets required in the release process
# https://github.com/akka/akka-persistence-jdbc/settings/environments/164872635/edit
environment: release
runs-on: Akka-Default
steps:
- name: Checkout
# https://github.com/actions/checkout/releases
uses: actions/checkout@v6
with:
# we don't know what commit the last tag was on; it's safer to get the entire repo so previousStableVersion resolves
fetch-depth: 0
- name: Checkout GitHub merge
if: github.event.pull_request
run: |-
git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch
git checkout scratch
- name: Cache Coursier cache
# https://github.com/coursier/cache-action/releases
uses: coursier/cache-action@v8.1.0
- name: Set up JDK 11
# https://github.com/coursier/setup-action/releases
uses: coursier/setup-action@v3.0.0
with:
jvm: temurin:1.11.0.17
- name: Run akka/github-actions-scripts
uses: akka/github-actions-scripts/setup_global_resolver@main
- name: Publish artifacts for all Scala versions
env:
PGP_SECRET: ${{ secrets.PGP_SECRET }}
PGP_PASSPHRASE: ${{ secrets.PGP_PASSPHRASE }}
PUBLISH_USER: ${{ secrets.PUBLISH_USER }}
PUBLISH_PASSWORD: ${{ secrets.PUBLISH_PASSWORD }}
run: sbt +publishSigned
documentation:
name: Documentation
runs-on: Akka-Default
if: github.event.repository.fork == false
steps:
- name: Checkout
# https://github.com/actions/checkout/releases
uses: actions/checkout@v6
with:
# we don't know what commit the last tag was on; it's safer to get the entire repo so previousStableVersion resolves
fetch-depth: 0
- name: Set up JDK 25
# https://github.com/coursier/setup-action/releases
uses: coursier/setup-action@v3.0.0
with:
jvm: temurin:1.25
- name: Run akka/github-actions-scripts
uses: akka/github-actions-scripts/setup_global_resolver@main
- name: Publish
run: |-
eval "$(ssh-agent -s)"
echo $AKKA_RSYNC_GUSTAV | base64 -d > .github/id_rsa
chmod 600 .github/id_rsa
ssh-add .github/id_rsa
sbt publishRsync
env:
AKKA_RSYNC_GUSTAV: ${{ secrets.AKKA_RSYNC_GUSTAV }}
================================================
FILE: .github/workflows/test.yml
================================================
name: Integration Tests
on:
pull_request:
push:
branches:
- main
tags-ignore: [ v.* ]
permissions:
contents: read
jobs:
integration-test:
runs-on: Akka-Default
strategy:
fail-fast: false
matrix:
db:
- name: "H2"
test: "test"
- name: "MySQL"
test: '"integration/testOnly akka.persistence.jdbc.integration.MySQL*"'
script: 'launch-mysql.sh'
hasOldDao: true
- name: "Oracle"
test: '"integration/testOnly akka.persistence.jdbc.integration.Oracle*"'
script: 'launch-oracle.sh'
hasOldDao: true
- name: "Postgres"
test: '"integration/testOnly akka.persistence.jdbc.integration.Postgres*"'
script: 'launch-postgres.sh'
hasOldDao: true
- name: "SqlServer"
test: '"integration/testOnly akka.persistence.jdbc.integration.SqlServer*"'
script: 'launch-sqlserver.sh'
hasOldDao: true
name: Integration Test ${{ matrix.db.name }}
steps:
- name: Checkout
# https://github.com/actions/checkout/releases
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Checkout GitHub merge
if: github.event.pull_request
run: |-
git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch
git checkout scratch
- name: Cache Coursier cache
# https://github.com/coursier/cache-action/releases
uses: coursier/cache-action@v8.1.0
- name: Set up JDK 11
# https://github.com/coursier/setup-action/releases
uses: coursier/setup-action@v3.0.0
with:
jvm: temurin:1.11.0
- name: Run akka/github-actions-scripts
uses: akka/github-actions-scripts/setup_global_resolver@main
- name: Start DB in docker container
if: ${{ matrix.db.script }}
run: |-
./scripts/${{ matrix.db.script }}
- name: Run Integration tests for ${{ matrix.db.name }}
run: sbt ${{ matrix.db.test }} ${{ matrix.old-dao.extraOpts }}
- name: Run Integration tests for ${{ matrix.db.name }} (old dao)
if: ${{ matrix.db.hasOldDao }}
run: sbt ${{ matrix.db.test }} ${{ matrix.old-dao.extraOpts }} -Djdbc-journal.dao=akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao -Djdbc-snapshot-store.dao=akka.persistence.jdbc.snapshot.dao.legacy.ByteArraySnapshotDao -Djdbc-read-journal.dao=akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao
- name: Print logs on failure
if: ${{ failure() }}
run: find . -name "*.log" -exec ./scripts/cat-log.sh {} \;
================================================
FILE: .github/workflows/weekly.yml
================================================
name: Weekly Integration Tests
on:
schedule:
- cron: "0 0 * * 1"
workflow_dispatch:
permissions:
contents: read
jobs:
integration-test:
name: Weekly Integration Test ${{ matrix.db.name }}, ${{ matrix.db.jdk }}
runs-on: Akka-Default
strategy:
fail-fast: false
matrix:
db:
- name: "H2"
test: "test"
jdk: 'temurin:1.21'
- name: "H2"
test: "test"
jdk: 'temurin:1.25'
- name: "MySQL"
test: '"integration/testOnly akka.persistence.jdbc.integration.MySQL*"'
script: 'launch-mysql.sh'
jdk: 'temurin:1.21'
- name: "Oracle"
test: '"integration/testOnly akka.persistence.jdbc.integration.Oracle*"'
script: 'launch-oracle.sh'
jdk: 'temurin:1.21'
- name: "Postgres"
test: '"integration/testOnly akka.persistence.jdbc.integration.Postgres*"'
script: 'launch-postgres.sh'
jdk: 'temurin:1.21'
- name: "SqlServer"
test: '"integration/testOnly akka.persistence.jdbc.integration.SqlServer*"'
script: 'launch-sqlserver.sh'
jdk: 'temurin:1.21'
steps:
- name: Checkout
# https://github.com/actions/checkout/releases
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Checkout GitHub merge
if: github.event.pull_request
run: |-
git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch
git checkout scratch
- name: Cache Coursier cache
# https://github.com/coursier/cache-action/releases
uses: coursier/cache-action@v8.1.0
- name: Set up ${{ matrix.db.jdk }}
# https://github.com/coursier/setup-action/releases
uses: coursier/setup-action@v3.0.0
with:
jvm: ${{ matrix.db.jdk }}
- name: Run akka/github-actions-scripts
uses: akka/github-actions-scripts/setup_global_resolver@main
- name: Start DB in docker container
if: ${{ matrix.db.script }}
run: |-
./scripts/${{ matrix.db.script }}
- name: Run Integration tests for ${{ matrix.db.name }}
run: sbt ${{ matrix.db.test }}
- name: Print logs on failure
if: ${{ failure() }}
run: find . -name "*.log" -exec ./scripts/cat-log.sh {} \;
================================================
FILE: .gitignore
================================================
/RUNNING_PID
logs
target
.idea
*.iml
*.iws
.settings
.classpath
.project
.worksheet
.bsp
*.code-workspace
.bloop
.metals
metals.sbt
.DS_Store
================================================
FILE: .sbtopts
================================================
-J-Xms512M
-J-Xmx4096M
-J-XX:MaxGCPauseMillis=200
================================================
FILE: .scala-steward.conf
================================================
pullRequests.frequency = "@monthly"
updates.ignore = [
{ groupId = "org.scalameta", artifactId = "scalafmt-core" }
{ groupId = "org.scalameta", artifactId = "sbt-scalafmt" }
// explicit updates
{ groupId = "com.typesafe.akka" }
]
commits.message = "bump: ${artifactName} ${nextVersion} (was ${currentVersion})"
updatePullRequests = never
================================================
FILE: .scalafmt.conf
================================================
version = 3.0.8
style = defaultWithAlign
docstrings.style = Asterisk
docstrings.wrap = no
indentOperator.preset = spray
maxColumn = 120
rewrite.rules = [RedundantParens, SortImports, AvoidInfix]
unindentTopLevelOperators = true
align.tokens = [{code = "=>", owner = "Case"}]
align.openParenDefnSite = false
align.openParenCallSite = false
optIn.breakChainOnFirstMethodDot = false
optIn.configStyleArguments = false
danglingParentheses.defnSite = false
danglingParentheses.callSite = false
spaces.inImportCurlyBraces = true
rewrite.neverInfix.excludeFilters = [
and
min
max
until
to
by
eq
ne
"should.*"
"contain.*"
"must.*"
in
ignore
be
taggedAs
thrownBy
synchronized
have
when
size
only
noneOf
oneElementOf
noElementsOf
atLeastOneElementOf
atMostOneElementOf
allElementsOf
inOrderElementsOf
theSameElementsAs
]
rewriteTokens = {
"⇒": "=>"
"→": "->"
"←": "<-"
}
newlines.afterCurlyLambda = preserve
newlines.implicitParamListModifierPrefer = before
================================================
FILE: CONTRIBUTING.md
================================================
# Contributing to Akka Persistence JDBC
## General Workflow
This is the process for committing code into master.
1. Make sure you have signed the Lightbend CLA; if not, [sign it online](https://www.lightbend.com/contribute/cla/akka/current).
2. Before starting to work on a feature or a fix, make sure that there is a ticket for your work in the [issue tracker](https://github.com/akka/akka-persistence-jdbc/issues). If not, create it first.
3. Perform your work according to the [pull request requirements](#pull-request-requirements).
4. When the feature or fix is completed you should open a [Pull Request](https://help.github.com/articles/using-pull-requests) on [GitHub](https://github.com/akka/akka-persistence-jdbc/pulls).
5. The Pull Request should be reviewed by other maintainers (as many as feasible/practical). Note that the maintainers can consist of outside contributors, both within and outside Lightbend. Outside contributors are encouraged to participate in the review process; it is not a closed process.
6. After the review you should fix the issues (review comments, CI failures) by pushing a new commit for new review, iterating until the reviewers give their thumbs up and CI tests pass.
7. If the branch has merge conflicts with its target, rebase your branch onto the target branch.
In case of questions about the contribution process or for discussion of specific issues please visit the [akka forum](https://discuss.akka.io/c/akka/).
## Pull Request Requirements
For a Pull Request to be considered at all it has to meet these requirements:
1. The Pull Request branch should be given a unique, descriptive name that explains its intent.
2. Code in the branch should live up to the current code standard:
- Not violate [DRY](http://programmer.97things.oreilly.com/wiki/index.php/Don%27t_Repeat_Yourself).
- [Boy Scout Rule](http://programmer.97things.oreilly.com/wiki/index.php/The_Boy_Scout_Rule) needs to have been applied.
3. Regardless of whether the code introduces new features or fixes bugs or regressions, it must have comprehensive tests.
4. The code must be well documented (see the [Documentation](#documentation) section below).
5. The commit messages must properly describe the changes, see [further below](#creating-commits-and-writing-commit-messages).
6. Do not use ``@author`` tags since they do not encourage [Collective Code Ownership](http://www.extremeprogramming.org/rules/collective.html). Contributors get the credit they deserve in the release notes.
If these requirements are not met then the code should **not** be merged into master, or even reviewed - regardless of how good or important it is. No exceptions.
## Documentation
Documentation should be written in two forms:
1. API documentation in the form of scaladoc/javadoc comments on the Scala and Java user API.
2. Guide documentation in the [docs](docs/) subproject using the [Paradox](https://github.com/lightbend/paradox) documentation tool. This documentation should give a short introduction of how the plugin should be used.
## External Dependencies
All the external runtime dependencies for the project, including transitive dependencies, must have an open source license that is equal to, or compatible with, [Apache 2](https://www.apache.org/licenses/LICENSE-2.0).
This must be ensured by manually verifying the license for all the dependencies for the project:
1. Whenever a committer to the project changes a version of a dependency (including Scala) in the build file.
2. Whenever a committer to the project adds a new dependency.
3. Whenever a new release is cut (public or private for a customer).
Every external dependency listed in the build file must have a trailing comment with the license name of the dependency.
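For example, such a trailing comment could look like this (a hypothetical sketch; the artifacts and versions are illustrative and not taken from `project/Dependencies.scala`):
```scala
// Hypothetical entries showing the trailing license comment convention
libraryDependencies ++= Seq(
  "com.typesafe.slick" %% "slick"      % "3.5.2", // BSD 2-Clause
  "org.postgresql"      % "postgresql" % "42.7.3" // BSD 2-Clause
)
```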
Which licenses are compatible with Apache 2 is defined in [this doc](https://www.apache.org/legal/3party.html#category-a), where you can see that the licenses listed under ``Category A`` are automatically compatible with Apache 2, while the ones listed under ``Category B`` need additional action:
> Each license in this category requires some degree of [reciprocity](https://www.apache.org/legal/3party.html#define-reciprocal); therefore, additional action must be taken in order to minimize the chance that a user of an Apache product will create a derivative work of a reciprocally-licensed portion of an Apache product without being aware of the applicable requirements.
## Creating Commits And Writing Commit Messages
Follow these guidelines when creating public commits and writing commit messages.
1. If your work spans multiple local commits (for example, if you do safe-point commits while working in a feature branch or work in a branch for a long time doing merges/rebases etc.), then please do not commit it all but rewrite the history by squashing the commits into a single big commit for which you write a good commit message (as discussed in the following sections). For more info read this article: [Git Workflow](https://sandofsky.com/blog/git-workflow.html). Every commit should be able to be used in isolation, cherry-picked etc.
2. The first line should be a descriptive sentence stating what the commit does, including the ticket number. It should be possible to fully understand what the commit does—but not necessarily how it does it—by just reading this single line. We follow the “imperative present tense” style for commit messages ([more info here](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html)).
It is **not ok** to only list the ticket number, or to write "minor fix" or similar.
If the commit is a small fix, then you are done. If not, go to 3.
3. Following the single line description should be a blank line followed by an enumerated list with the details of the commit.
4. Add keywords for your commit (depending on the degree of automation we reach, the list may change over time):
* ``Review by @gituser`` - if you want to notify someone on the team. The others can, and are encouraged to participate.
Example:
Add eventsByTag query #123
* Details 1
* Details 2
* Details 3
## How To Enforce These Guidelines?
1. [Scalafmt](https://scalameta.org/scalafmt/) enforces some of the code style rules.
2. [sbt-header plugin](https://github.com/sbt/sbt-header) manages consistent copyright headers in every source file.
================================================
FILE: LICENSE
================================================
Business Source License 1.1
Parameters
Licensor: Lightbend, Inc.
Licensed Work: Akka Persistence JDBC 5.5.4
This license applies to all sub directories and files
UNLESS another license file is present in a sub
directory, then that other license applies to all files
in its directory and sub directories.
The Licensed Work is (c) 2025 Lightbend Inc.
Additional Use Grant:
If you develop an application using a version of Play Framework that
utilizes binary versions of akka-streams and its dependencies, you may
use such binary versions of akka-streams and its dependencies in the
development of your application only as they are incorporated into
Play Framework and solely to implement the functionality provided by
Play Framework; provided that, they are only used in the following way:
Connecting to a Play Framework websocket and/or Play Framework
request/response bodies for server and play-ws client.
Change Date: 2028-10-30
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please visit: https://akka.io
-----------------------------------------------------------------------------
Business Source License 1.1
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License’s text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License’s text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.
================================================
FILE: README.md
================================================
Akka
====
*Akka is a powerful platform that simplifies building and operating highly responsive, resilient, and scalable services.*
The platform consists of
* the [**Akka SDK**](https://doc.akka.io/) for straightforward, rapid development with AI assist and automatic clustering. Services built with the Akka SDK are automatically clustered and can be deployed on any infrastructure.
* and [**Akka Automated Operations**](https://doc.akka.io/operations/akka-platform.html), a managed solution that handles everything for Akka SDK services from auto-elasticity to multi-region high availability running safely within your VPC.
The **Akka SDK** and **Akka Automated Operations** are built upon the foundational [**Akka libraries**](https://doc.akka.io/libraries/akka-dependencies/current/), providing the building blocks for distributed systems.
JDBC plugin for Akka Persistence
================================
akka-persistence-jdbc writes journal and snapshot entries to a configured JDBC store. It implements the full akka-persistence-query API and is therefore very useful for implementing DDD-style
application models using Akka for creating reactive applications.
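As a minimal sketch of the query side (assuming the `jdbc-read-journal` plugin is enabled in `application.conf` and a database is reachable; the persistence id below is only a placeholder), the Scala read journal can be obtained through `PersistenceQuery` and used to replay the currently stored events for one persistence id:
```scala
import akka.actor.ActorSystem
import akka.persistence.query.PersistenceQuery
import akka.persistence.jdbc.query.scaladsl.JdbcReadJournal

object ReadJournalExample extends App {
  // The system must be started with a configuration that enables jdbc-read-journal
  implicit val system: ActorSystem = ActorSystem("example")

  val readJournal: JdbcReadJournal =
    PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier)

  // Replay the events currently stored for one persistence id and print them
  readJournal
    .currentEventsByPersistenceId("sample-persistence-id", 0L, Long.MaxValue)
    .runForeach(envelope => println(envelope.event))
}
```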
Please note that the H2 database is not recommended for production use; support for H2 is primarily intended for testing purposes.
The Akka Persistence JDBC plugin was originally created by @dnvriend.
Reference Documentation
-----------------------
The reference documentation for all Akka libraries is available via [doc.akka.io/libraries/](https://doc.akka.io/libraries/); details for the Akka Persistence JDBC plugin are available
for [Scala](https://doc.akka.io/libraries/akka-persistence-jdbc/current/?language=scala) and [Java](https://doc.akka.io/libraries/akka-persistence-jdbc/current/?language=java).
The current versions of all Akka libraries are listed on the [Akka Dependencies](https://doc.akka.io/libraries/akka-dependencies/current/) page. Releases of the Akka JDBC plugin in this repository are listed on the [GitHub releases](https://github.com/akka/akka-persistence-jdbc/releases) page.
## Build Token
To build locally, you need to fetch a token at https://account.akka.io/token and place it in the `~/.sbt/1.0/akka-commercial.sbt` file like this:
```
ThisBuild / resolvers += "lightbend-akka".at("your token resolver here")
```
## Contributing
Contributions are *very* welcome! The Akka team appreciates community contributions by both those new to Akka and those more experienced.
If you find an issue that you'd like to see fixed, the quickest way to make that happen is to implement the fix and submit a pull request.
Refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file for more details about the workflow, and general hints on how to prepare your pull request.
You can also ask for clarifications or guidance in GitHub issues directly, or in the [akka forum](https://discuss.akka.io/c/akka/).
## License
Akka is licensed under the Business Source License 1.1, please see the [Akka License FAQ](https://www.lightbend.com/akka/license-faq).
Tests and documentation are under a separate license, see the LICENSE file in each documentation and test root directory for details.
================================================
FILE: RELEASING.md
================================================
## Releasing
Use this command to create a release issue from the [Release Train Issue Template](docs/release-train-issue-template.md) and follow the steps.
```bash
~/akka-persistence-jdbc> scripts/create-release-issue.sh `version-to-be-released`
```
### Releasing only updated docs
It is possible to release revised documentation for an already existing release.
1. Create a new branch from a release tag. If the revised documentation is for the `v0.3` release, then the name of the new branch should be `docs/v0.3`.
1. Add and commit a `version.sbt` file that pins the version to the one that is being revised. Also set `isSnapshot` to `false` for the stable documentation links. For example:
```scala
ThisBuild / version := "4.0.0"
ThisBuild / isSnapshot := false
```
1. Make all of the required changes to the documentation.
1. Build documentation locally with `CI` settings:
```sh
env CI=true sbt docs/previewSite
```
1. If the generated documentation looks good, send it to Gustav:
```sh
env CI=true sbt docs/publishRsync
```
1. Do not forget to push the new branch back to GitHub.
1. Commit the changes to Gustav's local git repo
### Releasing a Snapshot
Snapshots are released automatically when commits are pushed to master.
================================================
FILE: build.sbt
================================================
import com.lightbend.paradox.apidoc.ApidocPlugin.autoImport.apidocRootPackage
import com.geirsson.CiReleasePlugin
lazy val `akka-persistence-jdbc` = project
.in(file("."))
.enablePlugins(ScalaUnidocPlugin)
.disablePlugins(MimaPlugin, SitePlugin, CiReleasePlugin)
.aggregate(core, docs, migrator)
.settings(name := "akka-persistence-jdbc-root", publish / skip := true)
lazy val core = project
.in(file("core"))
.enablePlugins(MimaPlugin)
.disablePlugins(SitePlugin, CiReleasePlugin)
.settings(
name := "akka-persistence-jdbc",
AutomaticModuleName.settings("akka.persistence.jdbc"),
libraryDependencies ++= Dependencies.Libraries,
// Workaround for https://github.com/slick/slick/issues/2933
libraryDependencies ++=
(if (scalaVersion.value.startsWith("2.13")) Seq("org.scala-lang" % "scala-reflect" % scalaVersion.value)
else Nil),
mimaReportSignatureProblems := true,
mimaPreviousArtifacts := {
if (scalaVersion.value.startsWith("3")) {
Set.empty
} else {
Set(
organization.value %% name.value % previousStableVersion.value.getOrElse(
throw new Error("Unable to determine previous version for MiMa")))
}
})
lazy val integration = project
.in(file("integration"))
.settings(IntegrationTests.settings)
.settings(name := "akka-persistence-jdbc-integration", libraryDependencies ++= Dependencies.Libraries)
.disablePlugins(MimaPlugin, SitePlugin, CiReleasePlugin)
.dependsOn(core % "compile->compile;test->test")
lazy val migrator = project
.in(file("migrator"))
.disablePlugins(SitePlugin, MimaPlugin, CiReleasePlugin)
.settings(
name := "akka-persistence-jdbc-migrator",
AutomaticModuleName.settings("akka.persistence.jdbc.migrator"),
libraryDependencies ++= Dependencies.Migration ++ Dependencies.Libraries,
// TODO remove this when ready to publish it
publish / skip := true)
.dependsOn(core % "compile->compile;test->test")
lazy val `migrator-integration` = project
.in(file("migrator-integration"))
.settings(IntegrationTests.settings)
.settings(name := "akka-persistence-jdbc-migrator-integration", libraryDependencies ++= Dependencies.Libraries)
.disablePlugins(MimaPlugin, SitePlugin, CiReleasePlugin)
.dependsOn(migrator)
lazy val docs = project
.enablePlugins(ProjectAutoPlugin, AkkaParadoxPlugin, ParadoxSitePlugin, PreprocessPlugin, PublishRsyncPlugin)
.disablePlugins(MimaPlugin, CiReleasePlugin)
.settings(
name := "Akka Persistence plugin for JDBC",
publish / skip := true,
makeSite := makeSite.dependsOn(LocalRootProject / ScalaUnidoc / doc).value,
previewPath := (Paradox / siteSubdirName).value,
Preprocess / siteSubdirName := s"api/akka-persistence-jdbc/${if (isSnapshot.value) "snapshot"
else version.value}",
Preprocess / sourceDirectory := (LocalRootProject / ScalaUnidoc / unidoc / target).value,
Paradox / siteSubdirName := s"libraries/akka-persistence-jdbc/${if (isSnapshot.value) "snapshot" else version.value}",
Compile / paradoxProperties ++= Map(
"project.url" -> "https://doc.akka.io/libraries/akka-persistence-jdbc/current/",
"github.base_url" -> "https://github.com/akka/akka-persistence-jdbc/",
"canonical.base_url" -> "https://doc.akka.io/libraries/akka-persistence-jdbc/current",
"akka.version" -> Dependencies.AkkaVersion,
"slick.version" -> Dependencies.SlickVersion,
"extref.github.base_url" -> s"https://github.com/akka/akka-persistence-jdbc/blob/${if (isSnapshot.value) "master"
else "v" + version.value}/%s",
// Slick
"extref.slick.base_url" -> s"https://scala-slick.org/doc/${Dependencies.SlickVersion}/%s",
// Akka
"extref.akka.base_url" -> s"https://doc.akka.io/libraries/akka-core/${Dependencies.AkkaBinaryVersion}/%s",
"scaladoc.akka.base_url" -> s"https://doc.akka.io/api/akka-core/${Dependencies.AkkaBinaryVersion}/",
"javadoc.akka.base_url" -> s"https://doc.akka.io/japi/akka-core/${Dependencies.AkkaBinaryVersion}/",
"javadoc.akka.link_style" -> "direct",
// Java
"javadoc.base_url" -> "https://docs.oracle.com/javase/8/docs/api/",
// Scala
"scaladoc.scala.base_url" -> s"https://www.scala-lang.org/api/${scalaBinaryVersion.value}.x/",
"scaladoc.akka.persistence.jdbc.base_url" -> s"/${(Preprocess / siteSubdirName).value}/"),
paradoxGroups := Map("Language" -> Seq("Java", "Scala")),
resolvers += Resolver.jcenterRepo,
publishRsyncArtifacts += makeSite.value -> "www/",
publishRsyncHost := "akkarepo@gustav.akka.io",
apidocRootPackage := "akka")
Global / onLoad := (Global / onLoad).value.andThen { s =>
val v = version.value
if (dynverGitDescribeOutput.value.hasNoTags)
throw new MessageOnlyException(
s"Failed to derive version from git tags. Maybe run `git fetch --unshallow`? Derived version: $v")
s
}
TaskKey[Unit]("verifyCodeFmt") := {
scalafmtCheckAll.all(ScopeFilter(inAnyProject)).result.value.toEither.left.foreach { _ =>
throw new MessageOnlyException(
"Unformatted Scala code found. Please run 'scalafmtAll' and commit the reformatted code")
}
(Compile / scalafmtSbtCheck).result.value.toEither.left.foreach { _ =>
throw new MessageOnlyException(
"Unformatted sbt code found. Please run 'scalafmtSbt' and commit the reformatted code")
}
}
addCommandAlias("verifyCodeStyle", "headerCheck; verifyCodeFmt")
val isJdk11orHigher: Boolean = {
val result = VersionNumber(sys.props("java.specification.version")).matchesSemVer(SemanticSelector(">=11"))
if (!result)
throw new IllegalArgumentException("JDK 11 or higher is required")
result
}
================================================
FILE: core/src/main/mima-filters/3.5.3.backwards.excludes/issue-322-messagesWithBatch.excludes
================================================
# #322 Adding messagesWithBatch to Dao traits
ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.journal.dao.JournalDao.messagesWithBatch")
ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.journal.dao.H2JournalDao.messagesWithBatch")
ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.journal.dao.JournalDaoWithUpdates.messagesWithBatch")
ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.query.dao.BaseByteArrayReadJournalDao.ec")
ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.query.dao.BaseByteArrayReadJournalDao.mat")
ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.query.dao.H2ReadJournalDao.messagesWithBatch")
ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.query.dao.OracleReadJournalDao.messagesWithBatch")
ProblemFilters.exclude[InheritedNewAbstractMethodProblem]("akka.persistence.jdbc.query.dao.ReadJournalDao.messagesWithBatch")
================================================
FILE: core/src/main/mima-filters/3.5.3.backwards.excludes/issue-91-ordering-offset.excludes
================================================
# #91 changing signature of messages and messagesWithBatch in JournalDaoWithReadMessages
# tuple (PersistentRepr, Long) to include the ordering number
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.serialization.FlowPersistentReprSerializer.deserializeFlowWithoutTags")
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.ByteArrayJournalSerializer.deserializeFlowWithoutTags")
ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.query.scaladsl.JdbcReadJournal$ContinueDelayed$")
ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.query.scaladsl.JdbcReadJournal$FlowControl")
ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.query.scaladsl.JdbcReadJournal$Stop$")
ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.query.scaladsl.JdbcReadJournal$Continue$")
================================================
FILE: core/src/main/mima-filters/4.x.x.backwards.excludes/pr-401-highest-seq-nr.excludes
================================================
# https://github.com/akka/akka-persistence-jdbc/pull/401/files
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.JournalQueries.highestSequenceNrForPersistenceId")
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.JournalQueries.highestMarkedSequenceNrForPersistenceId")
================================================
FILE: core/src/main/mima-filters/5.0.1.backwards.excludes/pr-570-akka-serialization.excludes
================================================
# https://github.com/akka/akka-persistence-jdbc/pull/570/files
# The problem comes from an earlier PR where the class akka.persistence.jdbc.journal.dao.AkkaSerialization
# was moved to akka.persistence.jdbc.AkkaSerialization as it was also being used from durable state
ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.journal.dao.AkkaSerialization")
ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.journal.dao.AkkaSerialization$")
ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.journal.dao.AkkaSerialization$AkkaSerialized")
ProblemFilters.exclude[MissingClassProblem]("akka.persistence.jdbc.journal.dao.AkkaSerialization$AkkaSerialized$")
================================================
FILE: core/src/main/mima-filters/5.0.2.backwards.excludes/issue-585-performance-regression.excludes
================================================
# internals
ProblemFilters.exclude[IncompatibleTemplateDefProblem]("akka.persistence.jdbc.journal.dao.BaseDao")
ProblemFilters.exclude[MissingTypesProblem]("akka.persistence.jdbc.journal.dao.DefaultJournalDao")
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.DefaultJournalDao.queueWriteJournalRows")
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.DefaultJournalDao.writeQueue")
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.JournalQueries.insertAndReturn")
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.JournalQueries.writeJournalRows")
ProblemFilters.exclude[MissingTypesProblem]("akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao")
ProblemFilters.exclude[MissingTypesProblem]("akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao")
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.queueWriteJournalRows")
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.writeQueue")
================================================
FILE: core/src/main/mima-filters/5.1.0.backwards.excludes/issue-557-logical-delete.excludes
================================================
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.config.BaseDaoConfig.logicalDelete")
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.config.ReadJournalConfig.includeDeleted")
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao.logWarnAboutLogicalDeletionDeprecation")
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.logWarnAboutLogicalDeletionDeprecation")
================================================
FILE: core/src/main/mima-filters/5.4.0.backwards.excludes/issue-710-tag-fk.excludes
================================================
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.JournalTables#EventTags.eventId")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.journal.dao.JournalTables#TagRow.eventId")
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.JournalTables#TagRow.copy")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.journal.dao.JournalTables#TagRow.copy$default$1")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.journal.dao.JournalTables#TagRow.copy$default$2")
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.JournalTables#TagRow.this")
ProblemFilters.exclude[MissingTypesProblem]("akka.persistence.jdbc.journal.dao.JournalTables$TagRow$")
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.JournalTables#TagRow.apply")
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.journal.dao.JournalTables#TagRow.unapply")
================================================
FILE: core/src/main/mima-filters/5.4.0.backwards.excludes/issue-775-slick-3.50.excludes
================================================
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.apply")
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.unapply")
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.tupled")
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.curried")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.database")
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.copy")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.copy$default$1")
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.db.EagerSlickDatabase.this")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.db.LazySlickDatabase.database")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.db.SlickDatabase.forConfig")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.db.SlickDatabase.database")
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.jdbc.db.SlickDatabase.database")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.journal.JdbcAsyncWriteJournal.db")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.journal.dao.DefaultJournalDao.db")
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.journal.dao.DefaultJournalDao.this")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao.db")
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao.db")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.db")
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.this")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.query.dao.DefaultReadJournalDao.db")
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.query.dao.DefaultReadJournalDao.this")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.query.dao.legacy.BaseByteArrayReadJournalDao.db")
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.jdbc.query.dao.legacy.BaseByteArrayReadJournalDao.db")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao.db")
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao.this")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.query.dao.legacy.OracleReadJournalDao.db")
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.jdbc.query.dao.legacy.OracleReadJournalDao.db")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.snapshot.JdbcSnapshotStore.db")
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.snapshot.dao.DefaultSnapshotDao.this")
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.snapshot.dao.legacy.ByteArraySnapshotDao.this")
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.persistence.jdbc.state.JdbcDurableStateStoreProvider.db")
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore.this")
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.query.JournalSequenceActor.receive")
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.query.JournalSequenceActor.receive$default$4")
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.query.JournalSequenceActor.findGaps")
================================================
FILE: core/src/main/mima-filters/5.5.0.backwards.excludes/issue-891-durable-store.excludes
================================================
# internal api changes
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.state.JdbcDurableStateStoreProvider.this")
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore.this")
================================================
FILE: core/src/main/mima-filters/5.5.2.backwards.excludes/pr-928-cleanup-tool.excludes
================================================
# internal api changes
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.jdbc.journal.dao.JournalDao.deleteEventsTo")
ProblemFilters.exclude[NewMixinForwarderProblem]("akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao.delete")
================================================
FILE: core/src/main/resources/reference.conf
================================================
# Copyright 2016 Dennis Vriend
# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>
akka-persistence-jdbc {
# The tag separator to use when tagging events with more than one tag.
# This property affects jdbc-journal.tagSeparator and jdbc-read-journal.tagSeparator.
tagSeparator = ","
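# Illustrative example (not a setting): with the default separator, an event tagged with
# the two tags "green" and "blue" is stored in the legacy schema's tags column as "green,blue".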
database-provider-fqcn = "akka.persistence.jdbc.db.DefaultSlickDatabaseProvider"
shared-databases {
// Shared databases can be defined here.
// This reference config contains a partial example of a shared database which is enabled by configuring "slick" as the shared db.
// This example is ignored by default as long as no profile is configured for it.
slick {
# This property indicates which profile must be used by Slick.
# Possible values are:
# - slick.jdbc.PostgresProfile$
# - slick.jdbc.MySQLProfile$
# - slick.jdbc.H2Profile$
# - slick.jdbc.SQLServerProfile$
# - slick.jdbc.OracleProfile$
# (uncomment and set the property below to match your needs)
# profile = "slick.jdbc.PostgresProfile$"
db {
connectionPool = "HikariCP"
# The JDBC URL for the chosen database
# (uncomment and set the property below to match your needs)
# url = "jdbc:postgresql://localhost:5432/akka-plugin"
# The database username
# (uncomment and set the property below to match your needs)
# user = "akka-plugin"
# The username's password
# (uncomment and set the property below to match your needs)
# password = "akka-plugin"
# The JDBC driver to use
# (uncomment and set the property below to match your needs)
# driver = "org.postgresql.Driver"
# hikariCP settings; see: https://github.com/brettwooldridge/HikariCP
# Slick will use an async executor with a fixed size queue of 10.000 objects
# The async executor is a connection pool for asynchronous execution of blocking I/O actions.
# This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.
queueSize = 10000 // number of objects that can be queued by the async executor
# This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection
# from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown.
# 1000ms is the minimum value. Default: 180000 (3 minutes)
connectionTimeout = 180000
# This property controls the maximum amount of time that a connection will be tested for aliveness.
# This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000
validationTimeout = 5000
# 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool.
# Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation
# of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections
# are never removed from the pool. Default: 600000 (10 minutes)
idleTimeout = 600000
# 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout
# it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired,
# only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds
# less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime),
# subject of course to the idleTimeout setting. Default: 1800000 (30 minutes)
maxLifetime = 1800000
# This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a
# possible connection leak. A value of 0 means leak detection is disabled.
# Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0
leakDetectionThreshold = 0
# ensures that the database does not get dropped while we are using it
keepAliveConnection = on
# See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing
# Keep in mind that the number of threads must equal the maximum number of connections.
numThreads = 20
maxConnections = 20
minConnections = 20
}
}
}
}
# the akka-persistence-journal in use
jdbc-journal {
class = "akka.persistence.jdbc.journal.JdbcAsyncWriteJournal"
tables {
# Only used in pre 5.0.0 Dao
legacy_journal {
tableName = "journal"
schemaName = ""
columnNames {
ordering = "ordering"
deleted = "deleted"
persistenceId = "persistence_id"
sequenceNumber = "sequence_number"
created = "created"
tags = "tags"
message = "message"
}
}
event_journal {
tableName = "event_journal"
schemaName = ""
columnNames {
ordering = "ordering"
deleted = "deleted"
persistenceId = "persistence_id"
sequenceNumber = "sequence_number"
writer = "writer"
writeTimestamp = "write_timestamp"
adapterManifest = "adapter_manifest"
eventPayload = "event_payload"
eventSerId = "event_ser_id"
eventSerManifest = "event_ser_manifest"
metaPayload = "meta_payload"
metaSerId = "meta_ser_id"
metaSerManifest = "meta_ser_manifest"
}
}
event_tag {
tableName = "event_tag"
schemaName = ""
columnNames {
# Used for the older foreign key.
eventId = "event_id"
persistenceId = "persistence_id"
sequenceNumber = "sequence_number"
tag = "tag"
}
# For rolling updates during the event_tag table migration,
# switch this flag to enable writing and reading with the new primary key.
legacy-tag-key = true
}
# Otherwise it would be a pinned dispatcher, see https://github.com/akka/akka/issues/31058
plugin-dispatcher = "akka.actor.default-dispatcher"
}
# The tag separator to use when tagging events with more than one tag.
# should not be configured directly, but through property akka-persistence-jdbc.tagSeparator
# in order to keep consistent behavior over write/read sides
# Only used for the legacy schema
tagSeparator = ${akka-persistence-jdbc.tagSeparator}
# If you have data from pre 5.0.0, use the legacy DAO, akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.
# Migration to the new DAO will be added in the future.
dao = "akka.persistence.jdbc.journal.dao.DefaultJournalDao"
# The size of the buffer used when queueing up events for batch writing. This number must be bigger than the number
# of events that may be written concurrently. In other words, this number must be bigger than the number of persistent
# actors that are actively persisting at the same time.
bufferSize = 1000
# The maximum size of the batches in which journal rows will be inserted
batchSize = 400
# The maximum size of the batches in which journal rows will be read when recovering
replayBatchSize = 400
# The maximum number of batch-inserts that may be running concurrently
parallelism = 8
# This setting can be used to configure usage of a shared database.
# To disable usage of a shared database, set to null or an empty string.
# When set to a non empty string, this setting does two things:
# - The actor which manages the write-journal will not automatically close the db when the actor stops (since it is shared)
# - If akka-persistence-jdbc.database-provider-fqcn is set to akka.persistence.jdbc.db.DefaultSlickDatabaseProvider
# then the shared database with the given name will be used. (shared databases are configured as part of akka-persistence-jdbc.shared-databases)
# Please note that the database will only be shared with the other journals if the use-shared-db is also set
# to the same value for these other journals.
use-shared-db = null
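# Illustrative sketch (commented out, not part of the defaults): to actually share the "slick"
# database defined under akka-persistence-jdbc.shared-databases, an application.conf would
# typically configure its profile and db settings and point each plugin at it, e.g.:
# akka-persistence-jdbc.shared-databases.slick.profile = "slick.jdbc.PostgresProfile$"
# akka-persistence-jdbc.shared-databases.slick.db.url = "jdbc:postgresql://localhost:5432/akka-plugin"
# jdbc-journal.use-shared-db = "slick"
# jdbc-snapshot-store.use-shared-db = "slick"
# jdbc-read-journal.use-shared-db = "slick"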
slick {
# This property indicates which profile must be used by Slick.
# Possible values are:
# - slick.jdbc.PostgresProfile$
# - slick.jdbc.MySQLProfile$
# - slick.jdbc.H2Profile$
# - slick.jdbc.SQLServerProfile$
# - slick.jdbc.OracleProfile$
# (uncomment and set the property below to match your needs)
# profile = "slick.jdbc.PostgresProfile$"
db {
connectionPool = "HikariCP"
# The JDBC URL for the chosen database
# (uncomment and set the property below to match your needs)
# url = "jdbc:postgresql://localhost:5432/akka-plugin"
# The database username
# (uncomment and set the property below to match your needs)
# user = "akka-plugin"
# The username's password
# (uncomment and set the property below to match your needs)
# password = "akka-plugin"
# The JDBC driver to use
# (uncomment and set the property below to match your needs)
# driver = "org.postgresql.Driver"
# hikariCP settings; see: https://github.com/brettwooldridge/HikariCP
# Slick will use an async executor with a fixed size queue of 10.000 objects
# The async executor is a connection pool for asynchronous execution of blocking I/O actions.
# This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.
queueSize = 10000 // number of objects that can be queued by the async executor
# This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection
# from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown.
# 1000ms is the minimum value. Default: 180000 (3 minutes)
connectionTimeout = 180000
# This property controls the maximum amount of time that a connection will be tested for aliveness.
# This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000
validationTimeout = 5000
# 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool.
# Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation
# of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections
# are never removed from the pool. Default: 600000 (10 minutes)
idleTimeout = 600000
# 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout
# it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired,
# only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds
# less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime),
# subject of course to the idleTimeout setting. Default: 1800000 (30 minutes)
maxLifetime = 1800000
# This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a
# possible connection leak. A value of 0 means leak detection is disabled.
# Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0
leakDetectionThreshold = 0
# ensures that the database does not get dropped while we are using it
keepAliveConnection = on
# See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing
# Keep in mind that the number of threads must equal the maximum number of connections.
numThreads = 20
maxConnections = 20
minConnections = 20
}
}
}
# the akka-persistence-snapshot-store in use
jdbc-snapshot-store {
class = "akka.persistence.jdbc.snapshot.JdbcSnapshotStore"
tables {
legacy_snapshot {
tableName = "snapshot"
schemaName = ""
columnNames {
persistenceId = "persistence_id"
sequenceNumber = "sequence_number"
created = "created"
snapshot = "snapshot"
}
}
snapshot {
tableName = "snapshot"
schemaName = ""
columnNames {
persistenceId = "persistence_id"
sequenceNumber = "sequence_number"
created = "created"
snapshotPayload = "snapshot_payload"
snapshotSerId = "snapshot_ser_id"
snapshotSerManifest = "snapshot_ser_manifest"
metaPayload = "meta_payload"
metaSerId = "meta_ser_id"
metaSerManifest = "meta_ser_manifest"
}
}
# Otherwise it would be a pinned dispatcher, see https://github.com/akka/akka/issues/31058
plugin-dispatcher = "akka.actor.default-dispatcher"
}
# This setting can be used to configure usage of a shared database.
# To disable usage of a shared database, set to null or an empty string.
# When set to a non empty string, this setting does two things:
# - The actor which manages the snapshot-store will not automatically close the db when the actor stops (since it is shared)
# - If akka-persistence-jdbc.database-provider-fqcn is set to akka.persistence.jdbc.db.DefaultSlickDatabaseProvider
# then the shared database with the given name will be used. (shared databases are configured as part of akka-persistence-jdbc.shared-databases)
# Please note that the database will only be shared with the other journals if the use-shared-db is also set
# to the same value for these other journals.
use-shared-db = null
dao = "akka.persistence.jdbc.snapshot.dao.DefaultSnapshotDao"
slick {
# This property indicates which profile must be used by Slick.
# Possible values are:
# - slick.jdbc.PostgresProfile$
# - slick.jdbc.MySQLProfile$
# - slick.jdbc.H2Profile$
# - slick.jdbc.SQLServerProfile$
# - slick.jdbc.OracleProfile$
# (uncomment and set the property below to match your needs)
# profile = "slick.jdbc.PostgresProfile$"
db {
connectionPool = "HikariCP"
# The JDBC URL for the chosen database
# (uncomment and set the property below to match your needs)
# url = "jdbc:postgresql://localhost:5432/akka-plugin"
# The database username
# (uncomment and set the property below to match your needs)
# user = "akka-plugin"
# The username's password
# (uncomment and set the property below to match your needs)
# password = "akka-plugin"
# The JDBC driver to use
# (uncomment and set the property below to match your needs)
# driver = "org.postgresql.Driver"
# hikariCP settings; see: https://github.com/brettwooldridge/HikariCP
# Slick will use an async executor with a fixed size queue of 10.000 objects
# The async executor is a connection pool for asynchronous execution of blocking I/O actions.
# This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.
queueSize = 10000 // number of objects that can be queued by the async executor
# This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection
# from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown.
# 1000ms is the minimum value. Default: 180000 (3 minutes)
connectionTimeout = 180000
# This property controls the maximum amount of time that a connection will be tested for aliveness.
# This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000
validationTimeout = 5000
# 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool.
# Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation
# of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections
# are never removed from the pool. Default: 600000 (10 minutes)
idleTimeout = 600000
# 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout
# it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired,
# only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds
# less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime),
# subject of course to the idleTimeout setting. Default: 1800000 (30 minutes)
maxLifetime = 1800000
# This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a
# possible connection leak. A value of 0 means leak detection is disabled.
# Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0
leakDetectionThreshold = 0
# ensures that the database does not get dropped while we are using it
keepAliveConnection = on
# See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing
# Keep in mind that the number of threads must equal the maximum number of connections.
numThreads = 20
maxConnections = 20
minConnections = 20
}
}
}
# the akka-persistence-query provider in use
jdbc-read-journal {
class = "akka.persistence.jdbc.query.JdbcReadJournalProvider"
# Absolute path to the write journal plugin configuration section.
# Read journal uses event adapters from the write plugin
# to adapt events.
write-plugin = "jdbc-journal"
# New events are retrieved (polled) with this interval.
refresh-interval = "1s"
# How many events to fetch in one query (replay) and keep buffered until they
# are delivered downstream.
max-buffer-size = "500"
# Number of 'max-buffer-size's to limit each events-by-tag query to
#
# Events by tag will fetch batches of elements limiting both using the DB LIMIT support and
# the "ordering" column of the journal. When executing a query starting from the beginning of the
# journal, for example when adding a new projection to an existing application with a large number
# of already persisted events, this can cause performance problems in some databases.
#
# This factor limits the "slices" of ordering the journal is queried for into smaller chunks,
# issuing more queries where each query covers a smaller slice of the journal instead of one
# covering the entire journal.
#
# Note that setting this too low incurs a performance overhead from many queries being issued where
# each query returns no or very few entries. What number is too low depends on how many tags are
# used and how well they are distributed; setting this value requires application-specific benchmarking
# to find a good number.
#
# 0 means disable the factor and query the entire journal and limit to max-buffer-size elements
events-by-tag-buffer-sizes-per-query = 0
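# Illustrative example: with max-buffer-size = 500 and events-by-tag-buffer-sizes-per-query = 3,
# each events-by-tag query covers a slice of at most 3 * 500 = 1500 ordering ids instead of the
# whole journal.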
# If enabled, automatically close the database connection when the actor system is terminated
add-shutdown-hook = true
# This setting can be used to configure usage of a shared database.
# To disable usage of a shared database, set to null or an empty string.
# This setting only has effect if akka-persistence-jdbc.database-provider-fqcn is set to
# akka.persistence.jdbc.db.DefaultSlickDatabaseProvider. When this setting is set to a non empty string
# then the shared database with the given name will be used. (shared databases are configured as part of akka-persistence-jdbc.shared-databases)
# Please note that the database will only be shared with the other journals if the use-shared-db is also set
# to the same value for these other journals.
use-shared-db = null
dao = "akka.persistence.jdbc.query.dao.DefaultReadJournalDao"
# Settings for determining if ids (ordering column) in the journal are out of sequence.
journal-sequence-retrieval {
# The maximum number of ids that will be retrieved in each batch
batch-size = 10000
# In case a number in the sequence is missing, this is the number of retries that will be done to see
# if the number is still found. Note that the time after which a number in the sequence is assumed missing is
# equal to max-tries * query-delay.
# (max-tries may not be zero)
max-tries = 10
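# Illustrative example: with max-tries = 10 and query-delay = 1 second, a gap in the ordering
# sequence is given up on after roughly 10 * 1s = 10 seconds.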
# How often the actor will query for new data
query-delay = 1 second
# The maximum backoff time before trying to query again in case of database failures
max-backoff-query-delay = 1 minute
# The ask timeout to use when querying the journal sequence actor, the actor should normally respond very quickly,
# since it always replies with its current internal state
ask-timeout = 1 second
}
tables {
legacy_journal = ${jdbc-journal.tables.legacy_journal}
event_journal = ${jdbc-journal.tables.event_journal}
event_tag = ${jdbc-journal.tables.event_tag}
}
# The tag separator to use when tagging events with more than one tag.
# should not be configured directly, but through property akka-persistence-jdbc.tagSeparator
# in order to keep consistent behavior over write/read sides
tagSeparator = ${akka-persistence-jdbc.tagSeparator}
slick {
# This property indicates which profile must be used by Slick.
# Possible values are:
# - slick.jdbc.PostgresProfile$
# - slick.jdbc.MySQLProfile$
# - slick.jdbc.H2Profile$
# - slick.jdbc.SQLServerProfile$
# - slick.jdbc.OracleProfile$
# (uncomment and set the property below to match your needs)
# profile = "slick.jdbc.PostgresProfile$"
db {
connectionPool = "HikariCP"
# The JDBC URL for the chosen database
# (uncomment and set the property below to match your needs)
# url = "jdbc:postgresql://localhost:5432/akka-plugin"
# The database username
# (uncomment and set the property below to match your needs)
# user = "akka-plugin"
# The username's password
# (uncomment and set the property below to match your needs)
# password = "akka-plugin"
# The JDBC driver to use
# (uncomment and set the property below to match your needs)
# driver = "org.postgresql.Driver"
# hikariCP settings; see: https://github.com/brettwooldridge/HikariCP
# Slick will use an async executor with a fixed size queue of 10.000 objects
# The async executor is a connection pool for asynchronous execution of blocking I/O actions.
# This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.
queueSize = 10000 // number of objects that can be queued by the async executor
# This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection
# from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown.
# 1000ms is the minimum value. Default: 180000 (3 minutes)
connectionTimeout = 180000
# This property controls the maximum amount of time that a connection will be tested for aliveness.
# This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000
validationTimeout = 5000
# 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool.
# Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation
# of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections
# are never removed from the pool. Default: 600000 (10 minutes)
idleTimeout = 600000
# 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout
# it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired,
# only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds
# less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime),
# subject of course to the idleTimeout setting. Default: 1800000 (30 minutes)
maxLifetime = 1800000
# This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a
# possible connection leak. A value of 0 means leak detection is disabled.
# Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0
leakDetectionThreshold = 0
# ensures that the database does not get dropped while we are using it
keepAliveConnection = on
# See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing
# Keep in mind that the number of threads must equal the maximum number of connections.
numThreads = 20
maxConnections = 20
minConnections = 20
}
}
}
# the akka-persistence-durable-state-store in use
jdbc-durable-state-store {
class = "akka.persistence.jdbc.state.JdbcDurableStateStoreProvider"
# number of records fetched from the store at once
batchSize = 500
# New states are retrieved (polled) with this interval.
refreshInterval = "1s"
tables {
durable_state {
## The table and column names configured here are not used by every SQL statement. If you change
## these values you may need to edit some source code; see
## https://github.com/akka/akka-persistence-jdbc/issues/573
tableName = "durable_state"
schemaName = ""
columnNames {
globalOffset = "global_offset"
persistenceId = "persistence_id"
revision = "revision"
statePayload = "state_payload"
stateSerId = "state_serial_id"
stateSerManifest = "state_serial_manifest"
tag = "tag"
stateTimestamp = "state_timestamp"
}
}
}
# Settings for determining if values of the global_offset column in the durable-state table are out of sequence.
durable-state-sequence-retrieval {
# The maximum number of ids that will be retrieved in each batch
batch-size = 10000
# In case a number in the sequence is missing, this is the number of retries that will be done to see
# if the number is still found. Note that the time after which a number in the sequence is assumed missing is
# equal to max-tries * query-delay.
# (max-tries may not be zero)
max-tries = 5
# How often the actor will query for new data
query-delay = 1 second
# The maximum backoff time before trying to query again in case of database failures
max-backoff-query-delay = 1 minute
# The ask timeout to use when querying the durable-state sequence actor, the actor should normally respond very quickly,
# since it always replies with its current internal state
ask-timeout = 1 second
# cache of revision numbers per persistence id
revision-cache-capacity = 10000
}
slick {
# This property indicates which profile must be used by Slick.
# Possible values are:
# - slick.jdbc.PostgresProfile$
# - slick.jdbc.MySQLProfile$
# - slick.jdbc.H2Profile$
# - slick.jdbc.SQLServerProfile$
# - slick.jdbc.OracleProfile$
# (uncomment and set the property below to match your needs)
# profile = "slick.jdbc.PostgresProfile$"
db {
connectionPool = "HikariCP"
# The JDBC URL for the chosen database
# (uncomment and set the property below to match your needs)
# url = "jdbc:postgresql://localhost:5432/akka-plugin"
# The database username
# (uncomment and set the property below to match your needs)
# user = "akka-plugin"
# The username's password
# (uncomment and set the property below to match your needs)
# password = "akka-plugin"
# The JDBC driver to use
# (uncomment and set the property below to match your needs)
# driver = "org.postgresql.Driver"
# hikariCP settings; see: https://github.com/brettwooldridge/HikariCP
# Slick will use an async executor with a fixed size queue of 10.000 objects
# The async executor is a connection pool for asynchronous execution of blocking I/O actions.
# This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.
queueSize = 10000 // number of objects that can be queued by the async executor
# This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection
# from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown.
# 1000ms is the minimum value. Default: 180000 (3 minutes)
connectionTimeout = 180000
# This property controls the maximum amount of time that a connection will be tested for aliveness.
# This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000
validationTimeout = 5000
# 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool.
# Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation
# of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections
# are never removed from the pool. Default: 600000 (10 minutes)
idleTimeout = 600000
# 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout
# it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired,
# only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds
# less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime),
# subject of course to the idleTimeout setting. Default: 1800000 (30 minutes)
maxLifetime = 1800000
# This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a
# possible connection leak. A value of 0 means leak detection is disabled.
# Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0
leakDetectionThreshold = 0
# ensures that the database does not get dropped while we are using it
keepAliveConnection = on
# See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing
# Keep in mind that the number of threads must equal the maximum number of connections.
numThreads = 20
maxConnections = 20
minConnections = 20
}
}
}
================================================
FILE: core/src/main/resources/schema/h2/h2-create-schema-legacy.sql
================================================
CREATE TABLE IF NOT EXISTS PUBLIC."journal" (
"ordering" BIGINT AUTO_INCREMENT,
"persistence_id" VARCHAR(255) NOT NULL,
"sequence_number" BIGINT NOT NULL,
"deleted" BOOLEAN DEFAULT FALSE NOT NULL,
"tags" VARCHAR(255) DEFAULT NULL,
"message" BYTEA NOT NULL,
PRIMARY KEY("persistence_id", "sequence_number")
);
CREATE UNIQUE INDEX IF NOT EXISTS "journal_ordering_idx" ON PUBLIC."journal"("ordering");
CREATE TABLE IF NOT EXISTS PUBLIC."snapshot" (
"persistence_id" VARCHAR(255) NOT NULL,
"sequence_number" BIGINT NOT NULL,
"created" BIGINT NOT NULL,
"snapshot" BYTEA NOT NULL,
PRIMARY KEY("persistence_id", "sequence_number")
);
CREATE TABLE IF NOT EXISTS "durable_state" (
"global_offset" BIGINT NOT NULL AUTO_INCREMENT,
"persistence_id" VARCHAR(255) NOT NULL,
"revision" BIGINT NOT NULL,
"state_payload" BLOB NOT NULL,
"state_serial_id" INTEGER NOT NULL,
"state_serial_manifest" VARCHAR,
"tag" VARCHAR,
"state_timestamp" BIGINT NOT NULL,
PRIMARY KEY("persistence_id")
);
CREATE INDEX "state_tag_idx" on "durable_state" ("tag");
CREATE INDEX "state_global_offset_idx" on "durable_state" ("global_offset");
================================================
FILE: core/src/main/resources/schema/h2/h2-create-schema.sql
================================================
CREATE TABLE IF NOT EXISTS "event_journal" (
"ordering" BIGINT UNIQUE NOT NULL AUTO_INCREMENT,
"deleted" BOOLEAN DEFAULT false NOT NULL,
"persistence_id" VARCHAR(255) NOT NULL,
"sequence_number" BIGINT NOT NULL,
"writer" VARCHAR NOT NULL,
"write_timestamp" BIGINT NOT NULL,
"adapter_manifest" VARCHAR NOT NULL,
"event_payload" BLOB NOT NULL,
"event_ser_id" INTEGER NOT NULL,
"event_ser_manifest" VARCHAR NOT NULL,
"meta_payload" BLOB,
"meta_ser_id" INTEGER,
"meta_ser_manifest" VARCHAR,
PRIMARY KEY("persistence_id","sequence_number")
);
CREATE UNIQUE INDEX "event_journal_ordering_idx" on "event_journal" ("ordering");
CREATE TABLE IF NOT EXISTS "event_tag" (
"event_id" BIGINT,
"persistence_id" VARCHAR(255),
"sequence_number" BIGINT,
"tag" VARCHAR NOT NULL,
PRIMARY KEY("persistence_id", "sequence_number", "tag"),
CONSTRAINT fk_event_journal
FOREIGN KEY("persistence_id", "sequence_number")
REFERENCES "event_journal"("persistence_id", "sequence_number")
ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS "snapshot" (
"persistence_id" VARCHAR(255) NOT NULL,
"sequence_number" BIGINT NOT NULL,
"created" BIGINT NOT NULL,"snapshot_ser_id" INTEGER NOT NULL,
"snapshot_ser_manifest" VARCHAR NOT NULL,
"snapshot_payload" BLOB NOT NULL,
"meta_ser_id" INTEGER,
"meta_ser_manifest" VARCHAR,
"meta_payload" BLOB,
PRIMARY KEY("persistence_id","sequence_number")
);
CREATE SEQUENCE IF NOT EXISTS "global_offset_seq";
CREATE TABLE IF NOT EXISTS "durable_state" (
"global_offset" BIGINT DEFAULT NEXT VALUE FOR "global_offset_seq",
"persistence_id" VARCHAR(255) NOT NULL,
"revision" BIGINT NOT NULL,
"state_payload" BLOB NOT NULL,
"state_serial_id" INTEGER NOT NULL,
"state_serial_manifest" VARCHAR,
"tag" VARCHAR,
"state_timestamp" BIGINT NOT NULL,
PRIMARY KEY("persistence_id")
);
CREATE INDEX IF NOT EXISTS "state_tag_idx" on "durable_state" ("tag");
CREATE INDEX IF NOT EXISTS "state_global_offset_idx" on "durable_state" ("global_offset");
================================================
FILE: core/src/main/resources/schema/h2/h2-drop-schema-legacy.sql
================================================
DROP TABLE IF EXISTS PUBLIC."journal";
DROP TABLE IF EXISTS PUBLIC."snapshot";
DROP TABLE IF EXISTS PUBLIC."durable_state";
================================================
FILE: core/src/main/resources/schema/h2/h2-drop-schema.sql
================================================
DROP TABLE IF EXISTS PUBLIC."event_tag";
DROP TABLE IF EXISTS PUBLIC."event_journal";
DROP TABLE IF EXISTS PUBLIC."snapshot";
DROP TABLE IF EXISTS PUBLIC."durable_state";
DROP SEQUENCE IF EXISTS PUBLIC."global_offset_seq";
================================================
FILE: core/src/main/resources/schema/mysql/mysql-create-schema-legacy.sql
================================================
CREATE TABLE IF NOT EXISTS journal (
ordering SERIAL,
persistence_id VARCHAR(255) NOT NULL,
sequence_number BIGINT NOT NULL,
deleted BOOLEAN DEFAULT FALSE NOT NULL,
tags VARCHAR(255) DEFAULT NULL,
message BLOB NOT NULL,
PRIMARY KEY(persistence_id, sequence_number)
);
CREATE UNIQUE INDEX journal_ordering_idx ON journal(ordering);
CREATE TABLE IF NOT EXISTS snapshot (
persistence_id VARCHAR(255) NOT NULL,
sequence_number BIGINT NOT NULL,
created BIGINT NOT NULL,
snapshot BLOB NOT NULL,
PRIMARY KEY (persistence_id, sequence_number)
);
================================================
FILE: core/src/main/resources/schema/mysql/mysql-create-schema.sql
================================================
CREATE TABLE IF NOT EXISTS event_journal (
ordering SERIAL,
deleted BOOLEAN DEFAULT false NOT NULL,
persistence_id VARCHAR(255) NOT NULL,
sequence_number BIGINT NOT NULL,
writer TEXT NOT NULL,
write_timestamp BIGINT NOT NULL,
adapter_manifest TEXT NOT NULL,
event_payload BLOB NOT NULL,
event_ser_id INTEGER NOT NULL,
event_ser_manifest TEXT NOT NULL,
meta_payload BLOB,
meta_ser_id INTEGER,meta_ser_manifest TEXT,
PRIMARY KEY(persistence_id,sequence_number)
);
CREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering);
CREATE TABLE IF NOT EXISTS event_tag (
event_id BIGINT UNSIGNED,
persistence_id VARCHAR(255),
sequence_number BIGINT,
tag VARCHAR(255) NOT NULL,
PRIMARY KEY(persistence_id, sequence_number, tag),
FOREIGN KEY (persistence_id, sequence_number)
REFERENCES event_journal(persistence_id, sequence_number)
ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS snapshot (
persistence_id VARCHAR(255) NOT NULL,
sequence_number BIGINT NOT NULL,
created BIGINT NOT NULL,
snapshot_ser_id INTEGER NOT NULL,
snapshot_ser_manifest TEXT NOT NULL,
snapshot_payload BLOB NOT NULL,
meta_ser_id INTEGER,
meta_ser_manifest TEXT,
meta_payload BLOB,
PRIMARY KEY (persistence_id, sequence_number));
================================================
FILE: core/src/main/resources/schema/mysql/mysql-drop-schema-legacy.sql
================================================
DROP TABLE IF EXISTS journal;
DROP TABLE IF EXISTS snapshot;
================================================
FILE: core/src/main/resources/schema/mysql/mysql-drop-schema.sql
================================================
DROP TABLE IF EXISTS event_tag;
DROP TABLE IF EXISTS event_journal;
DROP TABLE IF EXISTS snapshot;
================================================
FILE: core/src/main/resources/schema/mysql/mysql-event-tag-migration.sql
================================================
-- **************** first step ****************
-- add new column
ALTER TABLE event_tag
ADD persistence_id VARCHAR(255),
ADD sequence_number BIGINT;
-- **************** second step ****************
-- migrate rows
UPDATE event_tag
INNER JOIN event_journal ON event_tag.event_id = event_journal.ordering
SET event_tag.persistence_id = event_journal.persistence_id,
event_tag.sequence_number = event_journal.sequence_number;
-- drop old FK constraint
SELECT CONSTRAINT_NAME
INTO @fk_constraint_name
FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS
WHERE TABLE_NAME = 'event_tag';
SET @alter_query = CONCAT('ALTER TABLE event_tag DROP FOREIGN KEY ', @fk_constraint_name);
PREPARE stmt FROM @alter_query;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
-- drop old PK constraint
ALTER TABLE event_tag
DROP PRIMARY KEY;
-- create new PK constraint for PK column.
ALTER TABLE event_tag
ADD CONSTRAINT
PRIMARY KEY (persistence_id, sequence_number, tag);
-- create new FK constraint for PK column.
ALTER TABLE event_tag
ADD CONSTRAINT fk_event_journal_on_pk
FOREIGN KEY (persistence_id, sequence_number)
REFERENCES event_journal (persistence_id, sequence_number)
ON DELETE CASCADE;
-- alter the event_id to nullable, so we can skip the InsertAndReturn.
ALTER TABLE event_tag
MODIFY COLUMN event_id BIGINT UNSIGNED NULL;
================================================
FILE: core/src/main/resources/schema/oracle/oracle-create-schema-legacy.sql
================================================
CREATE SEQUENCE "ordering_seq" START WITH 1 INCREMENT BY 1 NOMAXVALUE
/
CREATE TABLE "journal" (
"ordering" NUMERIC,
"deleted" char check ("deleted" in (0,1)) NOT NULL,
"persistence_id" VARCHAR(255) NOT NULL,
"sequence_number" NUMERIC NOT NULL,
"tags" VARCHAR(255) DEFAULT NULL,
"message" BLOB NOT NULL,
PRIMARY KEY("persistence_id", "sequence_number")
)
/
CREATE UNIQUE INDEX "journal_ordering_idx" ON "journal"("ordering")
/
CREATE OR REPLACE TRIGGER "ordering_seq_trigger"
BEFORE INSERT ON "journal"
FOR EACH ROW
BEGIN
SELECT "ordering_seq".NEXTVAL INTO :NEW."ordering" FROM DUAL;
END;
/
CREATE OR REPLACE PROCEDURE "reset_sequence"
IS
l_value NUMBER;
BEGIN
EXECUTE IMMEDIATE 'SELECT "ordering_seq".nextval FROM dual' INTO l_value;
EXECUTE IMMEDIATE 'ALTER SEQUENCE "ordering_seq" INCREMENT BY -' || l_value || ' MINVALUE 0';
EXECUTE IMMEDIATE 'SELECT "ordering_seq".nextval FROM dual' INTO l_value;
EXECUTE IMMEDIATE 'ALTER SEQUENCE "ordering_seq" INCREMENT BY 1 MINVALUE 0';
END;
/
CREATE TABLE "snapshot" (
"persistence_id" VARCHAR(255) NOT NULL,
"sequence_number" NUMERIC NOT NULL,
"created" NUMERIC NOT NULL,
"snapshot" BLOB NOT NULL,
PRIMARY KEY ("persistence_id", "sequence_number")
)
/
================================================
FILE: core/src/main/resources/schema/oracle/oracle-create-schema.sql
================================================
CREATE SEQUENCE EVENT_JOURNAL__ORDERING_SEQ START WITH 1 INCREMENT BY 1 NOMAXVALUE
/
CREATE TABLE EVENT_JOURNAL (
ORDERING NUMERIC UNIQUE,
DELETED CHAR(1) DEFAULT 0 NOT NULL check (DELETED in (0, 1)),
PERSISTENCE_ID VARCHAR(255) NOT NULL,
SEQUENCE_NUMBER NUMERIC NOT NULL,
WRITER VARCHAR(255) NOT NULL,
WRITE_TIMESTAMP NUMBER(19) NOT NULL,
ADAPTER_MANIFEST VARCHAR(255),
EVENT_PAYLOAD BLOB NOT NULL,
EVENT_SER_ID NUMBER(10) NOT NULL,
EVENT_SER_MANIFEST VARCHAR(255),
META_PAYLOAD BLOB,
META_SER_ID NUMBER(10),
META_SER_MANIFEST VARCHAR(255),
PRIMARY KEY(PERSISTENCE_ID, SEQUENCE_NUMBER)
)
/
CREATE OR REPLACE TRIGGER EVENT_JOURNAL__ORDERING_TRG
before insert on EVENT_JOURNAL
REFERENCING NEW AS NEW
FOR EACH ROW
WHEN (new.ORDERING is null)
begin
select EVENT_JOURNAL__ORDERING_seq.nextval into :new.ORDERING from sys.dual;
end;
/
CREATE TABLE EVENT_TAG (
EVENT_ID NUMERIC,
PERSISTENCE_ID VARCHAR(255),
SEQUENCE_NUMBER NUMERIC,
TAG VARCHAR(255) NOT NULL,
PRIMARY KEY(PERSISTENCE_ID, SEQUENCE_NUMBER, TAG),
FOREIGN KEY(PERSISTENCE_ID, SEQUENCE_NUMBER) REFERENCES EVENT_JOURNAL(PERSISTENCE_ID, SEQUENCE_NUMBER)
ON DELETE CASCADE
)
/
CREATE TABLE SNAPSHOT (
PERSISTENCE_ID VARCHAR(255) NOT NULL,
SEQUENCE_NUMBER NUMERIC NOT NULL,
CREATED NUMERIC NOT NULL,
SNAPSHOT_SER_ID NUMBER(10) NOT NULL,
SNAPSHOT_SER_MANIFEST VARCHAR(255),
SNAPSHOT_PAYLOAD BLOB NOT NULL,
META_SER_ID NUMBER(10),
META_SER_MANIFEST VARCHAR(255),
META_PAYLOAD BLOB,
PRIMARY KEY(PERSISTENCE_ID,SEQUENCE_NUMBER)
)
/
CREATE OR REPLACE PROCEDURE "reset_sequence"
IS
l_value NUMBER;
BEGIN
EXECUTE IMMEDIATE 'SELECT EVENT_JOURNAL__ORDERING_SEQ.nextval FROM dual' INTO l_value;
EXECUTE IMMEDIATE 'ALTER SEQUENCE EVENT_JOURNAL__ORDERING_SEQ INCREMENT BY -' || l_value || ' MINVALUE 0';
EXECUTE IMMEDIATE 'SELECT EVENT_JOURNAL__ORDERING_SEQ.nextval FROM dual' INTO l_value;
EXECUTE IMMEDIATE 'ALTER SEQUENCE EVENT_JOURNAL__ORDERING_SEQ INCREMENT BY 1 MINVALUE 0';
END;
/
================================================
FILE: core/src/main/resources/schema/oracle/oracle-drop-schema-legacy.sql
================================================
-- (ddl lock timeout in seconds) this allows tests which are still writing to the db to finish gracefully
ALTER SESSION SET ddl_lock_timeout = 150
/
DROP TABLE "journal" CASCADE CONSTRAINT
/
DROP TABLE "snapshot" CASCADE CONSTRAINT
/
DROP TABLE "deleted_to" CASCADE CONSTRAINT
/
DROP TRIGGER "ordering_seq_trigger"
/
DROP PROCEDURE "reset_sequence"
/
DROP SEQUENCE "ordering_seq"
/
================================================
FILE: core/src/main/resources/schema/oracle/oracle-drop-schema.sql
================================================
ALTER SESSION SET ddl_lock_timeout = 15
/
DROP TABLE EVENT_TAG CASCADE CONSTRAINT
/
DROP TABLE EVENT_JOURNAL CASCADE CONSTRAINT
/
DROP TABLE SNAPSHOT CASCADE CONSTRAINT
/
DROP SEQUENCE EVENT_JOURNAL__ORDERING_SEQ
/
DROP TRIGGER EVENT_JOURNAL__ORDERING_TRG
/
================================================
FILE: core/src/main/resources/schema/oracle/oracle-event-tag-migration.sql
================================================
-- **************** first step ****************
-- add new column
ALTER TABLE EVENT_TAG
ADD (PERSISTENCE_ID VARCHAR2(255),
SEQUENCE_NUMBER NUMERIC);
-- **************** second step ****************
-- migrate rows
UPDATE EVENT_TAG
SET PERSISTENCE_ID = (SELECT PERSISTENCE_ID
FROM EVENT_JOURNAL
WHERE EVENT_TAG.EVENT_ID = EVENT_JOURNAL.ORDERING),
SEQUENCE_NUMBER = (SELECT SEQUENCE_NUMBER
FROM EVENT_JOURNAL
WHERE EVENT_TAG.EVENT_ID = EVENT_JOURNAL.ORDERING);
-- drop old FK constraint
DECLARE
v_constraint_name VARCHAR2(255);
BEGIN
SELECT CONSTRAINT_NAME
INTO v_constraint_name
FROM USER_CONSTRAINTS
WHERE TABLE_NAME = 'EVENT_TAG'
AND CONSTRAINT_TYPE = 'R';
IF v_constraint_name IS NOT NULL THEN
EXECUTE IMMEDIATE 'ALTER TABLE EVENT_TAG DROP CONSTRAINT ' || v_constraint_name;
END IF;
COMMIT;
EXCEPTION
WHEN OTHERS THEN
ROLLBACK;
RAISE;
END;
/
-- drop old PK constraint
ALTER TABLE EVENT_TAG
DROP PRIMARY KEY;
-- create new PK constraint for PK column.
ALTER TABLE EVENT_TAG
ADD CONSTRAINT "pk_event_tag"
PRIMARY KEY (PERSISTENCE_ID, SEQUENCE_NUMBER, TAG);
-- create new FK constraint for PK column.
ALTER TABLE EVENT_TAG
ADD CONSTRAINT fk_EVENT_JOURNAL_on_pk
FOREIGN KEY (PERSISTENCE_ID, SEQUENCE_NUMBER)
REFERENCES EVENT_JOURNAL (PERSISTENCE_ID, SEQUENCE_NUMBER)
ON DELETE CASCADE;
-- alter the EVENT_ID to nullable, so we can skip the InsertAndReturn.
ALTER TABLE EVENT_TAG
MODIFY EVENT_ID NULL;
================================================
FILE: core/src/main/resources/schema/postgres/postgres-create-schema-legacy.sql
================================================
CREATE TABLE IF NOT EXISTS public.journal (
ordering BIGSERIAL,
persistence_id VARCHAR(255) NOT NULL,
sequence_number BIGINT NOT NULL,
deleted BOOLEAN DEFAULT FALSE NOT NULL,
tags VARCHAR(255) DEFAULT NULL,
message BYTEA NOT NULL,
PRIMARY KEY(persistence_id, sequence_number)
);
CREATE UNIQUE INDEX IF NOT EXISTS journal_ordering_idx ON public.journal(ordering);
CREATE TABLE IF NOT EXISTS public.snapshot (
persistence_id VARCHAR(255) NOT NULL,
sequence_number BIGINT NOT NULL,
created BIGINT NOT NULL,
snapshot BYTEA NOT NULL,
PRIMARY KEY(persistence_id, sequence_number)
);
CREATE TABLE IF NOT EXISTS public.durable_state (
global_offset BIGSERIAL,
persistence_id VARCHAR(255) NOT NULL,
revision BIGINT NOT NULL,
state_payload BYTEA NOT NULL,
state_serial_id INTEGER NOT NULL,
state_serial_manifest VARCHAR(255),
tag VARCHAR,
state_timestamp BIGINT NOT NULL,
PRIMARY KEY(persistence_id)
);
CREATE INDEX CONCURRENTLY state_tag_idx on public.durable_state (tag);
CREATE INDEX CONCURRENTLY state_global_offset_idx on public.durable_state (global_offset);
================================================
FILE: core/src/main/resources/schema/postgres/postgres-create-schema.sql
================================================
CREATE TABLE IF NOT EXISTS public.event_journal (
ordering BIGSERIAL,
persistence_id VARCHAR(255) NOT NULL,
sequence_number BIGINT NOT NULL,
deleted BOOLEAN DEFAULT FALSE NOT NULL,
writer VARCHAR(255) NOT NULL,
write_timestamp BIGINT,
adapter_manifest VARCHAR(255),
event_ser_id INTEGER NOT NULL,
event_ser_manifest VARCHAR(255) NOT NULL,
event_payload BYTEA NOT NULL,
meta_ser_id INTEGER,
meta_ser_manifest VARCHAR(255),
meta_payload BYTEA,
PRIMARY KEY(persistence_id, sequence_number)
);
CREATE UNIQUE INDEX event_journal_ordering_idx ON public.event_journal(ordering);
CREATE TABLE IF NOT EXISTS public.event_tag(
event_id BIGINT,
persistence_id VARCHAR(255),
sequence_number BIGINT,
tag VARCHAR(256),
PRIMARY KEY(persistence_id, sequence_number, tag),
CONSTRAINT fk_event_journal
FOREIGN KEY(persistence_id, sequence_number)
REFERENCES event_journal(persistence_id, sequence_number)
ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS public.snapshot (
persistence_id VARCHAR(255) NOT NULL,
sequence_number BIGINT NOT NULL,
created BIGINT NOT NULL,
snapshot_ser_id INTEGER NOT NULL,
snapshot_ser_manifest VARCHAR(255) NOT NULL,
snapshot_payload BYTEA NOT NULL,
meta_ser_id INTEGER,
meta_ser_manifest VARCHAR(255),
meta_payload BYTEA,
PRIMARY KEY(persistence_id, sequence_number)
);
CREATE TABLE IF NOT EXISTS public.durable_state (
global_offset BIGSERIAL,
persistence_id VARCHAR(255) NOT NULL,
revision BIGINT NOT NULL,
state_payload BYTEA NOT NULL,
state_serial_id INTEGER NOT NULL,
state_serial_manifest VARCHAR(255),
tag VARCHAR,
state_timestamp BIGINT NOT NULL,
PRIMARY KEY(persistence_id)
);
CREATE INDEX CONCURRENTLY state_tag_idx on public.durable_state (tag);
CREATE INDEX CONCURRENTLY state_global_offset_idx on public.durable_state (global_offset);
================================================
FILE: core/src/main/resources/schema/postgres/postgres-drop-schema-legacy.sql
================================================
DROP TABLE IF EXISTS public.journal;
DROP TABLE IF EXISTS public.snapshot;
DROP TABLE IF EXISTS public.durable_state;
================================================
FILE: core/src/main/resources/schema/postgres/postgres-drop-schema.sql
================================================
DROP TABLE IF EXISTS public.event_tag;
DROP TABLE IF EXISTS public.event_journal;
DROP TABLE IF EXISTS public.snapshot;
DROP TABLE IF EXISTS public.durable_state;
================================================
FILE: core/src/main/resources/schema/postgres/postgres-event-tag-migration.sql
================================================
-- **************** first step ****************
-- add new column
ALTER TABLE public.event_tag
ADD persistence_id VARCHAR(255),
ADD sequence_number BIGINT;
-- **************** second step ****************
-- migrate rows
UPDATE public.event_tag
SET persistence_id = public.event_journal.persistence_id,
sequence_number = public.event_journal.sequence_number
FROM event_journal
WHERE public.event_tag.event_id = public.event_journal.ordering;
-- drop old FK constraint
ALTER TABLE public.event_tag
DROP CONSTRAINT "fk_event_journal";
-- drop old PK constraint
ALTER TABLE public.event_tag
DROP CONSTRAINT "event_tag_pkey";
-- create new PK constraint for PK column.
ALTER TABLE public.event_tag
ADD CONSTRAINT "pk_event_tag"
PRIMARY KEY (persistence_id, sequence_number, tag);
-- create new FK constraint for PK column.
ALTER TABLE public.event_tag
ADD CONSTRAINT "fk_event_journal_on_pk"
FOREIGN KEY (persistence_id, sequence_number)
REFERENCES public.event_journal (persistence_id, sequence_number)
ON DELETE CASCADE;
-- alter the event_id to nullable, so we can skip the InsertAndReturn.
ALTER TABLE public.event_tag
ALTER COLUMN event_id DROP NOT NULL;
================================================
FILE: core/src/main/resources/schema/sqlserver/sqlserver-create-schema-legacy.sql
================================================
IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID(N'"journal"') AND type in (N'U'))
begin
CREATE TABLE journal (
"ordering" BIGINT IDENTITY(1,1) NOT NULL,
"deleted" BIT DEFAULT 0 NOT NULL,
"persistence_id" VARCHAR(255) NOT NULL,
"sequence_number" NUMERIC(10,0) NOT NULL,
"tags" VARCHAR(255) NULL DEFAULT NULL,
"message" VARBINARY(max) NOT NULL,
PRIMARY KEY ("persistence_id", "sequence_number")
)
CREATE UNIQUE INDEX journal_ordering_idx ON journal (ordering)
end;
IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID(N'"snapshot"') AND type in (N'U'))
begin
CREATE TABLE snapshot (
"persistence_id" VARCHAR(255) NOT NULL,
"sequence_number" NUMERIC(10,0) NOT NULL,
"created" NUMERIC NOT NULL,
"snapshot" VARBINARY(max) NOT NULL,
PRIMARY KEY ("persistence_id", "sequence_number")
);
end;
================================================
FILE: core/src/main/resources/schema/sqlserver/sqlserver-create-schema-varchar.sql
================================================
/*
Akka Persistence JDBC versions from 5.0.0 through 5.1.0 used this schema. The only difference from the
post-5.0.4 schema is the use of VARCHAR instead of NVARCHAR for string fields. It is strongly
recommended that new uses of Akka Persistence JDBC 5.0.0 and later use the NVARCHAR schema. This schema is
still usable with post-5.0.4 versions of Akka Persistence JDBC, though it will not support Unicode persistence IDs,
manifests, or tags.
Additionally, if using this schema, it is highly recommended to not have the SQL Server JDBC client send
strings as Unicode, by appending ;sendStringParametersAsUnicode=false to the JDBC connection string.
*/
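-- Illustrative example of such a JDBC connection string (host, port and database name are placeholders):
--   jdbc:sqlserver://localhost:1433;databaseName=akka;sendStringParametersAsUnicode=false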
CREATE TABLE event_journal(
"ordering" BIGINT IDENTITY(1,1) NOT NULL,
"deleted" BIT DEFAULT 0 NOT NULL,
"persistence_id" VARCHAR(255) NOT NULL,
"sequence_number" NUMERIC(10,0) NOT NULL,
"writer" VARCHAR(255) NOT NULL,
"write_timestamp" BIGINT NOT NULL,
"adapter_manifest" VARCHAR(MAX) NOT NULL,
"event_payload" VARBINARY(MAX) NOT NULL,
"event_ser_id" INTEGER NOT NULL,
"event_ser_manifest" VARCHAR(MAX) NOT NULL,
"meta_payload" VARBINARY(MAX),
"meta_ser_id" INTEGER,
"meta_ser_manifest" VARCHAR(MAX)
PRIMARY KEY ("persistence_id", "sequence_number")
);
CREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering);
CREATE TABLE event_tag (
"event_id" BIGINT NOT NULL,
"tag" VARCHAR(255) NOT NULL
PRIMARY KEY ("event_id","tag")
constraint "fk_event_journal"
foreign key("event_id")
references "dbo"."event_journal"("ordering")
on delete CASCADE
);
CREATE TABLE "snapshot" (
"persistence_id" VARCHAR(255) NOT NULL,
"sequence_number" NUMERIC(10,0) NOT NULL,
"created" BIGINT NOT NULL,
"snapshot_ser_id" INTEGER NOT NULL,
"snapshot_ser_manifest" VARCHAR(255) NOT NULL,
"snapshot_payload" VARBINARY(MAX) NOT NULL,
"meta_ser_id" INTEGER,
"meta_ser_manifest" VARCHAR(255),
"meta_payload" VARBINARY(MAX),
PRIMARY KEY ("persistence_id", "sequence_number")
)
================================================
FILE: core/src/main/resources/schema/sqlserver/sqlserver-create-schema.sql
================================================
CREATE TABLE event_journal (
"ordering" BIGINT IDENTITY(1,1) NOT NULL,
"deleted" BIT DEFAULT 0 NOT NULL,
"persistence_id" NVARCHAR(255) NOT NULL,
"sequence_number" NUMERIC(10,0) NOT NULL,
"writer" NVARCHAR(255) NOT NULL,
"write_timestamp" BIGINT NOT NULL,
"adapter_manifest" NVARCHAR(MAX) NOT NULL,
"event_payload" VARBINARY(MAX) NOT NULL,
"event_ser_id" INTEGER NOT NULL,
"event_ser_manifest" NVARCHAR(MAX) NOT NULL,
"meta_payload" VARBINARY(MAX),
"meta_ser_id" INTEGER,
"meta_ser_manifest" NVARCHAR(MAX)
PRIMARY KEY ("persistence_id", "sequence_number")
);
CREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering);
CREATE TABLE event_tag (
"event_id" BIGINT,
"persistence_id" NVARCHAR(255),
"sequence_number" NUMERIC(10,0),
"tag" NVARCHAR(255) NOT NULL
PRIMARY KEY ("persistence_id", "sequence_number","tag"),
constraint "fk_event_journal"
foreign key("persistence_id", "sequence_number")
references "dbo"."event_journal"("persistence_id", "sequence_number")
on delete CASCADE
);
CREATE TABLE "snapshot" (
"persistence_id" NVARCHAR(255) NOT NULL,
"sequence_number" NUMERIC(10,0) NOT NULL,
"created" BIGINT NOT NULL,
"snapshot_ser_id" INTEGER NOT NULL,
"snapshot_ser_manifest" NVARCHAR(255) NOT NULL,
"snapshot_payload" VARBINARY(MAX) NOT NULL,
"meta_ser_id" INTEGER,
"meta_ser_manifest" NVARCHAR(255),
"meta_payload" VARBINARY(MAX),
PRIMARY KEY ("persistence_id", "sequence_number")
)
================================================
FILE: core/src/main/resources/schema/sqlserver/sqlserver-drop-schema-legacy.sql
================================================
DROP TABLE IF EXISTS journal;
DROP TABLE IF EXISTS snapshot;
================================================
FILE: core/src/main/resources/schema/sqlserver/sqlserver-drop-schema.sql
================================================
DROP TABLE IF EXISTS event_tag;
DROP TABLE IF EXISTS event_journal;
DROP TABLE IF EXISTS snapshot;
================================================
FILE: core/src/main/resources/schema/sqlserver/sqlserver-event-tag-migration.sql
================================================
-- **************** first step ****************
-- add new column
ALTER TABLE event_tag
ADD persistence_id VARCHAR(255),
ADD sequence_number BIGINT;
-- **************** second step ****************
-- migrate rows
UPDATE event_tag
SET persistence_id = event_journal.persistence_id,
sequence_number = event_journal.sequence_number
FROM event_journal
WHERE event_tag.event_id = event_journal.ordering;
-- drop old FK constraint
DECLARE @fkConstraintName NVARCHAR(MAX);
DECLARE @dropFKConstraintQuery NVARCHAR(MAX);
SELECT @fkConstraintName = CONSTRAINT_NAME
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS
WHERE TABLE_NAME = 'event_tag'
AND CONSTRAINT_TYPE = 'FOREIGN KEY';
IF @fkConstraintName IS NOT NULL
BEGIN
SET @dropFKConstraintQuery = 'ALTER TABLE event_tag DROP CONSTRAINT ' + QUOTENAME(@fkConstraintName);
EXEC sp_executesql @dropFKConstraintQuery;
END
-- drop old PK constraint
DECLARE @constraintName NVARCHAR(MAX);
DECLARE @dropConstraintQuery NVARCHAR(MAX);
SELECT @constraintName = CONSTRAINT_NAME
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS
WHERE TABLE_NAME = 'event_tag'
AND CONSTRAINT_TYPE = 'PRIMARY KEY';
IF @constraintName IS NOT NULL
BEGIN
SET @dropConstraintQuery = 'ALTER TABLE event_tag DROP CONSTRAINT ' + QUOTENAME(@constraintName);
EXEC sp_executesql @dropConstraintQuery;
END
-- create new PK constraint for PK column.
ALTER TABLE event_tag
ALTER COLUMN persistence_id NVARCHAR(255) NOT NULL
ALTER TABLE event_tag
ALTER COLUMN sequence_number NUMERIC(10, 0) NOT NULL
ALTER TABLE event_tag
ADD CONSTRAINT "pk_event_tag"
PRIMARY KEY (persistence_id, sequence_number, TAG)
-- create new FK constraint for PK column.
ALTER TABLE event_tag
ADD CONSTRAINT "fk_event_journal_on_pk"
FOREIGN KEY (persistence_id, sequence_number)
REFERENCES event_journal (persistence_id, sequence_number)
ON DELETE CASCADE
-- alter the event_id to nullable, so we can skip the InsertAndReturn.
ALTER TABLE event_tag
ALTER COLUMN event_id BIGINT NULL
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/AkkaSerialization.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc
import akka.annotation.InternalApi
import akka.persistence.PersistentRepr
import akka.persistence.jdbc.state.DurableStateTables
import akka.persistence.jdbc.journal.dao.JournalTables.JournalAkkaSerializationRow
import akka.serialization.{ Serialization, Serializers }
import scala.util.{ Success, Try }
/**
* INTERNAL API
*/
@InternalApi
object AkkaSerialization {
case class AkkaSerialized(serId: Int, serManifest: String, payload: Array[Byte])
def serialize(serialization: Serialization, payload: Any): Try[AkkaSerialized] = {
val p2 = payload.asInstanceOf[AnyRef]
val serializer = serialization.findSerializerFor(p2)
val serManifest = Serializers.manifestFor(serializer, p2)
val serialized = serialization.serialize(p2)
serialized.map(payload => AkkaSerialized(serializer.identifier, serManifest, payload))
}
def fromRow(serialization: Serialization)(row: JournalAkkaSerializationRow): Try[(PersistentRepr, Long)] = {
serialization.deserialize(row.eventPayload, row.eventSerId, row.eventSerManifest).flatMap { payload =>
val metadata = for {
mPayload <- row.metaPayload
mSerId <- row.metaSerId
} yield (mPayload, mSerId)
val repr = PersistentRepr(
payload,
row.sequenceNumber,
row.persistenceId,
row.adapterManifest,
row.deleted,
sender = null,
writerUuid = row.writer)
// This means that failure to deserialize the meta will fail the read, I think this is the correct thing to do
for {
withMeta <- metadata match {
case None => Success(repr)
case Some((payload, id)) =>
serialization.deserialize(payload, id, row.metaSerManifest.getOrElse("")).map { meta =>
repr.withMetadata(meta)
}
}
} yield (withMeta.withTimestamp(row.writeTimestamp), row.ordering)
}
}
def fromDurableStateRow(serialization: Serialization)(row: DurableStateTables.DurableStateRow): Try[AnyRef] = {
serialization.deserialize(row.statePayload, row.stateSerId, row.stateSerManifest.getOrElse(""))
}
}
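The object above is internal API; purely as an illustration of how its serialize helper behaves, here is a minimal sketch (the actor system name is made up and the default serializer bindings are assumed):

import akka.actor.ActorSystem
import akka.persistence.jdbc.AkkaSerialization
import akka.serialization.SerializationExtension

object SerializeDemo extends App {
  // any ActorSystem gives access to the configured serializers
  val system = ActorSystem("ser-demo")
  val serialization = SerializationExtension(system)

  // picks whatever serializer Akka has bound for the payload type and captures id + manifest
  AkkaSerialization.serialize(serialization, "hello world").foreach { s =>
    println(s"serializer id=${s.serId}, manifest='${s.serManifest}', ${s.payload.length} bytes")
  }

  system.terminate()
}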
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/JournalRow.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc
final case class JournalRow(
ordering: Long,
deleted: Boolean,
persistenceId: String,
sequenceNumber: Long,
message: Array[Byte],
tags: Option[String] = None)
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/cleanup/javadsl/EventSourcedCleanup.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.cleanup.javadsl
import java.util.concurrent.CompletionStage
import scala.jdk.FutureConverters._
import akka.Done
import akka.actor.ClassicActorSystemProvider
import akka.annotation.ApiMayChange
import akka.persistence.jdbc.cleanup.scaladsl
/**
* Java API: Tool for deleting events and/or snapshots for a `persistenceId` without using persistent actors.
*
* When running an operation with `EventSourcedCleanup` that deletes all events for a persistence id, the actor with
* that persistence id must not be running! If the actor were restarted it would be recovered to the wrong state,
* since the stored events have been deleted. Deleting events up to a snapshot can still be done while the actor is
* running.
*
* If `resetSequenceNumber` is `true` then an entity created later with the same `persistenceId` will start from
* sequence number 0. Otherwise it will continue from the highest previously used sequence number.
*
* WARNING: reusing the same `persistenceId` after resetting the sequence number should be avoided, since it might be
* confusing to reuse the same sequence number for new events.
*/
@ApiMayChange
final class EventSourcedCleanup private (delegate: scaladsl.EventSourcedCleanup) {
def this(systemProvider: ClassicActorSystemProvider, journalConfigPath: String, snapshotConfigPath: String) =
this(new scaladsl.EventSourcedCleanup(systemProvider, journalConfigPath, snapshotConfigPath))
def this(systemProvider: ClassicActorSystemProvider) =
this(systemProvider, "jdbc-journal", "jdbc-snapshot-store")
/**
* Delete all events related to one single `persistenceId`. Snapshots are not deleted.
*/
def deleteAllEvents(persistenceId: String, resetSequenceNumber: Boolean): CompletionStage[Done] =
delegate.deleteAllEvents(persistenceId, resetSequenceNumber).asJava
/**
* Delete snapshots related to one single `persistenceId`. Events are not deleted.
*/
def deleteSnapshot(persistenceId: String): CompletionStage[Done] =
delegate.deleteSnapshot(persistenceId).asJava
/**
* Delete everything related to one single `persistenceId`. All events and snapshots are deleted.
*/
def deleteAll(persistenceId: String, resetSequenceNumber: Boolean): CompletionStage[Done] =
delegate.deleteAll(persistenceId, resetSequenceNumber).asJava
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/cleanup/scaladsl/EventSourcedCleanup.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.cleanup.scaladsl
import scala.concurrent.{ ExecutionContext, Future }
import akka.Done
import akka.actor.{ ActorSystem, ClassicActorSystemProvider }
import akka.annotation.ApiMayChange
import akka.persistence.jdbc.config.{ JournalConfig, SnapshotConfig }
import akka.persistence.jdbc.db.SlickExtension
import akka.persistence.jdbc.journal.dao.JournalDaoInstantiation
import akka.persistence.jdbc.snapshot.dao.SnapshotDaoInstantiation
import akka.stream.{ Materializer, SystemMaterializer }
/**
* Scala API: Tool for deleting events and/or snapshots for a `persistenceId` without using persistent actors.
*
* When running an operation with `EventSourcedCleanup` that deletes all events for a persistence id, the actor with
* that persistence id must not be running! If the actor were restarted it would be recovered to the wrong state,
* since the stored events have been deleted. Deleting events up to a snapshot can still be done while the actor is
* running.
*
* If `resetSequenceNumber` is `true` then an entity created later with the same `persistenceId` will start from
* sequence number 0. Otherwise it will continue from the highest previously used sequence number.
*
* WARNING: reusing the same `persistenceId` after resetting the sequence number should be avoided, since it might be
* confusing to reuse the same sequence number for new events.
*/
@ApiMayChange
final class EventSourcedCleanup(
systemProvider: ClassicActorSystemProvider,
journalConfigPath: String,
snapshotConfigPath: String) {
def this(systemProvider: ClassicActorSystemProvider) =
this(systemProvider, "jdbc-journal", "jdbc-snapshot-store")
private implicit val system: ActorSystem = systemProvider.classicSystem
private implicit val executionContext: ExecutionContext = system.dispatchers.defaultGlobalDispatcher
private implicit val mat: Materializer = SystemMaterializer(system).materializer
private val slick = SlickExtension(system)
private val journalConfig = system.settings.config.getConfig(journalConfigPath)
private val journalDao =
JournalDaoInstantiation.journalDao(new JournalConfig(journalConfig), slick.database(journalConfig))
private val snapshotConfig = system.settings.config.getConfig(snapshotConfigPath)
private val snapshotDao =
SnapshotDaoInstantiation.snapshotDao(new SnapshotConfig(snapshotConfig), slick.database(snapshotConfig))
/**
* Delete all events related to one single `persistenceId`. Snapshots are not deleted.
*/
def deleteAllEvents(persistenceId: String, resetSequenceNumber: Boolean): Future[Done] = {
journalDao.deleteEventsTo(persistenceId, toSequenceNr = Long.MaxValue, resetSequenceNumber).map(_ => Done)
}
/**
* Delete snapshots related to one single `persistenceId`. Events are not deleted.
*/
def deleteSnapshot(persistenceId: String): Future[Done] = {
snapshotDao.deleteUpToMaxSequenceNr(persistenceId, Long.MaxValue).map(_ => Done)
}
/**
* Delete everything related to one single `persistenceId`. All events and snapshots are deleted.
*/
def deleteAll(persistenceId: String, resetSequenceNumber: Boolean): Future[Done] = {
for {
_ <- deleteAllEvents(persistenceId, resetSequenceNumber)
_ <- deleteSnapshot(persistenceId)
} yield Done
}
}
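A minimal usage sketch of the Scala API, assuming the default `jdbc-journal` and `jdbc-snapshot-store` plugin paths are configured and the persistent actor for the given persistence id is not running (the persistence id used here is illustrative):

import akka.Done
import akka.actor.ActorSystem
import akka.persistence.jdbc.cleanup.scaladsl.EventSourcedCleanup
import scala.concurrent.Future

object CleanupDemo extends App {
  val system = ActorSystem("cleanup-demo")
  import system.dispatcher

  val cleanup = new EventSourcedCleanup(system)

  // removes all events and snapshots for the persistence id; with resetSequenceNumber = true
  // a new entity with the same id will start again from sequence number 0
  val done: Future[Done] = cleanup.deleteAll("shopping-cart-42", resetSequenceNumber = true)
  done.foreach(_ => system.log.info("cleanup finished"))
}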
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/config/AkkaPersistenceConfig.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.config
import akka.persistence.jdbc.util.ConfigOps._
import com.typesafe.config.Config
import scala.concurrent.duration._
object ConfigKeys {
val useSharedDb = "use-shared-db"
}
class SlickConfiguration(config: Config) {
val jndiName: Option[String] = config.asStringOption("jndiName")
val jndiDbName: Option[String] = config.asStringOption("jndiDbName")
override def toString: String = s"SlickConfiguration($jndiName,$jndiDbName)"
}
class LegacyJournalTableColumnNames(config: Config) {
private val cfg = config.getConfig("tables.legacy_journal.columnNames")
val ordering: String = cfg.getString("ordering")
val deleted: String = cfg.getString("deleted")
val persistenceId: String = cfg.getString("persistenceId")
val sequenceNumber: String = cfg.getString("sequenceNumber")
val created: String = cfg.getString("created")
val tags: String = cfg.getString("tags")
val message: String = cfg.getString("message")
override def toString: String = s"JournalTableColumnNames($persistenceId,$sequenceNumber,$created,$tags,$message)"
}
class EventJournalTableColumnNames(config: Config) {
private val cfg = config.getConfig("tables.event_journal.columnNames")
val ordering: String = cfg.getString("ordering")
val deleted: String = cfg.getString("deleted")
val persistenceId: String = cfg.getString("persistenceId")
val sequenceNumber: String = cfg.getString("sequenceNumber")
val writer: String = cfg.getString("writer")
val writeTimestamp: String = cfg.getString("writeTimestamp")
val adapterManifest: String = cfg.getString("adapterManifest")
val eventPayload: String = cfg.getString("eventPayload")
val eventSerId: String = cfg.getString("eventSerId")
val eventSerManifest: String = cfg.getString("eventSerManifest")
val metaPayload: String = cfg.getString("metaPayload")
val metaSerId: String = cfg.getString("metaSerId")
val metaSerManifest: String = cfg.getString("metaSerManifest")
}
class EventTagTableColumnNames(config: Config) {
private val cfg = config.getConfig("tables.event_tag.columnNames")
val eventId: String = cfg.getString("eventId") // for compatibility
val persistenceId: String = cfg.getString("persistenceId")
val sequenceNumber: String = cfg.getString("sequenceNumber")
val tag: String = cfg.getString("tag")
}
class LegacyJournalTableConfiguration(config: Config) {
private val cfg = config.getConfig("tables.legacy_journal")
val tableName: String = cfg.getString("tableName")
val schemaName: Option[String] = cfg.asStringOption("schemaName")
val columnNames: LegacyJournalTableColumnNames = new LegacyJournalTableColumnNames(config)
override def toString: String = s"LegacyJournalTableConfiguration($tableName,$schemaName,$columnNames)"
}
class EventJournalTableConfiguration(config: Config) {
private val cfg = config.getConfig("tables.event_journal")
val tableName: String = cfg.getString("tableName")
val schemaName: Option[String] = cfg.asStringOption("schemaName")
val columnNames: EventJournalTableColumnNames = new EventJournalTableColumnNames(config)
override def toString: String = s"EventJournalTableConfiguration($tableName,$schemaName,$columnNames)"
}
class EventTagTableConfiguration(config: Config) {
private val cfg = config.getConfig("tables.event_tag")
val legacyTagKey: Boolean = cfg.getBoolean("legacy-tag-key")
val tableName: String = cfg.getString("tableName")
val schemaName: Option[String] = cfg.asStringOption("schemaName")
val columnNames: EventTagTableColumnNames = new EventTagTableColumnNames(config)
}
class LegacySnapshotTableColumnNames(config: Config) {
private val cfg = config.getConfig("tables.legacy_snapshot.columnNames")
val persistenceId: String = cfg.getString("persistenceId")
val sequenceNumber: String = cfg.getString("sequenceNumber")
val created: String = cfg.getString("created")
val snapshot: String = cfg.getString("snapshot")
override def toString: String = s"SnapshotTableColumnNames($persistenceId,$sequenceNumber,$created,$snapshot)"
}
class SnapshotTableColumnNames(config: Config) {
private val cfg = config.getConfig("tables.snapshot.columnNames")
val persistenceId: String = cfg.getString("persistenceId")
val sequenceNumber: String = cfg.getString("sequenceNumber")
val created: String = cfg.getString("created")
val snapshotPayload: String = cfg.getString("snapshotPayload")
val snapshotSerId: String = cfg.getString("snapshotSerId")
val snapshotSerManifest: String = cfg.getString("snapshotSerManifest")
val metaPayload: String = cfg.getString("metaPayload")
val metaSerId: String = cfg.getString("metaSerId")
val metaSerManifest: String = cfg.getString("metaSerManifest")
}
class LegacySnapshotTableConfiguration(config: Config) {
private val cfg = config.getConfig("tables.legacy_snapshot")
val tableName: String = cfg.getString("tableName")
val schemaName: Option[String] = cfg.asStringOption("schemaName")
val columnNames: LegacySnapshotTableColumnNames = new LegacySnapshotTableColumnNames(config)
override def toString: String = s"LegacySnapshotTableConfiguration($tableName,$schemaName,$columnNames)"
}
class SnapshotTableConfiguration(config: Config) {
private val cfg = config.getConfig("tables.snapshot")
val tableName: String = cfg.getString("tableName")
val schemaName: Option[String] = cfg.asStringOption("schemaName")
val columnNames: SnapshotTableColumnNames = new SnapshotTableColumnNames(config)
override def toString: String = s"SnapshotTableConfiguration($tableName,$schemaName,$columnNames)"
}
class JournalPluginConfig(config: Config) {
val tagSeparator: String = config.getString("tagSeparator")
val dao: String = config.getString("dao")
override def toString: String = s"JournalPluginConfig($tagSeparator,$dao)"
}
class BaseDaoConfig(config: Config) {
val bufferSize: Int = config.getInt("bufferSize")
val batchSize: Int = config.getInt("batchSize")
val replayBatchSize: Int = config.getInt("replayBatchSize")
val parallelism: Int = config.getInt("parallelism")
override def toString: String = s"BaseDaoConfig($bufferSize,$batchSize,$parallelism)"
}
class ReadJournalPluginConfig(config: Config) {
val tagSeparator: String = config.getString("tagSeparator")
val dao: String = config.getString("dao")
override def toString: String = s"ReadJournalPluginConfig($tagSeparator,$dao)"
}
class SnapshotPluginConfig(config: Config) {
val dao: String = config.getString("dao")
override def toString: String = s"SnapshotPluginConfig($dao)"
}
// aggregations
class JournalConfig(config: Config) {
val journalTableConfiguration = new LegacyJournalTableConfiguration(config)
val eventJournalTableConfiguration = new EventJournalTableConfiguration(config)
val eventTagTableConfiguration = new EventTagTableConfiguration(config)
val pluginConfig = new JournalPluginConfig(config)
val daoConfig = new BaseDaoConfig(config)
val useSharedDb: Option[String] = config.asStringOption(ConfigKeys.useSharedDb)
override def toString: String = s"JournalConfig($journalTableConfiguration,$pluginConfig,$useSharedDb)"
}
class SnapshotConfig(config: Config) {
val legacySnapshotTableConfiguration = new LegacySnapshotTableConfiguration(config)
val snapshotTableConfiguration = new SnapshotTableConfiguration(config)
val pluginConfig = new SnapshotPluginConfig(config)
val useSharedDb: Option[String] = config.asStringOption(ConfigKeys.useSharedDb)
override def toString: String = s"SnapshotConfig($snapshotTableConfiguration,$pluginConfig,$useSharedDb)"
}
object JournalSequenceRetrievalConfig {
def apply(config: Config): JournalSequenceRetrievalConfig =
JournalSequenceRetrievalConfig(
batchSize = config.getInt("journal-sequence-retrieval.batch-size"),
maxTries = config.getInt("journal-sequence-retrieval.max-tries"),
queryDelay = config.asFiniteDuration("journal-sequence-retrieval.query-delay"),
maxBackoffQueryDelay = config.asFiniteDuration("journal-sequence-retrieval.max-backoff-query-delay"),
askTimeout = config.asFiniteDuration("journal-sequence-retrieval.ask-timeout"))
}
case class JournalSequenceRetrievalConfig(
batchSize: Int,
maxTries: Int,
queryDelay: FiniteDuration,
maxBackoffQueryDelay: FiniteDuration,
askTimeout: FiniteDuration)
class ReadJournalConfig(config: Config) {
val journalTableConfiguration = new LegacyJournalTableConfiguration(config)
val eventJournalTableConfiguration = new EventJournalTableConfiguration(config)
val eventTagTableConfiguration = new EventTagTableConfiguration(config)
val journalSequenceRetrievalConfiguration = JournalSequenceRetrievalConfig(config)
val pluginConfig = new ReadJournalPluginConfig(config)
val refreshInterval: FiniteDuration = config.asFiniteDuration("refresh-interval")
val maxBufferSize: Int = config.getInt("max-buffer-size")
val eventsByTagBufferSizesPerQuery: Long = config.getLong("events-by-tag-buffer-sizes-per-query")
require(eventsByTagBufferSizesPerQuery >= 0, "events-by-tag-buffer-sizes-per-query must not be negative")
val addShutdownHook: Boolean = config.getBoolean("add-shutdown-hook")
override def toString: String =
s"ReadJournalConfig($journalTableConfiguration,$pluginConfig,$refreshInterval,$maxBufferSize,$addShutdownHook)"
}
class DurableStateTableColumnNames(config: Config) {
private val cfg = config.getConfig("tables.durable_state.columnNames")
val globalOffset: String = cfg.getString("globalOffset")
val persistenceId: String = cfg.getString("persistenceId")
val revision: String = cfg.getString("revision")
val statePayload: String = cfg.getString("statePayload")
val stateSerId: String = cfg.getString("stateSerId")
val stateSerManifest: String = cfg.getString("stateSerManifest")
val tag: String = cfg.getString("tag")
val stateTimestamp: String = cfg.getString("stateTimestamp")
}
class DurableStateTableConfiguration(config: Config) {
private val cfg = config.getConfig("tables.durable_state")
val tableName: String = cfg.getString("tableName")
val refreshInterval: FiniteDuration = config.asFiniteDuration("refreshInterval")
val batchSize: Int = config.getInt("batchSize")
val schemaName: Option[String] = cfg.asStringOption("schemaName")
val columnNames: DurableStateTableColumnNames = new DurableStateTableColumnNames(config)
val stateSequenceConfig = DurableStateSequenceRetrievalConfig(config)
override def toString: String = s"DurableStateTableConfiguration($tableName,$schemaName,$columnNames)"
}
object DurableStateSequenceRetrievalConfig {
def apply(config: Config): DurableStateSequenceRetrievalConfig =
DurableStateSequenceRetrievalConfig(
batchSize = config.getInt("durable-state-sequence-retrieval.batch-size"),
maxTries = config.getInt("durable-state-sequence-retrieval.max-tries"),
queryDelay = config.asFiniteDuration("durable-state-sequence-retrieval.query-delay"),
maxBackoffQueryDelay = config.asFiniteDuration("durable-state-sequence-retrieval.max-backoff-query-delay"),
askTimeout = config.asFiniteDuration("durable-state-sequence-retrieval.ask-timeout"),
revisionCacheCapacity = config.getInt("durable-state-sequence-retrieval.revision-cache-capacity"))
}
case class DurableStateSequenceRetrievalConfig(
batchSize: Int,
maxTries: Int,
queryDelay: FiniteDuration,
maxBackoffQueryDelay: FiniteDuration,
askTimeout: FiniteDuration,
revisionCacheCapacity: Int)
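As a rough illustration of how these classes are wired together, the plugin's own config block can be turned into a `JournalConfig` directly, with defaults coming from the library's `reference.conf`; a minimal sketch (the actor system name is made up):

import akka.actor.ActorSystem
import akka.persistence.jdbc.config.JournalConfig

object ConfigDemo extends App {
  val system = ActorSystem("config-demo")

  // "jdbc-journal" is the default plugin path; its sub-config feeds all table and DAO settings
  val journalConfig = new JournalConfig(system.settings.config.getConfig("jdbc-journal"))

  println(journalConfig.eventJournalTableConfiguration.tableName) // event_journal by default
  println(journalConfig.eventTagTableConfiguration.legacyTagKey)
  println(journalConfig.daoConfig.batchSize)

  system.terminate()
}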
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/db/SlickDatabase.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.db
import akka.actor.ActorSystem
import akka.annotation.InternalApi
import javax.naming.InitialContext
import akka.persistence.jdbc.config.SlickConfiguration
import com.typesafe.config.Config
import slick.basic.DatabaseConfig
import slick.jdbc.JdbcProfile
import slick.jdbc.JdbcBackend._
/**
* INTERNAL API
*/
@deprecated(message = "Internal API, will be removed in 4.0.0", since = "3.4.0")
object SlickDriver {
/**
* INTERNAL API
*/
@deprecated(message = "Internal API, will be removed in 4.0.0", since = "3.4.0")
def forDriverName(config: Config): JdbcProfile =
SlickDatabase.profile(config, "slick")
}
/**
* INTERNAL API
*/
object SlickDatabase {
/**
* INTERNAL API
*/
@deprecated(message = "Internal API, will be removed in 4.0.0", since = "3.4.0")
def forConfig(config: Config, slickConfiguration: SlickConfiguration): Database = {
database(config, slickConfiguration, "slick.db")
}
/**
* INTERNAL API
*/
private[jdbc] def profile(config: Config, path: String): JdbcProfile =
DatabaseConfig.forConfig[JdbcProfile](path, config).profile
/**
* INTERNAL API
*/
private[jdbc] def database(config: Config, slickConfiguration: SlickConfiguration, path: String): Database = {
slickConfiguration.jndiName
.map(Database.forName(_, None))
.orElse {
slickConfiguration.jndiDbName.map(new InitialContext().lookup(_).asInstanceOf[Database])
}
.getOrElse(Database.forConfig(path, config))
}
/**
* INTERNAL API
*/
private[jdbc] def initializeEagerly(
config: Config,
slickConfiguration: SlickConfiguration,
path: String): SlickDatabase = {
val dbPath = if (path.isEmpty) "db" else s"$path.db"
EagerSlickDatabase(database(config, slickConfiguration, dbPath), profile(config, path))
}
}
trait SlickDatabase {
def database: Database
def profile: JdbcProfile
/**
* If true, the requesting side (usually a read/write/snapshot journal) should
* shut down the database when it closes. If false, it should leave the
* database connection pool open, since it might still be used elsewhere.
*/
def allowShutdown: Boolean
}
@InternalApi
case class EagerSlickDatabase(database: Database, profile: JdbcProfile) extends SlickDatabase {
override def allowShutdown: Boolean = true
}
/**
* A LazySlickDatabase lazily initializes a database; it also manages the shutdown of that database.
* @param config The configuration used to create the database
*/
@InternalApi
class LazySlickDatabase(config: Config, system: ActorSystem) extends SlickDatabase {
val profile: JdbcProfile = SlickDatabase.profile(config, path = "")
lazy val database: Database = {
val db = SlickDatabase.database(config, new SlickConfiguration(config), path = "db")
system.registerOnTermination {
db.close()
}
db
}
/** This database shutdown is managed by the db holder, so users of this db do not need to bother shutting it down */
override def allowShutdown: Boolean = false
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/db/SlickExtension.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.db
import akka.actor.{ ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider }
import akka.persistence.jdbc.config.{ ConfigKeys, SlickConfiguration }
import akka.persistence.jdbc.util.ConfigOps._
import com.typesafe.config.{ Config, ConfigObject }
import scala.jdk.CollectionConverters._
import scala.util.{ Failure, Success }
object SlickExtension extends ExtensionId[SlickExtensionImpl] with ExtensionIdProvider {
override def lookup: SlickExtension.type = SlickExtension
override def createExtension(system: ExtendedActorSystem) = new SlickExtensionImpl(system)
}
class SlickExtensionImpl(system: ExtendedActorSystem) extends Extension {
private val dbProvider: SlickDatabaseProvider = {
val fqcn = system.settings.config.getString("akka-persistence-jdbc.database-provider-fqcn")
val args = List(classOf[ActorSystem] -> system)
system.dynamicAccess.createInstanceFor[SlickDatabaseProvider](fqcn, args) match {
case Success(result) => result
case Failure(t) => throw new RuntimeException("Failed to create SlickDatabaseProvider", t)
}
}
def database(config: Config): SlickDatabase = dbProvider.database(config)
}
/**
* User overridable database provider.
* Since this provider is called from an akka extension it must be thread safe!
*
* A SlickDatabaseProvider is loaded using reflection. The instance is created as follows:
* - The fully qualified class name is taken from the `akka-persistence-jdbc.database-provider-fqcn` setting.
* - The constructor with one argument of type [[akka.actor.ActorSystem]] is used to create the instance.
*   Therefore the class must have such a constructor.
*/
trait SlickDatabaseProvider {
/**
* Create or retrieve the database
* @param config The configuration which may be used to create the database. If the database is shared
* then the SlickDatabaseProvider implementation may choose to ignore this parameter.
*/
def database(config: Config): SlickDatabase
}
class DefaultSlickDatabaseProvider(system: ActorSystem) extends SlickDatabaseProvider {
val sharedDatabases: Map[String, LazySlickDatabase] = system.settings.config
.getObject("akka-persistence-jdbc.shared-databases")
.asScala
.flatMap {
case (key, confObj: ConfigObject) =>
val conf = confObj.toConfig
if (conf.hasPath("profile")) {
// Only create the LazySlickDatabase if a profile has actually been configured; this ensures that the example in the reference.conf is ignored
List(key -> new LazySlickDatabase(conf, system))
} else Nil
case (key, notAnObject) =>
throw new RuntimeException(
s"""Expected "akka-persistence-jdbc.shared-databases.$key" to be a config ConfigObject, but got ${notAnObject
.valueType()} (${notAnObject.getClass})""")
}
.toMap
private def getSharedDbOrThrow(sharedDbName: String): LazySlickDatabase =
sharedDatabases.getOrElse(
sharedDbName,
throw new RuntimeException(
s"No shared database is configured under akka-persistence-jdbc.shared-databases.$sharedDbName"))
def database(config: Config): SlickDatabase = {
config.asStringOption(ConfigKeys.useSharedDb) match {
case None => SlickDatabase.initializeEagerly(config, new SlickConfiguration(config.getConfig("slick")), "slick")
case Some(sharedDbName) =>
getSharedDbOrThrow(sharedDbName)
}
}
}
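A hedged sketch of a custom provider: the class name, the `my-app.database` config path and the Postgres profile below are illustrative, the only hard requirements being the single `ActorSystem` constructor and registering the class under `akka-persistence-jdbc.database-provider-fqcn`:

import akka.actor.ActorSystem
import akka.persistence.jdbc.db.{ SlickDatabase, SlickDatabaseProvider }
import com.typesafe.config.Config
import slick.jdbc.{ JdbcProfile, PostgresProfile }
import slick.jdbc.JdbcBackend.Database

// Hypothetical provider that hands out one externally managed connection pool to every plugin.
class MySharedDatabaseProvider(system: ActorSystem) extends SlickDatabaseProvider {
  private val db: Database = Database.forConfig("my-app.database", system.settings.config)
  system.registerOnTermination(db.close())

  override def database(config: Config): SlickDatabase =
    new SlickDatabase {
      override def database: Database = db
      override def profile: JdbcProfile = PostgresProfile
      // lifecycle is owned by this provider, so the journals must not close the pool
      override def allowShutdown: Boolean = false
    }
}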
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/JdbcAsyncWriteJournal.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal
import java.util.{ HashMap => JHMap, Map => JMap }
import akka.Done
import akka.actor.ActorSystem
import akka.persistence.jdbc.config.JournalConfig
import akka.persistence.jdbc.journal.JdbcAsyncWriteJournal.{ InPlaceUpdateEvent, WriteFinished }
import akka.persistence.jdbc.journal.dao.{ JournalDao, JournalDaoInstantiation, JournalDaoWithUpdates }
import akka.persistence.jdbc.db.{ SlickDatabase, SlickExtension }
import akka.persistence.journal.AsyncWriteJournal
import akka.persistence.{ AtomicWrite, PersistentRepr }
import akka.stream.{ Materializer, SystemMaterializer }
import com.typesafe.config.Config
import slick.jdbc.JdbcBackend._
import scala.collection.immutable._
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.{ Failure, Success, Try }
import akka.pattern.pipe
import akka.persistence.jdbc.util.PluginVersionChecker
object JdbcAsyncWriteJournal {
private case class WriteFinished(pid: String, f: Future[_])
/**
* Extra Plugin API: May be used to issue in-place updates for events.
* To be used only for data migrations such as "encrypt all events" and similar operations.
*
* The write payload may be wrapped in a [[akka.persistence.journal.Tagged]],
* in which case the new tags will overwrite the existing tags of the event.
*/
final case class InPlaceUpdateEvent(persistenceId: String, seqNr: Long, write: AnyRef)
}
class JdbcAsyncWriteJournal(config: Config) extends AsyncWriteJournal {
implicit val ec: ExecutionContext = context.dispatcher
implicit val system: ActorSystem = context.system
implicit val mat: Materializer = SystemMaterializer(system).materializer
val journalConfig = new JournalConfig(config)
PluginVersionChecker.check()
val slickDb: SlickDatabase = SlickExtension(system).database(config)
def db: Database = slickDb.database
val journalDao: JournalDao = JournalDaoInstantiation.journalDao(journalConfig, slickDb)
// only accessed if we need to perform updates, which happens very rarely
def journalDaoWithUpdates: JournalDaoWithUpdates =
journalDao match {
case upgraded: JournalDaoWithUpdates => upgraded
case _ =>
throw new IllegalStateException(s"The ${journalDao.getClass} does NOT implement [JournalDaoWithUpdates], " +
s"which is required to perform updates of events! Please configure a valid update-capable DAO (e.g. the default [ByteArrayJournalDao]).")
}
// readHighestSequence must be performed after pending write for a persistenceId
// when the persistent actor is restarted.
private val writeInProgress: JMap[String, Future[_]] = new JHMap
override def asyncWriteMessages(messages: Seq[AtomicWrite]): Future[Seq[Try[Unit]]] = {
// add timestamp to all payloads in all AtomicWrite messages
val now = System.currentTimeMillis()
val timedMessages =
messages.map { atomWrt =>
atomWrt.copy(payload = atomWrt.payload.map(pr => pr.withTimestamp(now)))
}
val future = journalDao.asyncWriteMessages(timedMessages)
val persistenceId = timedMessages.head.persistenceId
writeInProgress.put(persistenceId, future)
future.onComplete(_ => self ! WriteFinished(persistenceId, future))
future
}
override def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] =
journalDao.delete(persistenceId, toSequenceNr)
override def asyncReadHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = {
def fetchHighestSeqNr() = journalDao.highestSequenceNr(persistenceId, fromSequenceNr)
writeInProgress.get(persistenceId) match {
case null => fetchHighestSeqNr()
case f: Future[Any @unchecked] =>
// we must fetch the highest sequence number after the previous write has completed
// If the previous write failed then we can ignore this
f.recover { case _ => () }.flatMap(_ => fetchHighestSeqNr())
}
}
private def asyncUpdateEvent(persistenceId: String, sequenceNr: Long, message: AnyRef): Future[Done] = {
journalDaoWithUpdates.update(persistenceId, sequenceNr, message)
}
override def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(
recoveryCallback: (PersistentRepr) => Unit): Future[Unit] =
journalDao
.messagesWithBatch(persistenceId, fromSequenceNr, toSequenceNr, journalConfig.daoConfig.replayBatchSize, None)
.take(max)
.runForeach {
case Success((repr, _)) =>
recoveryCallback(repr)
case Failure(ex) => throw ex
}
.map(_ => ())
override def postStop(): Unit = {
if (slickDb.allowShutdown) {
// Since a (new) db is created when this actor (re)starts, we must close it when the actor stops
db.close()
}
super.postStop()
}
override def receivePluginInternal: Receive = {
case WriteFinished(persistenceId, future) =>
writeInProgress.remove(persistenceId, future)
case InPlaceUpdateEvent(pid, seq, write) =>
asyncUpdateEvent(pid, seq, write).pipeTo(sender())
}
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/BaseDao.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal.dao
import akka.persistence.jdbc.config.BaseDaoConfig
import akka.stream.scaladsl.{ Keep, Sink, Source, SourceQueueWithComplete }
import akka.stream.{ Materializer, OverflowStrategy, QueueOfferResult }
import scala.collection.immutable.{ Seq, Vector }
import scala.concurrent.{ ExecutionContext, Future, Promise }
// Shared with the legacy DAO
abstract class BaseDao[T] {
implicit val mat: Materializer
implicit val ec: ExecutionContext
def baseDaoConfig: BaseDaoConfig
val writeQueue: SourceQueueWithComplete[(Promise[Unit], Seq[T])] = Source
.queue[(Promise[Unit], Seq[T])](baseDaoConfig.bufferSize, OverflowStrategy.dropNew)
.batchWeighted[(Seq[Promise[Unit]], Seq[T])](baseDaoConfig.batchSize, _._2.size, tup => Vector(tup._1) -> tup._2) {
case ((promises, rows), (newPromise, newRows)) => (promises :+ newPromise) -> (rows ++ newRows)
}
.mapAsync(baseDaoConfig.parallelism) { case (promises, rows) =>
writeJournalRows(rows).map(unit => promises.foreach(_.success(unit))).recover { case t =>
promises.foreach(_.failure(t))
}
}
.toMat(Sink.ignore)(Keep.left)
.run()
def writeJournalRows(xs: Seq[T]): Future[Unit]
def queueWriteJournalRows(xs: Seq[T]): Future[Unit] = {
val promise = Promise[Unit]()
writeQueue.offer(promise -> xs).flatMap {
case QueueOfferResult.Enqueued =>
promise.future
case QueueOfferResult.Failure(t) =>
Future.failed(new Exception("Failed to write journal row batch", t))
case QueueOfferResult.Dropped =>
Future.failed(new Exception(
s"Failed to enqueue journal row batch write, the queue buffer was full (${baseDaoConfig.bufferSize} elements) please check the jdbc-journal.bufferSize setting"))
case QueueOfferResult.QueueClosed =>
Future.failed(new Exception("Failed to enqueue journal row batch write, the queue was closed"))
}
}
}
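The queue-and-batch mechanism above can be hard to follow inline, so here is a self-contained sketch of the same pattern, with strings standing in for journal rows and a println standing in for the database round-trip (buffer sizes and names are made up):

import akka.actor.ActorSystem
import akka.stream.{ OverflowStrategy, QueueOfferResult }
import akka.stream.scaladsl.{ Keep, Sink, Source }
import scala.concurrent.{ Future, Promise }

object BatchingDemo extends App {
  implicit val system: ActorSystem = ActorSystem("batching-demo")
  import system.dispatcher

  val queue = Source
    .queue[(Promise[Unit], Seq[String])](bufferSize = 128, OverflowStrategy.dropNew)
    // merge queued writes into bigger batches while keeping every caller's promise
    .batchWeighted[(Seq[Promise[Unit]], Seq[String])](64, _._2.size, t => Vector(t._1) -> t._2) {
      case ((promises, rows), (p, newRows)) => (promises :+ p) -> (rows ++ newRows)
    }
    .mapAsync(parallelism = 2) { case (promises, rows) =>
      // stand-in for writing the whole batch in one transaction
      Future(println(s"writing ${rows.size} rows")).map(done => promises.foreach(_.success(done)))
    }
    .toMat(Sink.ignore)(Keep.left)
    .run()

  def write(rows: Seq[String]): Future[Unit] = {
    val promise = Promise[Unit]()
    queue.offer(promise -> rows).flatMap {
      case QueueOfferResult.Enqueued => promise.future
      case other                     => Future.failed(new RuntimeException(s"offer failed: $other"))
    }
  }

  write(Seq("row-1", "row-2")).foreach(_ => system.terminate())
}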
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/BaseJournalDaoWithReadMessages.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal.dao
import akka.NotUsed
import akka.actor.Scheduler
import akka.persistence.PersistentRepr
import akka.persistence.jdbc.journal.dao.FlowControl.{ Continue, ContinueDelayed, Stop }
import akka.stream.Materializer
import akka.stream.scaladsl.{ Sink, Source }
import scala.concurrent.{ ExecutionContext, Future }
import scala.concurrent.duration.FiniteDuration
import scala.util.{ Failure, Success, Try }
trait BaseJournalDaoWithReadMessages extends JournalDaoWithReadMessages {
implicit val ec: ExecutionContext
implicit val mat: Materializer
override def messagesWithBatch(
persistenceId: String,
fromSequenceNr: Long,
toSequenceNr: Long,
batchSize: Int,
refreshInterval: Option[(FiniteDuration, Scheduler)]): Source[Try[(PersistentRepr, Long)], NotUsed] = {
Source
.unfoldAsync[(Long, FlowControl), Seq[Try[(PersistentRepr, Long)]]]((Math.max(1, fromSequenceNr), Continue)) {
case (from, control) =>
def retrieveNextBatch(): Future[Option[((Long, FlowControl), Seq[Try[(PersistentRepr, Long)]])]] = {
for {
xs <- messages(persistenceId, from, toSequenceNr, batchSize).runWith(Sink.seq)
} yield {
val hasMoreEvents = xs.size == batchSize
// Events are ordered by sequence number, therefore the last one is the largest
val lastSeqNrInBatch: Option[Long] = xs.lastOption match {
case Some(Success((repr, _))) => Some(repr.sequenceNr)
case Some(Failure(e)) => throw e // fail the returned Future
case None => None
}
val hasLastEvent = lastSeqNrInBatch.exists(_ >= toSequenceNr)
val nextControl: FlowControl =
if (hasLastEvent || from > toSequenceNr) Stop
else if (hasMoreEvents) Continue
else if (refreshInterval.isEmpty) Stop
else ContinueDelayed
val nextFrom: Long = lastSeqNrInBatch match {
// Continue querying from the last sequence number (the events are ordered)
case Some(lastSeqNr) => lastSeqNr + 1
case None => from
}
Some(((nextFrom, nextControl), xs))
}
}
control match {
case Stop => Future.successful(None)
case Continue => retrieveNextBatch()
case ContinueDelayed =>
val (delay, scheduler) = refreshInterval.get
akka.pattern.after(delay, scheduler)(retrieveNextBatch())
}
}
.mapConcat(identity)
}
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/DefaultJournalDao.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal.dao
import scala.collection.immutable
import scala.collection.immutable.Nil
import scala.collection.immutable.Seq
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.Try
import akka.NotUsed
import akka.persistence.jdbc.AkkaSerialization
import akka.persistence.jdbc.config.BaseDaoConfig
import akka.persistence.jdbc.config.JournalConfig
import akka.persistence.jdbc.journal.dao.JournalTables.JournalAkkaSerializationRow
import akka.persistence.journal.Tagged
import akka.persistence.AtomicWrite
import akka.persistence.PersistentRepr
import akka.serialization.Serialization
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import slick.jdbc.JdbcBackend.Database
import slick.jdbc.JdbcProfile
/**
* A [[JournalDao]] that uses Akka serialization to serialize the payload and store
* the manifest and serializer id used.
*/
class DefaultJournalDao(
val db: Database,
val profile: JdbcProfile,
val journalConfig: JournalConfig,
serialization: Serialization)(implicit val ec: ExecutionContext, val mat: Materializer)
extends BaseDao[(JournalAkkaSerializationRow, Set[String])]
with BaseJournalDaoWithReadMessages
with JournalDao
with H2Compat {
import profile.api._
override def baseDaoConfig: BaseDaoConfig = journalConfig.daoConfig
override def writeJournalRows(xs: immutable.Seq[(JournalAkkaSerializationRow, Set[String])]): Future[Unit] = {
db.run(queries.writeJournalRows(xs).transactionally).map(_ => ())(ExecutionContext.parasitic)
}
val queries =
new JournalQueries(profile, journalConfig.eventJournalTableConfiguration, journalConfig.eventTagTableConfiguration)
override def deleteEventsTo(persistenceId: String, toSequenceNr: Long, resetSequenceNumber: Boolean): Future[Unit] = {
// note: the passed toSequenceNr will be Long.MaxValue when doing a 'full' journal clean-up
// see JournalSpec's test: 'not reset highestSequenceNr after journal cleanup'
val actions: DBIOAction[Unit, NoStream, Effect.Write with Effect.Read] = {
// If we're resetting the sequence number, no need to determine the highest sequence number.
if (resetSequenceNumber) {
queries.delete(persistenceId, toSequenceNr).map(_ => ())
} else {
highestSequenceNrAction(persistenceId)
.flatMap {
// are we trying to delete the highest or even higher seqNr ?
case highestSeqNr if highestSeqNr <= toSequenceNr =>
// if so, we delete up to the one before the last and
// mark the last one as logically deleted, preserving highestSeqNr
queries
.delete(persistenceId, highestSeqNr - 1)
.flatMap(_ => queries.markAsDeleted(persistenceId, highestSeqNr))
case _ =>
// if not, we delete up to the requested seqNr
queries.delete(persistenceId, toSequenceNr)
}
.map(_ => ())
}
}
db.run(actions.transactionally)
}
override def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] =
db.run(highestSequenceNrAction(persistenceId))
private def highestSequenceNrAction(persistenceId: String): DBIOAction[Long, NoStream, Effect.Read] =
queries.highestSequenceNrForPersistenceId(persistenceId).result.map(_.getOrElse(0))
private def highestMarkedSequenceNr(persistenceId: String) =
queries.highestMarkedSequenceNrForPersistenceId(persistenceId).result
override def asyncWriteMessages(messages: immutable.Seq[AtomicWrite]): Future[immutable.Seq[Try[Unit]]] = {
def serializeAtomicWrite(aw: AtomicWrite): Try[Seq[(JournalAkkaSerializationRow, Set[String])]] = {
Try(aw.payload.map(serialize))
}
def serialize(pr: PersistentRepr): (JournalAkkaSerializationRow, Set[String]) = {
val (updatedPr, tags) = pr.payload match {
case Tagged(payload, tags) => (pr.withPayload(payload), tags)
case _ => (pr, Set.empty[String])
}
val serializedPayload = AkkaSerialization.serialize(serialization, updatedPr.payload).get
val serializedMetadata = updatedPr.metadata.flatMap(m => AkkaSerialization.serialize(serialization, m).toOption)
val row = JournalAkkaSerializationRow(
Long.MinValue,
updatedPr.deleted,
updatedPr.persistenceId,
updatedPr.sequenceNr,
updatedPr.writerUuid,
updatedPr.timestamp,
updatedPr.manifest,
serializedPayload.payload,
serializedPayload.serId,
serializedPayload.serManifest,
serializedMetadata.map(_.payload),
serializedMetadata.map(_.serId),
serializedMetadata.map(_.serManifest))
(row, tags)
}
val serializedTries = messages.map(serializeAtomicWrite)
val rowsToWrite: Seq[(JournalAkkaSerializationRow, Set[String])] = for {
serializeTry <- serializedTries
row <- serializeTry.getOrElse(Seq.empty)
} yield row
def resultWhenWriteComplete =
if (serializedTries.forall(_.isSuccess)) Nil else serializedTries.map(_.map(_ => ()))
queueWriteJournalRows(rowsToWrite).map(_ => resultWhenWriteComplete)
}
override def messages(
persistenceId: String,
fromSequenceNr: Long,
toSequenceNr: Long,
max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = {
Source
.fromPublisher(
db.stream(
queries.messagesQuery((persistenceId, fromSequenceNr, toSequenceNr, correctMaxForH2Driver(max))).result))
.map(AkkaSerialization.fromRow(serialization)(_))
}
}
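Since tags only reach the event_tag table when the payload arrives wrapped in Tagged, here is a small hedged sketch of a classic persistent actor doing exactly that; the actor, its persistence id and the tag name are made up, and jdbc-journal is assumed to be configured as the default journal plugin:

import akka.actor.{ ActorSystem, Props }
import akka.persistence.PersistentActor
import akka.persistence.journal.Tagged

// Illustrative actor: every command is persisted as a tagged string event.
class AccountActor extends PersistentActor {
  override def persistenceId: String = "account-1"

  override def receiveCommand: Receive = { case owner: String =>
    // the Tagged wrapper is unwrapped by DefaultJournalDao and the tags end up in event_tag
    persist(Tagged(s"account-opened:$owner", Set("account"))) { _ =>
      sender() ! "persisted"
    }
  }

  override def receiveRecover: Receive = { case _ => () }
}

object TaggedDemo extends App {
  val system = ActorSystem("tagged-demo")
  val account = system.actorOf(Props[AccountActor](), "account-1")
  account ! "alice"
}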
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/FlowControl.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal.dao
private[jdbc] sealed trait FlowControl
private[jdbc] object FlowControl {
/** Keep querying - used when we are sure that there are more events to fetch */
case object Continue extends FlowControl
/**
* Keep querying with delay - used when we have consumed all events,
* but want to poll for future events
*/
case object ContinueDelayed extends FlowControl
/** Stop querying - used when we reach the desired offset */
case object Stop extends FlowControl
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/H2Compat.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal.dao
import slick.jdbc.JdbcProfile
trait H2Compat {
val profile: JdbcProfile
private lazy val isH2Driver = profile match {
case slick.jdbc.H2Profile => true
case _ => false
}
def correctMaxForH2Driver(max: Long): Long = {
if (isH2Driver) {
Math.min(max, Int.MaxValue) // H2 only accepts a LIMIT clause as an Integer
} else {
max
}
}
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDao.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal.dao
import akka.persistence.AtomicWrite
import java.time.Instant
import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.util.Try
trait JournalDao extends JournalDaoWithReadMessages {
/**
* Deletes all persistent messages up to toSequenceNr (inclusive) for the persistenceId
*/
def delete(persistenceId: String, toSequenceNr: Long): Future[Unit] =
deleteEventsTo(persistenceId, toSequenceNr, false)
/**
* Deletes all persistent events up to toSequenceNr (inclusive) for the persistenceId
*/
def deleteEventsTo(persistenceId: String, toSequenceNr: Long, resetSequenceNumber: Boolean): Future[Unit]
/**
* Returns the highest sequence number for the events that are stored for that `persistenceId`. When no events are
* found for the `persistenceId`, 0L will be the highest sequence number
*/
def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long]
/**
* @see [[akka.persistence.journal.AsyncWriteJournal.asyncWriteMessages(messages)]]
*/
def asyncWriteMessages(messages: Seq[AtomicWrite]): Future[Seq[Try[Unit]]]
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDaoInstantiation.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal.dao
import akka.actor.{ ActorSystem, ExtendedActorSystem }
import akka.annotation.InternalApi
import akka.persistence.jdbc.config.JournalConfig
import akka.persistence.jdbc.db.SlickDatabase
import akka.serialization.{ Serialization, SerializationExtension }
import akka.stream.Materializer
import slick.jdbc.JdbcBackend.Database
import slick.jdbc.JdbcProfile
import scala.concurrent.ExecutionContext
import scala.util.{ Failure, Success }
@InternalApi
private[jdbc] object JournalDaoInstantiation {
def journalDao(
journalConfig: JournalConfig,
slickDb: SlickDatabase)(implicit system: ActorSystem, ec: ExecutionContext, mat: Materializer): JournalDao = {
val fqcn = journalConfig.pluginConfig.dao
val profile: JdbcProfile = slickDb.profile
val args = Seq(
(classOf[Database], slickDb.database),
(classOf[JdbcProfile], profile),
(classOf[JournalConfig], journalConfig),
(classOf[Serialization], SerializationExtension(system)),
(classOf[ExecutionContext], ec),
(classOf[Materializer], mat))
system.asInstanceOf[ExtendedActorSystem].dynamicAccess.createInstanceFor[JournalDao](fqcn, args) match {
case Success(dao) => dao
case Failure(cause) => throw cause
}
}
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDaoWithReadMessages.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal.dao
import scala.concurrent.duration.FiniteDuration
import scala.util.Try
import akka.NotUsed
import akka.actor.Scheduler
import akka.persistence.PersistentRepr
import akka.stream.scaladsl.Source
trait JournalDaoWithReadMessages {
/**
* Returns a Source of PersistentRepr and ordering number for a certain persistenceId.
* It includes the events with sequenceNr between `fromSequenceNr` (inclusive) and
* `toSequenceNr` (inclusive).
*/
def messages(
persistenceId: String,
fromSequenceNr: Long,
toSequenceNr: Long,
max: Long): Source[Try[(PersistentRepr, Long)], NotUsed]
/**
* Returns a Source of PersistentRepr and ordering number for a certain persistenceId.
* It includes the events with sequenceNr between `fromSequenceNr` (inclusive) and
* `toSequenceNr` (inclusive).
*/
def messagesWithBatch(
persistenceId: String,
fromSequenceNr: Long,
toSequenceNr: Long,
batchSize: Int,
refreshInterval: Option[(FiniteDuration, Scheduler)]): Source[Try[(PersistentRepr, Long)], NotUsed]
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDaoWithUpdates.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal.dao
import akka.Done
import scala.concurrent.Future
/**
* A [[JournalDao]] with extended capabilities, such as updating payloads and tags of existing events.
* These operations should be used sparingly, for example when migrating data from unencrypted to encrypted formats.
*/
trait JournalDaoWithUpdates extends JournalDao {
/**
* Update (!) an existing event with the passed in data.
*/
def update(persistenceId: String, sequenceNr: Long, payload: AnyRef): Future[Done]
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalQueries.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal.dao
import akka.persistence.jdbc.config.{ EventJournalTableConfiguration, EventTagTableConfiguration }
import akka.persistence.jdbc.journal.dao.JournalTables.{ JournalAkkaSerializationRow, TagRow }
import slick.jdbc.JdbcProfile
import scala.concurrent.ExecutionContext
class JournalQueries(
val profile: JdbcProfile,
override val journalTableCfg: EventJournalTableConfiguration,
override val tagTableCfg: EventTagTableConfiguration)
extends JournalTables {
import profile.api._
private val JournalTableC = Compiled(JournalTable)
private val insertAndReturn = JournalTable.returning(JournalTable.map(_.ordering))
private val TagTableC = Compiled(TagTable)
def writeJournalRows(xs: Seq[(JournalAkkaSerializationRow, Set[String])])(
implicit ec: ExecutionContext): DBIOAction[Any, NoStream, Effect.Write] = {
val sorted = xs.sortBy(event => event._1.sequenceNumber)
if (sorted.exists(_._2.nonEmpty)) {
// only if there are any tags
writeEventsAndTags(sorted)
} else {
// optimization avoid some work when not using tags
val events = sorted.map(_._1)
JournalTableC ++= events
}
}
private def writeEventsAndTags(sorted: Seq[(JournalAkkaSerializationRow, Set[String])])(
implicit ec: ExecutionContext): DBIOAction[Any, NoStream, Effect.Write] = {
val (events, _) = sorted.unzip
if (tagTableCfg.legacyTagKey) {
for {
ids <- insertAndReturn ++= events
tagInserts = ids.zip(sorted).flatMap { case (id, (e, tags)) =>
tags.map(tag => TagRow(Some(id), Some(e.persistenceId), Some(e.sequenceNumber), tag))
}
_ <- TagTableC ++= tagInserts
} yield ()
} else {
val tagInserts = sorted.map { case (e, tags) =>
tags.map(t => TagRow(None, Some(e.persistenceId), Some(e.sequenceNumber), t))
}
// optimization using batch insert
for {
_ <- JournalTableC ++= events
_ <- TagTableC ++= tagInserts.flatten
} yield ()
}
}
private def selectAllJournalForPersistenceIdDesc(persistenceId: Rep[String]) =
selectAllJournalForPersistenceId(persistenceId).sortBy(_.sequenceNumber.desc)
private def selectAllJournalForPersistenceId(persistenceId: Rep[String]) =
JournalTable.filter(_.persistenceId === persistenceId).sortBy(_.sequenceNumber.desc)
def delete(persistenceId: String, toSequenceNr: Long) = {
JournalTable.filter(_.persistenceId === persistenceId).filter(_.sequenceNumber <= toSequenceNr).delete
}
private[akka] def markAsDeleted(persistenceId: String, seqNr: Long) =
JournalTable
.filter(_.persistenceId === persistenceId)
.filter(_.sequenceNumber === seqNr)
.filter(_.deleted === false)
.map(_.deleted)
.update(true)
@deprecated(message = "Intended to be internal API", since = "5.4.2")
def markJournalMessagesAsDeleted(persistenceId: String, maxSequenceNr: Long) =
JournalTable
.filter(_.persistenceId === persistenceId)
.filter(_.sequenceNumber <= maxSequenceNr)
.filter(_.deleted === false)
.map(_.deleted)
.update(true)
private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] =
selectAllJournalForPersistenceId(persistenceId).take(1).map(_.sequenceNumber).max
private def _highestMarkedSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] =
selectAllJournalForPersistenceId(persistenceId).filter(_.deleted === true).take(1).map(_.sequenceNumber).max
val highestSequenceNrForPersistenceId = Compiled(_highestSequenceNrForPersistenceId _)
val highestMarkedSequenceNrForPersistenceId = Compiled(_highestMarkedSequenceNrForPersistenceId _)
private def _selectByPersistenceIdAndMaxSequenceNumber(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) =
selectAllJournalForPersistenceIdDesc(persistenceId).filter(_.sequenceNumber <= maxSequenceNr)
val selectByPersistenceIdAndMaxSequenceNumber = Compiled(_selectByPersistenceIdAndMaxSequenceNumber _)
private def _allPersistenceIdsDistinct: Query[Rep[String], String, Seq] =
JournalTable.map(_.persistenceId).distinct
val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct)
def journalRowByPersistenceIds(persistenceIds: Iterable[String]): Query[Rep[String], String, Seq] =
for {
query <- JournalTable.map(_.persistenceId)
if query.inSetBind(persistenceIds)
} yield query
private def _messagesQuery(
persistenceId: Rep[String],
fromSequenceNr: Rep[Long],
toSequenceNr: Rep[Long],
max: ConstColumn[Long]) =
JournalTable
.filter(_.persistenceId === persistenceId)
.filter(_.deleted === false)
.filter(_.sequenceNumber >= fromSequenceNr)
.filter(_.sequenceNumber <= toSequenceNr)
.sortBy(_.sequenceNumber.asc)
.take(max)
val messagesQuery = Compiled(_messagesQuery _)
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalTables.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal.dao
import akka.annotation.InternalApi
import akka.persistence.jdbc.config.{ EventJournalTableConfiguration, EventTagTableConfiguration }
import akka.persistence.jdbc.journal.dao.JournalTables.{ JournalAkkaSerializationRow, TagRow }
/**
* INTERNAL API
*/
@InternalApi
object JournalTables {
case class JournalAkkaSerializationRow(
ordering: Long,
deleted: Boolean,
persistenceId: String,
sequenceNumber: Long,
writer: String,
writeTimestamp: Long,
adapterManifest: String,
eventPayload: Array[Byte],
eventSerId: Int,
eventSerManifest: String,
metaPayload: Option[Array[Byte]],
metaSerId: Option[Int],
metaSerManifest: Option[String])
case class TagRow(eventId: Option[Long], persistenceId: Option[String], sequenceNumber: Option[Long], tag: String)
}
/**
* For the schema added in 5.0.0
* INTERNAL API
*/
@InternalApi
trait JournalTables {
val profile: slick.jdbc.JdbcProfile
import profile.api._
def journalTableCfg: EventJournalTableConfiguration
def tagTableCfg: EventTagTableConfiguration
class JournalEvents(_tableTag: Tag)
extends Table[JournalAkkaSerializationRow](
_tableTag,
_schemaName = journalTableCfg.schemaName,
_tableName = journalTableCfg.tableName) {
def * =
(
ordering,
deleted,
persistenceId,
sequenceNumber,
writer,
timestamp,
adapterManifest,
eventPayload,
eventSerId,
eventSerManifest,
metaPayload,
metaSerId,
metaSerManifest).<>((JournalAkkaSerializationRow.apply _).tupled, JournalAkkaSerializationRow.unapply)
val ordering: Rep[Long] = column[Long](journalTableCfg.columnNames.ordering, O.AutoInc)
val persistenceId: Rep[String] =
column[String](journalTableCfg.columnNames.persistenceId, O.Length(255, varying = true))
val sequenceNumber: Rep[Long] = column[Long](journalTableCfg.columnNames.sequenceNumber)
val deleted: Rep[Boolean] = column[Boolean](journalTableCfg.columnNames.deleted, O.Default(false))
val writer: Rep[String] = column[String](journalTableCfg.columnNames.writer)
val adapterManifest: Rep[String] = column[String](journalTableCfg.columnNames.adapterManifest)
val timestamp: Rep[Long] = column[Long](journalTableCfg.columnNames.writeTimestamp)
val eventPayload: Rep[Array[Byte]] = column[Array[Byte]](journalTableCfg.columnNames.eventPayload)
val eventSerId: Rep[Int] = column[Int](journalTableCfg.columnNames.eventSerId)
val eventSerManifest: Rep[String] = column[String](journalTableCfg.columnNames.eventSerManifest)
val metaPayload: Rep[Option[Array[Byte]]] = column[Option[Array[Byte]]](journalTableCfg.columnNames.metaPayload)
val metaSerId: Rep[Option[Int]] = column[Option[Int]](journalTableCfg.columnNames.metaSerId)
val metaSerManifest: Rep[Option[String]] = column[Option[String]](journalTableCfg.columnNames.metaSerManifest)
val pk = primaryKey(s"${tableName}_pk", (persistenceId, sequenceNumber))
val orderingIdx = index(s"${tableName}_ordering_idx", ordering, unique = true)
}
lazy val JournalTable = new TableQuery(tag => new JournalEvents(tag))
class EventTags(_tableTag: Tag) extends Table[TagRow](_tableTag, tagTableCfg.schemaName, tagTableCfg.tableName) {
override def * = (eventId, persistenceId, sequenceNumber, tag).<>((TagRow.apply _).tupled, TagRow.unapply)
// allow null value insert.
val eventId: Rep[Option[Long]] = column[Option[Long]](tagTableCfg.columnNames.eventId)
val persistenceId: Rep[Option[String]] = column[Option[String]](tagTableCfg.columnNames.persistenceId)
val sequenceNumber: Rep[Option[Long]] = column[Option[Long]](tagTableCfg.columnNames.sequenceNumber)
val tag: Rep[String] = column[String](tagTableCfg.columnNames.tag)
val pk = primaryKey(s"${tagTableCfg.tableName}_pk", (persistenceId, sequenceNumber, tag))
val journalEvent =
foreignKey(s"fk_${journalTableCfg.tableName}", (persistenceId, sequenceNumber), JournalTable)(e =>
(Rep.Some(e.persistenceId), Rep.Some(e.sequenceNumber)))
}
lazy val TagTable = new TableQuery(tag => new EventTags(tag))
}
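/*
 * Editor's note, not part of the original source: a minimal sketch of inspecting the DDL that
 * Slick derives from these mappings, assuming an already-wired JournalTables instance named
 * `tables` (a hypothetical value supplying profile and table configuration).
 *
 *   import tables.profile.api._
 *
 *   val ddl = tables.JournalTable.schema ++ tables.TagTable.schema
 *   ddl.createStatements.foreach(println)
 */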
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/ByteArrayJournalDao.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal.dao.legacy
import akka.persistence.jdbc.config.{ BaseDaoConfig, JournalConfig }
import akka.persistence.jdbc.journal.dao.{ BaseDao, BaseJournalDaoWithReadMessages, H2Compat, JournalDaoWithUpdates }
import akka.persistence.jdbc.serialization.FlowPersistentReprSerializer
import akka.persistence.{ AtomicWrite, PersistentRepr }
import akka.serialization.Serialization
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.{ Done, NotUsed }
import org.slf4j.LoggerFactory
import slick.jdbc.JdbcBackend.Database
import slick.jdbc.JdbcProfile
import scala.annotation.nowarn
import scala.collection.immutable.{ Nil, Seq }
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.{ Failure, Success, Try }
class ByteArrayJournalDao(
val db: Database,
val profile: JdbcProfile,
val journalConfig: JournalConfig,
serialization: Serialization)(implicit val ec: ExecutionContext, val mat: Materializer)
extends BaseByteArrayJournalDao {
val queries = new JournalQueries(profile, journalConfig.journalTableConfiguration)
val serializer: ByteArrayJournalSerializer =
new ByteArrayJournalSerializer(serialization, journalConfig.pluginConfig.tagSeparator)
}
/**
 * The BaseByteArrayJournalDao contains all the knowledge to persist and load serialized journal entries
*/
trait BaseByteArrayJournalDao
extends BaseDao[JournalRow]
with JournalDaoWithUpdates
with BaseJournalDaoWithReadMessages
with H2Compat {
val db: Database
val profile: JdbcProfile
val queries: JournalQueries
val journalConfig: JournalConfig
override def baseDaoConfig: BaseDaoConfig = journalConfig.daoConfig
@nowarn("msg=deprecated")
val serializer: FlowPersistentReprSerializer[JournalRow]
implicit val ec: ExecutionContext
implicit val mat: Materializer
import profile.api._
val logger = LoggerFactory.getLogger(this.getClass)
def writeJournalRows(xs: Seq[JournalRow]): Future[Unit] = { // Write atomically without auto-commit
db.run(queries.writeJournalRows(xs).transactionally).map(_ => ())
}
/**
* @see [[akka.persistence.journal.AsyncWriteJournal.asyncWriteMessages(messages)]]
*/
def asyncWriteMessages(messages: Seq[AtomicWrite]): Future[Seq[Try[Unit]]] = {
val serializedTries: Seq[Try[Seq[JournalRow]]] = serializer.serialize(messages)
// If serialization fails for some AtomicWrites, the other AtomicWrites may still be written
val rowsToWrite: Seq[JournalRow] = for {
serializeTry <- serializedTries
row <- serializeTry.getOrElse(Seq.empty)
} yield row
def resultWhenWriteComplete =
if (serializedTries.forall(_.isSuccess)) Nil else serializedTries.map(_.map(_ => ()))
queueWriteJournalRows(rowsToWrite).map(_ => resultWhenWriteComplete)
}
override def deleteEventsTo(
persistenceId: String,
maxSequenceNr: Long,
resetSequenceNumber: Boolean): Future[Unit] = {
val actions: DBIOAction[Unit, NoStream, Effect.Write with Effect.Read] = if (resetSequenceNumber) {
queries.delete(persistenceId, maxSequenceNr).map(_ => ())
} else {
// We should keep the journal record with the highest sequence number in order to be compliant
// with @see [[akka.persistence.journal.JournalSpec]]
for {
_ <- queries.markJournalMessagesAsDeleted(persistenceId, maxSequenceNr)
highestMarkedSequenceNr <- highestMarkedSequenceNr(persistenceId)
_ <- queries.delete(persistenceId, highestMarkedSequenceNr.getOrElse(0L) - 1)
} yield ()
}
db.run(actions.transactionally)
}
def update(persistenceId: String, sequenceNr: Long, payload: AnyRef): Future[Done] = {
val write = PersistentRepr(payload, sequenceNr, persistenceId)
val serializedRow = serializer.serialize(write) match {
case Success(t) => t
case Failure(cause) =>
throw new IllegalArgumentException(
s"Failed to serialize ${write.getClass} for update of [$persistenceId] @ [$sequenceNr]",
cause)
}
db.run(queries.update(persistenceId, sequenceNr, serializedRow.message).map(_ => Done))
}
private def highestMarkedSequenceNr(persistenceId: String) =
queries.highestMarkedSequenceNrForPersistenceId(persistenceId).result
override def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] =
for {
maybeHighestSeqNo <- db.run(queries.highestSequenceNrForPersistenceId(persistenceId).result)
} yield maybeHighestSeqNo.getOrElse(0L)
override def messages(
persistenceId: String,
fromSequenceNr: Long,
toSequenceNr: Long,
max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] =
Source
.fromPublisher(
db.stream(
queries.messagesQuery((persistenceId, fromSequenceNr, toSequenceNr, correctMaxForH2Driver(max))).result))
.via(serializer.deserializeFlow)
.map {
case Success((repr, _, ordering)) => Success(repr -> ordering)
case Failure(e) => Failure(e)
}
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/ByteArrayJournalSerializer.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc
package journal.dao.legacy
import akka.persistence.PersistentRepr
import akka.persistence.jdbc.serialization.FlowPersistentReprSerializer
import akka.serialization.Serialization
import scala.annotation.nowarn
import scala.collection.immutable._
import scala.util.Try
@nowarn("msg=deprecated")
class ByteArrayJournalSerializer(serialization: Serialization, separator: String)
extends FlowPersistentReprSerializer[JournalRow] {
override def serialize(persistentRepr: PersistentRepr, tags: Set[String]): Try[JournalRow] = {
serialization
.serialize(persistentRepr)
.map(
JournalRow(
Long.MinValue,
persistentRepr.deleted,
persistentRepr.persistenceId,
persistentRepr.sequenceNr,
_,
encodeTags(tags, separator)))
}
override def deserialize(journalRow: JournalRow): Try[(PersistentRepr, Set[String], Long)] = {
serialization
.deserialize(journalRow.message, classOf[PersistentRepr])
.map((_, decodeTags(journalRow.tags, separator), journalRow.ordering))
}
}
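/*
 * Editor's note, not part of the original source: a minimal round-trip sketch for this legacy
 * serializer. It assumes an ActorSystem `system` is in scope and that the payload type has a
 * configured Akka serializer; names and values are illustrative only.
 *
 *   import akka.persistence.PersistentRepr
 *   import akka.serialization.SerializationExtension
 *
 *   val serialization = SerializationExtension(system)
 *   val serializer = new ByteArrayJournalSerializer(serialization, ",")
 *   val repr = PersistentRepr("user-created", sequenceNr = 1L, persistenceId = "user-1")
 *   val row = serializer.serialize(repr, Set("users", "audit")) // Try[JournalRow]
 *   val back = row.flatMap(serializer.deserialize)              // Try[(PersistentRepr, Set[String], Long)]
 */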
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/JournalQueries.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc
package journal.dao.legacy
import akka.persistence.jdbc.config.LegacyJournalTableConfiguration
import slick.jdbc.JdbcProfile
class JournalQueries(val profile: JdbcProfile, override val journalTableCfg: LegacyJournalTableConfiguration)
extends JournalTables {
import profile.api._
private val JournalTableC = Compiled(JournalTable)
def writeJournalRows(xs: Seq[JournalRow]) =
JournalTableC ++= xs.sortBy(_.sequenceNumber)
private def selectAllJournalForPersistenceIdDesc(persistenceId: Rep[String]) =
selectAllJournalForPersistenceId(persistenceId).sortBy(_.sequenceNumber.desc)
private def selectAllJournalForPersistenceId(persistenceId: Rep[String]) =
JournalTable.filter(_.persistenceId === persistenceId).sortBy(_.sequenceNumber.desc)
def delete(persistenceId: String, toSequenceNr: Long) = {
JournalTable.filter(_.persistenceId === persistenceId).filter(_.sequenceNumber <= toSequenceNr).delete
}
/**
* Updates (!) a payload stored in a specific events row.
* Intended to be used sparingly, e.g. moving all events to their encrypted counterparts.
*/
def update(persistenceId: String, seqNr: Long, replacement: Array[Byte]) = {
val baseQuery = JournalTable.filter(_.persistenceId === persistenceId).filter(_.sequenceNumber === seqNr)
baseQuery.map(_.message).update(replacement)
}
def markJournalMessagesAsDeleted(persistenceId: String, maxSequenceNr: Long) =
JournalTable
.filter(_.persistenceId === persistenceId)
.filter(_.sequenceNumber <= maxSequenceNr)
.filter(_.deleted === false)
.map(_.deleted)
.update(true)
private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] =
selectAllJournalForPersistenceId(persistenceId).take(1).map(_.sequenceNumber).max
private def _highestMarkedSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] =
selectAllJournalForPersistenceId(persistenceId).filter(_.deleted === true).take(1).map(_.sequenceNumber).max
val highestSequenceNrForPersistenceId = Compiled(_highestSequenceNrForPersistenceId _)
val highestMarkedSequenceNrForPersistenceId = Compiled(_highestMarkedSequenceNrForPersistenceId _)
private def _selectByPersistenceIdAndMaxSequenceNumber(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) =
selectAllJournalForPersistenceIdDesc(persistenceId).filter(_.sequenceNumber <= maxSequenceNr)
val selectByPersistenceIdAndMaxSequenceNumber = Compiled(_selectByPersistenceIdAndMaxSequenceNumber _)
private def _allPersistenceIdsDistinct: Query[Rep[String], String, Seq] =
JournalTable.map(_.persistenceId).distinct
val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct)
def journalRowByPersistenceIds(persistenceIds: Iterable[String]): Query[Rep[String], String, Seq] =
for {
query <- JournalTable.map(_.persistenceId)
if query.inSetBind(persistenceIds)
} yield query
private def _messagesQuery(
persistenceId: Rep[String],
fromSequenceNr: Rep[Long],
toSequenceNr: Rep[Long],
max: ConstColumn[Long]) =
JournalTable
.filter(_.persistenceId === persistenceId)
.filter(_.deleted === false)
.filter(_.sequenceNumber >= fromSequenceNr)
.filter(_.sequenceNumber <= toSequenceNr)
.sortBy(_.sequenceNumber.asc)
.take(max)
val messagesQuery = Compiled(_messagesQuery _)
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/JournalTables.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal.dao.legacy
import akka.persistence.jdbc.config.LegacyJournalTableConfiguration
trait JournalTables {
val profile: slick.jdbc.JdbcProfile
import profile.api._
def journalTableCfg: LegacyJournalTableConfiguration
class Journal(_tableTag: Tag)
extends Table[JournalRow](
_tableTag,
_schemaName = journalTableCfg.schemaName,
_tableName = journalTableCfg.tableName) {
def * = (ordering, deleted, persistenceId, sequenceNumber, message, tags)
.<>((JournalRow.apply _).tupled, JournalRow.unapply)
val ordering: Rep[Long] = column[Long](journalTableCfg.columnNames.ordering, O.AutoInc)
val persistenceId: Rep[String] =
column[String](journalTableCfg.columnNames.persistenceId, O.Length(255, varying = true))
val sequenceNumber: Rep[Long] = column[Long](journalTableCfg.columnNames.sequenceNumber)
val deleted: Rep[Boolean] = column[Boolean](journalTableCfg.columnNames.deleted, O.Default(false))
val tags: Rep[Option[String]] =
column[Option[String]](journalTableCfg.columnNames.tags, O.Length(255, varying = true))
val message: Rep[Array[Byte]] = column[Array[Byte]](journalTableCfg.columnNames.message)
val pk = primaryKey(s"${tableName}_pk", (persistenceId, sequenceNumber))
val orderingIdx = index(s"${tableName}_ordering_idx", ordering, unique = true)
}
lazy val JournalTable = new TableQuery(tag => new Journal(tag))
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/package.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.journal.dao
package object legacy {
final case class JournalRow(
ordering: Long,
deleted: Boolean,
persistenceId: String,
sequenceNumber: Long,
message: Array[Byte],
tags: Option[String] = None)
def encodeTags(tags: Set[String], separator: String): Option[String] =
if (tags.isEmpty) None else Option(tags.mkString(separator))
def decodeTags(tags: Option[String], separator: String): Set[String] =
tags.map(_.split(separator).toSet).getOrElse(Set.empty[String])
}
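/*
 * Editor's note, not part of the original source: a small sketch of the tag helpers above.
 * Because decodeTags relies on String.split, the separator is effectively treated as a regular
 * expression, so plain separators such as "," behave as expected.
 *
 *   encodeTags(Set("users", "audit"), ",")  // Some("users,audit") (element order not guaranteed)
 *   encodeTags(Set.empty, ",")              // None
 *   decodeTags(Some("users,audit"), ",")    // Set("users", "audit")
 *   decodeTags(None, ",")                   // Set()
 */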
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/query/JdbcReadJournalProvider.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.query
import akka.actor.ExtendedActorSystem
import akka.persistence.query.ReadJournalProvider
import com.typesafe.config.Config
class JdbcReadJournalProvider(system: ExtendedActorSystem, config: Config, configPath: String)
extends ReadJournalProvider {
override def scaladslReadJournal(): scaladsl.JdbcReadJournal =
new scaladsl.JdbcReadJournal(config, configPath)(system)
override def javadslReadJournal(): javadsl.JdbcReadJournal = new javadsl.JdbcReadJournal(scaladslReadJournal())
}
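/*
 * Editor's note, not part of the original source: this provider is normally not instantiated
 * directly; applications obtain the read journal through Akka Persistence Query. A minimal
 * sketch, assuming an ActorSystem `system` is in scope:
 *
 *   import akka.persistence.query.PersistenceQuery
 *   import akka.persistence.jdbc.query.scaladsl.JdbcReadJournal
 *
 *   val readJournal: JdbcReadJournal =
 *     PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier)
 */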
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/query/JournalSequenceActor.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc
package query
import akka.actor.{ Actor, ActorLogging, Props, Status, Timers }
import akka.pattern.pipe
import akka.persistence.jdbc.config.JournalSequenceRetrievalConfig
import akka.persistence.jdbc.query.dao.ReadJournalDao
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import scala.collection.immutable.NumericRange
import scala.concurrent.duration.FiniteDuration
object JournalSequenceActor {
def props(readJournalDao: ReadJournalDao, config: JournalSequenceRetrievalConfig)(
implicit materializer: Materializer): Props = Props(new JournalSequenceActor(readJournalDao, config))
private case object QueryOrderingIds
private case class NewOrderingIds(originalOffset: Long, elements: Seq[OrderingId])
private case class ScheduleAssumeMaxOrderingId(max: OrderingId)
private case class AssumeMaxOrderingId(max: OrderingId)
case object GetMaxOrderingId
case class MaxOrderingId(maxOrdering: OrderingId)
private case object QueryOrderingIdsTimerKey
private case object AssumeMaxOrderingIdTimerKey
private type OrderingId = Long
/**
* Efficient representation of missing elements using NumericRanges.
* It can be seen as a collection of OrderingIds
*/
private case class MissingElements(elements: Seq[NumericRange[OrderingId]]) {
def addRange(from: OrderingId, until: OrderingId): MissingElements = {
val newRange = from.until(until)
MissingElements(elements :+ newRange)
}
def contains(id: OrderingId): Boolean = elements.exists(_.containsTyped(id))
def isEmpty: Boolean = elements.forall(_.isEmpty)
}
private object MissingElements {
def empty: MissingElements = MissingElements(Vector.empty)
}
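/*
 * Editor's note, not part of the original source: a tiny sketch of MissingElements semantics.
 * addRange(from, until) stores a half-open range, so `until` itself is not considered missing.
 *
 *   val missing = MissingElements.empty.addRange(5L, 8L) // tracks 5, 6 and 7
 *   missing.contains(6L) // true
 *   missing.contains(8L) // false
 *   missing.isEmpty      // false
 */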
}
/**
* To support the EventsByTag query, this actor keeps track of which rows are visible in the database.
 * This is required to guarantee the EventsByTag query does not skip any rows when rows with a higher (ordering) id are
* visible in the database before rows with a lower (ordering) id.
*/
class JournalSequenceActor(readJournalDao: ReadJournalDao, config: JournalSequenceRetrievalConfig)(
implicit materializer: Materializer)
extends Actor
with ActorLogging
with Timers {
import JournalSequenceActor._
import context.dispatcher
import config.{ batchSize, maxBackoffQueryDelay, maxTries, queryDelay }
override def receive: Receive = receive(0L, Map.empty, 0)
override def preStart(): Unit = {
self ! QueryOrderingIds
readJournalDao.maxJournalSequence().mapTo[Long].onComplete {
case scala.util.Success(maxInDatabase) =>
self ! ScheduleAssumeMaxOrderingId(maxInDatabase)
case scala.util.Failure(t) =>
log.info("Failed to recover fast, using event-by-event recovery instead. Cause: {}", t)
}
}
/**
* @param currentMaxOrdering The highest ordering value for which it is known that no missing elements exist
* @param missingByCounter A map with missing orderingIds. The key of the map is the count at which the missing elements
* can be assumed to be "skipped ids" (they are no longer assumed missing).
 * @param moduloCounter A counter which is incremented every time a new query has been executed, modulo `maxTries`
* @param previousDelay The last used delay (may change in case failures occur)
*/
private def receive(
currentMaxOrdering: OrderingId,
missingByCounter: Map[Int, MissingElements],
moduloCounter: Int,
previousDelay: FiniteDuration = queryDelay): Receive = {
case ScheduleAssumeMaxOrderingId(max) =>
// All elements smaller than max can be assumed missing after this delay
val delay = queryDelay * maxTries
timers.startSingleTimer(key = AssumeMaxOrderingIdTimerKey, AssumeMaxOrderingId(max), delay)
case AssumeMaxOrderingId(max) =>
if (currentMaxOrdering < max) {
context.become(receive(max, missingByCounter, moduloCounter, previousDelay))
}
case GetMaxOrderingId =>
sender() ! MaxOrderingId(currentMaxOrdering)
case QueryOrderingIds =>
readJournalDao
.journalSequence(currentMaxOrdering, batchSize)
.runWith(Sink.seq)
.map(result => NewOrderingIds(currentMaxOrdering, result))
.pipeTo(self)
case NewOrderingIds(originalOffset, _) if originalOffset < currentMaxOrdering =>
// search was done using an offset that became obsolete in the meantime
// therefore we start a new query
self ! QueryOrderingIds
case NewOrderingIds(_, elements) =>
findGaps(elements, currentMaxOrdering, missingByCounter, moduloCounter)
case Status.Failure(t) =>
val newDelay = maxBackoffQueryDelay.min(previousDelay * 2)
if (newDelay == maxBackoffQueryDelay) {
log.warning("Failed to query max ordering id because of {}, retrying in {}", t, newDelay)
}
scheduleQuery(newDelay)
context.become(receive(currentMaxOrdering, missingByCounter, moduloCounter, newDelay))
}
/**
 * This method implements the "find gaps" algorithm; it is the core of this actor.
*/
private def findGaps(
elements: Seq[OrderingId],
currentMaxOrdering: OrderingId,
missingByCounter: Map[Int, MissingElements],
moduloCounter: Int): Unit = {
// list of elements that will be considered as genuine gaps.
// `givenUp` is either empty or was filled on a previous iteration
val givenUp = missingByCounter.getOrElse(moduloCounter, MissingElements.empty)
val (nextMax, _, missingElems) =
// using the ordering elements that were fetched, we verify if there are any gaps
elements.foldLeft[(OrderingId, OrderingId, MissingElements)](
(currentMaxOrdering, currentMaxOrdering, MissingElements.empty)) {
case ((currentMax, previousElement, missing), currentElement) =>
// we must decide if we move the cursor forward
val newMax = {
val maxCandidate = currentMax + 1
if ((currentElement - maxCandidate) < Int.MaxValue) {
if ((currentMax + 1).until(currentElement).forall(givenUp.contains)) {
// we move the cursor forward when:
// 1) all ids in between were already detected as missing on a previous iteration, so it is time to give up on them
// 2) current + 1 == currentElement (meaning no gap). Note that `forall` on an empty range always returns true
currentElement
} else currentMax
} else {
// we can't iterate over this... assume that forall failed
// the AssumeMaxOrderingId will advance the currentMaxOrdering
currentMax
}
}
// we accumulate in newMissing the gaps we detect on each iteration
val newMissing =
if (previousElement + 1 == currentElement || newMax == currentElement) missing
else missing.addRange(previousElement + 1, currentElement)
(newMax, currentElement, newMissing)
}
val newMissingByCounter = missingByCounter + (moduloCounter -> missingElems)
// did we detect gaps in the current batch?
val noGapsFound = missingElems.isEmpty
// full batch means that we retrieved as many elements as the batchSize
// that happens when we are not yet at the end of the stream
val isFullBatch = elements.size == batchSize
if (noGapsFound && isFullBatch) {
// Many elements have been retrieved but none are missing
// We can query again immediately, as this allows the actor to rapidly retrieve the real max ordering
self ! QueryOrderingIds
context.become(receive(nextMax, newMissingByCounter, moduloCounter))
} else {
// either we detected gaps or we reached the end of stream (batch not full)
// in this case we want to keep querying but not immediately
scheduleQuery(queryDelay)
context.become(receive(nextMax, newMissingByCounter, (moduloCounter + 1) % maxTries))
}
}
def scheduleQuery(delay: FiniteDuration): Unit = {
timers.startSingleTimer(key = QueryOrderingIdsTimerKey, QueryOrderingIds, delay)
}
}
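/*
 * Editor's note, not part of the original source: a minimal sketch of querying this actor for the
 * highest gap-free ordering id via the ask pattern. `system`, `readJournalDao` and `config` are
 * assumed to exist, and an implicit Materializer is assumed to be in scope for props.
 *
 *   import akka.pattern.ask
 *   import akka.util.Timeout
 *   import scala.concurrent.Future
 *   import scala.concurrent.duration._
 *   import JournalSequenceActor.{ GetMaxOrderingId, MaxOrderingId }
 *
 *   implicit val timeout: Timeout = 3.seconds
 *   val seqActor = system.actorOf(JournalSequenceActor.props(readJournalDao, config))
 *   val maxOrdering: Future[MaxOrderingId] = (seqActor ? GetMaxOrderingId).mapTo[MaxOrderingId]
 */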
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/query/dao/DefaultReadJournalDao.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.query.dao
import akka.NotUsed
import akka.persistence.PersistentRepr
import akka.persistence.jdbc.AkkaSerialization
import akka.persistence.jdbc.config.ReadJournalConfig
import akka.persistence.jdbc.journal.dao.{ BaseJournalDaoWithReadMessages, H2Compat }
import akka.serialization.Serialization
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import slick.jdbc.JdbcBackend.Database
import slick.jdbc.JdbcProfile
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try
class DefaultReadJournalDao(
val db: Database,
val profile: JdbcProfile,
val readJournalConfig: ReadJournalConfig,
serialization: Serialization)(implicit val ec: ExecutionContext, val mat: Materializer)
extends ReadJournalDao
with BaseJournalDaoWithReadMessages
with H2Compat {
import profile.api._
val queries = new ReadJournalQueries(profile, readJournalConfig)
override def allPersistenceIdsSource(max: Long): Source[String, NotUsed] =
Source.fromPublisher(db.stream(queries.allPersistenceIdsDistinct(correctMaxForH2Driver(max)).result))
override def eventsByTag(
tag: String,
offset: Long,
maxOffset: Long,
max: Long): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed] = {
// This doesn't populate the tags. AFAICT they aren't used
Source
.fromPublisher(db.stream(queries.eventsByTag((tag, offset, maxOffset, correctMaxForH2Driver(max))).result))
.map(row =>
AkkaSerialization.fromRow(serialization)(row).map { case (repr, ordering) => (repr, Set.empty, ordering) })
}
override def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] =
Source.fromPublisher(db.stream(queries.journalSequenceQuery((offset, limit)).result))
override def maxJournalSequence(): Future[Long] =
db.run(queries.maxJournalSequenceQuery.result)
override def messages(
persistenceId: String,
fromSequenceNr: Long,
toSequenceNr: Long,
max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] =
Source
.fromPublisher(
db.stream(
queries.messagesQuery((persistenceId, fromSequenceNr, toSequenceNr, correctMaxForH2Driver(max))).result))
.map(AkkaSerialization.fromRow(serialization)(_))
}
================================================
FILE: core/src/main/scala/akka/persistence/jdbc/query/dao/ReadJournalDao.scala
================================================
/*
* Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>
* Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>
*/
package akka.persistence.jdbc.query.dao
import akka.NotUsed
import akka.persistence.PersistentRepr
import akka.persistence.jdbc.journal.dao.JournalDaoWithReadMessages
import akka.stream.scaladsl.Source
import scala.collection.immutable.Set
import scala.concurrent.Future
import scala.util.Try
trait ReadJournalDao extends JournalDaoWithReadMessages {
/**
 * Returns a distinct stream of persistenceIds
*/
def allPersistenceIdsSource(max: Long): Source[String, NotUsed]
/**
 * Returns a Source of deserialized data for a certain tag from an offset. The result is sorted by
 * the global ordering of the events.
 * Each element will be a Try containing a PersistentRepr, the set of tags, and a Long representing the global ordering of the events
*/
def eventsByTag(
tag: String,
offset: Long,
maxOffset: Long,
max: Long): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed]
/**
* @param offset Minimum value to retrieve
* @param limit Maximum number of values to retrieve
* @return A Source of journal event sequence numbers (corresponding to the Ordering column)
*/
def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed]
/**
 * @return The value of the maximum (ordering) id
 */
def maxJournalSequence(): Future[Long]
}
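/*
 * Editor's note, not part of the original source: a minimal sketch of consuming an implementation
 * of this DAO, assuming a `dao: ReadJournalDao` and an implicit Materializer are in scope; the tag
 * and bounds are illustrative only.
 *
 *   import akka.stream.scaladsl.Sink
 *
 *   val events = dao
 *     .eventsByTag("users", offset = 0L, maxOffset = Long.MaxValue, max = 100L)
 *     .runWith(Sink.seq) // Future[Seq[Try[(PersistentRepr, Set[String], Long)]]]
 */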
SYMBOL INDEX (100 symbols across 22 files)
FILE: core/src/main/resources/schema/h2/h2-create-schema-legacy.sql
type PUBLIC (line 1) | CREATE TABLE IF NOT EXISTS PUBLIC."journal" (
type PUBLIC (line 10) | CREATE UNIQUE INDEX IF NOT EXISTS "journal_ordering_idx" ON PUBLIC."jou...
type PUBLIC (line 12) | CREATE TABLE IF NOT EXISTS PUBLIC."snapshot" (
type "durable_state" (line 21) | CREATE TABLE IF NOT EXISTS "durable_state" (
type "durable_state" (line 33) | CREATE INDEX "state_tag_idx" on "durable_state" ("tag")
type "durable_state" (line 34) | CREATE INDEX "state_global_offset_idx" on "durable_state" ("global_offset")
FILE: core/src/main/resources/schema/h2/h2-create-schema.sql
type "event_journal" (line 1) | CREATE TABLE IF NOT EXISTS "event_journal" (
type "event_journal" (line 18) | CREATE UNIQUE INDEX "event_journal_ordering_idx" on "event_journal" ("or...
type "event_tag" (line 20) | CREATE TABLE IF NOT EXISTS "event_tag" (
type "snapshot" (line 32) | CREATE TABLE IF NOT EXISTS "snapshot" (
type "durable_state" (line 46) | CREATE TABLE IF NOT EXISTS "durable_state" (
type "durable_state" (line 57) | CREATE INDEX IF NOT EXISTS "state_tag_idx" on "durable_state" ("tag")
type "durable_state" (line 58) | CREATE INDEX IF NOT EXISTS "state_global_offset_idx" on "durable_state" ...
FILE: core/src/main/resources/schema/mysql/mysql-create-schema-legacy.sql
type journal (line 1) | CREATE TABLE IF NOT EXISTS journal (
type journal_ordering_idx (line 10) | CREATE UNIQUE INDEX journal_ordering_idx ON journal(ordering)
type snapshot (line 12) | CREATE TABLE IF NOT EXISTS snapshot (
FILE: core/src/main/resources/schema/mysql/mysql-create-schema.sql
type event_journal (line 1) | CREATE TABLE IF NOT EXISTS event_journal (
type event_journal_ordering_idx (line 17) | CREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering)
type event_tag (line 19) | CREATE TABLE IF NOT EXISTS event_tag (
type snapshot (line 30) | CREATE TABLE IF NOT EXISTS snapshot (
FILE: core/src/main/resources/schema/oracle/oracle-create-schema-legacy.sql
type "journal" (line 15) | CREATE UNIQUE INDEX "journal_ordering_idx" ON "journal"("ordering")
FILE: core/src/main/resources/schema/postgres/postgres-create-schema-legacy.sql
type public (line 1) | CREATE TABLE IF NOT EXISTS public.journal (
type journal_ordering_idx (line 10) | CREATE UNIQUE INDEX IF NOT EXISTS journal_ordering_idx ON public.journal...
type public (line 12) | CREATE TABLE IF NOT EXISTS public.snapshot (
type public (line 20) | CREATE TABLE IF NOT EXISTS public.durable_state (
type state_tag_idx (line 31) | CREATE INDEX CONCURRENTLY state_tag_idx on public.durable_state (tag)
type state_global_offset_idx (line 32) | CREATE INDEX CONCURRENTLY state_global_offset_idx on public.durable_stat...
FILE: core/src/main/resources/schema/postgres/postgres-create-schema.sql
type public (line 1) | CREATE TABLE IF NOT EXISTS public.event_journal (
type event_journal_ordering_idx (line 22) | CREATE UNIQUE INDEX event_journal_ordering_idx ON public.event_journal(o...
type public (line 24) | CREATE TABLE IF NOT EXISTS public.event_tag(
type public (line 36) | CREATE TABLE IF NOT EXISTS public.snapshot (
type public (line 52) | CREATE TABLE IF NOT EXISTS public.durable_state (
type state_tag_idx (line 63) | CREATE INDEX CONCURRENTLY state_tag_idx on public.durable_state (tag)
type state_global_offset_idx (line 64) | CREATE INDEX CONCURRENTLY state_global_offset_idx on public.durable_stat...
FILE: core/src/main/resources/schema/sqlserver/sqlserver-create-schema-legacy.sql
type journal_ordering_idx (line 13) | CREATE UNIQUE INDEX journal_ordering_idx ON journal (ordering)
type snapshot (line 18) | CREATE TABLE snapshot (
FILE: core/src/main/resources/schema/sqlserver/sqlserver-create-schema-varchar.sql
type event_journal (line 12) | CREATE TABLE event_journal(
type event_journal_ordering_idx (line 29) | CREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering)
type event_tag (line 31) | CREATE TABLE event_tag (
type "snapshot" (line 41) | CREATE TABLE "snapshot" (
FILE: core/src/main/resources/schema/sqlserver/sqlserver-create-schema.sql
type event_journal (line 1) | CREATE TABLE event_journal (
type event_journal_ordering_idx (line 18) | CREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering)
type event_tag (line 20) | CREATE TABLE event_tag (
type "snapshot" (line 32) | CREATE TABLE "snapshot" (
FILE: core/src/test/java/akka/persistence/jdbc/JavadslSnippets.java
class JavadslSnippets (line 38) | final class JavadslSnippets {
method create (line 39) | void create() {
method readJournal (line 47) | void readJournal() {
method persistenceIds (line 58) | void persistenceIds() {
method eventsByPersistenceIds (line 72) | void eventsByPersistenceIds() {
method eventsByTag (line 89) | void eventsByTag() {
FILE: core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java
class JavadslSnippets (line 50) | final class JavadslSnippets {
method create (line 51) | void create() {
method durableStatePlugin (line 59) | void durableStatePlugin() {
method getObject (line 72) | void getObject() {
method upsertAndGetObject (line 93) | void upsertAndGetObject() {
method deleteObject (line 119) | void deleteObject() {
method currentChanges (line 139) | void currentChanges() {
method changes (line 155) | void changes() {
FILE: docs/src/main/paradox/assets/js/warnOldVersion.js
function initOldVersionWarnings (line 1) | function initOldVersionWarnings($, thisVersion, projectUrl) {
function showVersionWarning (line 16) | function showVersionWarning(thisVersion, currentVersion, projectUrl) {
FILE: migrator/src/test/resources/schema/h2/h2-create-schema-legacy.sql
type PUBLIC (line 1) | CREATE TABLE IF NOT EXISTS PUBLIC."journal" (
type PUBLIC (line 10) | CREATE UNIQUE INDEX IF NOT EXISTS "journal_ordering_idx" ON PUBLIC."jou...
type PUBLIC (line 12) | CREATE TABLE IF NOT EXISTS PUBLIC."legacy_snapshot" (
type "durable_state" (line 21) | CREATE TABLE IF NOT EXISTS "durable_state" (
type "durable_state" (line 33) | CREATE INDEX "state_tag_idx" on "durable_state" ("tag")
type "durable_state" (line 34) | CREATE INDEX "state_global_offset_idx" on "durable_state" ("global_offset")
FILE: migrator/src/test/resources/schema/h2/h2-create-schema.sql
type "event_journal" (line 1) | CREATE TABLE IF NOT EXISTS "event_journal" (
type "event_journal" (line 18) | CREATE UNIQUE INDEX "event_journal_ordering_idx" on "event_journal" ("or...
type "event_tag" (line 20) | CREATE TABLE IF NOT EXISTS "event_tag" (
type "snapshot" (line 32) | CREATE TABLE IF NOT EXISTS "snapshot" (
type "durable_state" (line 46) | CREATE TABLE IF NOT EXISTS "durable_state" (
type "durable_state" (line 57) | CREATE INDEX IF NOT EXISTS "state_tag_idx" on "durable_state" ("tag")
type "durable_state" (line 58) | CREATE INDEX IF NOT EXISTS "state_global_offset_idx" on "durable_state" ...
FILE: migrator/src/test/resources/schema/mysql/mysql-create-schema-legacy.sql
type journal (line 1) | CREATE TABLE IF NOT EXISTS journal (
type journal_ordering_idx (line 10) | CREATE UNIQUE INDEX journal_ordering_idx ON journal(ordering)
type legacy_snapshot (line 12) | CREATE TABLE IF NOT EXISTS legacy_snapshot (
FILE: migrator/src/test/resources/schema/mysql/mysql-create-schema.sql
type event_journal (line 1) | CREATE TABLE IF NOT EXISTS event_journal (
type event_journal_ordering_idx (line 17) | CREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering)
type event_tag (line 19) | CREATE TABLE IF NOT EXISTS event_tag (
type snapshot (line 30) | CREATE TABLE IF NOT EXISTS snapshot (
FILE: migrator/src/test/resources/schema/oracle/oracle-create-schema-legacy.sql
type "journal" (line 15) | CREATE UNIQUE INDEX "journal_ordering_idx" ON "journal"("ordering")
FILE: migrator/src/test/resources/schema/postgres/postgres-create-schema-legacy.sql
type public (line 1) | CREATE TABLE IF NOT EXISTS public.journal (
type journal_ordering_idx (line 10) | CREATE UNIQUE INDEX IF NOT EXISTS journal_ordering_idx ON public.journal...
type public (line 12) | CREATE TABLE IF NOT EXISTS public.legacy_snapshot (
type public (line 20) | CREATE TABLE IF NOT EXISTS public.durable_state (
type state_tag_idx (line 31) | CREATE INDEX CONCURRENTLY state_tag_idx on public.durable_state (tag)
type state_global_offset_idx (line 32) | CREATE INDEX CONCURRENTLY state_global_offset_idx on public.durable_stat...
FILE: migrator/src/test/resources/schema/postgres/postgres-create-schema.sql
type public (line 1) | CREATE TABLE IF NOT EXISTS public.event_journal(
type event_journal_ordering_idx (line 22) | CREATE UNIQUE INDEX event_journal_ordering_idx ON public.event_journal(o...
type public (line 24) | CREATE TABLE IF NOT EXISTS public.event_tag(
type public (line 36) | CREATE TABLE IF NOT EXISTS public.snapshot (
type public (line 52) | CREATE TABLE IF NOT EXISTS public.durable_state (
type state_tag_idx (line 63) | CREATE INDEX CONCURRENTLY state_tag_idx on public.durable_state (tag)
type state_global_offset_idx (line 64) | CREATE INDEX CONCURRENTLY state_global_offset_idx on public.durable_stat...
FILE: migrator/src/test/resources/schema/sqlserver/sqlserver-create-schema-legacy.sql
type journal_ordering_idx (line 12) | CREATE UNIQUE INDEX journal_ordering_idx ON journal (ordering)
type legacy_snapshot (line 17) | CREATE TABLE legacy_snapshot (
FILE: migrator/src/test/resources/schema/sqlserver/sqlserver-create-schema.sql
type event_journal (line 1) | CREATE TABLE event_journal(
type event_journal_ordering_idx (line 18) | CREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering)
type event_tag (line 20) | CREATE TABLE event_tag (
type "snapshot" (line 32) | CREATE TABLE "snapshot" (
Condensed preview — 271 files, each showing path, character count, and a content snippet (full structured content is 870K chars).
[
{
"path": ".fossa.yml",
"chars": 165,
"preview": "version: 3\n\n# https://github.com/fossas/fossa-cli/blob/master/docs/references/files/fossa-yml.md\n\npaths:\n exclude:\n "
},
{
"path": ".github/workflows/checks.yml",
"chars": 3459,
"preview": "name: Basic checks\n\non:\n pull_request:\n push:\n branches:\n - main\n tags-ignore: [ v.* ]\n\npermissions:\n cont"
},
{
"path": ".github/workflows/fossa.yml",
"chars": 1258,
"preview": "name: Dependency License Scanning\n\non:\n workflow_dispatch:\n schedule:\n - cron: '0 0 * * 0' # At 00:00 on Sunday\n\npe"
},
{
"path": ".github/workflows/link-validator.yml",
"chars": 1269,
"preview": "name: Link Validator\n\non:\n workflow_dispatch:\n pull_request:\n schedule:\n - cron: '40 6 1 * *'\n\npermissions:\n con"
},
{
"path": ".github/workflows/release.yml",
"chars": 2672,
"preview": "name: Release\n\non:\n push:\n branches:\n - main\n tags: [\"v*\"]\n\npermissions:\n contents: read\n\njobs:\n release:\n"
},
{
"path": ".github/workflows/test.yml",
"chars": 2694,
"preview": "name: Integration Tests\n\non:\n pull_request:\n push:\n branches:\n - main\n tags-ignore: [ v.* ]\n\npermissions:\n "
},
{
"path": ".github/workflows/weekly.yml",
"chars": 2420,
"preview": "name: Weekly Integration Tests\n\non:\n schedule:\n - cron: \"0 0 * * 1\"\n workflow_dispatch:\n\npermissions:\n contents: r"
},
{
"path": ".gitignore",
"chars": 142,
"preview": "/RUNNING_PID\nlogs\ntarget\n.idea\n*.iml\n*.iws\n.settings\n.classpath\n.project\n.worksheet\n.bsp\n*.code-workspace\n.bloop\n.metals"
},
{
"path": ".sbtopts",
"chars": 50,
"preview": "-J-Xms512M\n-J-Xmx4096M\n-J-XX:MaxGCPauseMillis=200\n"
},
{
"path": ".scala-steward.conf",
"chars": 349,
"preview": "pullRequests.frequency = \"@monthly\"\n\nupdates.ignore = [\n { groupId = \"org.scalameta\", artifactId = \"scalafmt-core\" }\n "
},
{
"path": ".scalafmt.conf",
"chars": 1091,
"preview": "version = 3.0.8\n\nstyle = defaultWithAlign\n\ndocstrings.style = Asterisk\ndocstrings.wrap = no\nindentO"
},
{
"path": "CONTRIBUTING.md",
"chars": 6349,
"preview": "# Contributing to Akka Persistence JDBC\n\n## General Workflow\n\nThis is the process for committing code into master.\n\n1. M"
},
{
"path": "LICENSE",
"chars": 4741,
"preview": "Business Source License 1.1\n\nParameters\n\nLicensor: Lightbend, Inc.\nLicensed Work: Akka Persistence JD"
},
{
"path": "README.md",
"chars": 3176,
"preview": "Akka\n====\n*Akka is a powerful platform that simplifies building and operating highly responsive, resilient, and scalable"
},
{
"path": "RELEASING.md",
"chars": 1273,
"preview": "## Releasing\n\nUse this command to create a release issue of [Release Train Issue Template](docs/release-train-issue-temp"
},
{
"path": "build.sbt",
"chars": 5700,
"preview": "import com.lightbend.paradox.apidoc.ApidocPlugin.autoImport.apidocRootPackage\nimport com.geirsson.CiReleasePlugin\n\nlazy "
},
{
"path": "core/src/main/mima-filters/3.5.3.backwards.excludes/issue-322-messagesWithBatch.excludes",
"chars": 1066,
"preview": "# #322 Adding messagesWithBatch to Dao traits\nProblemFilters.exclude[InheritedNewAbstractMethodProblem](\"akka.persistenc"
},
{
"path": "core/src/main/mima-filters/3.5.3.backwards.excludes/issue-91-ordering-offset.excludes",
"chars": 888,
"preview": "# #91 changing signature of messages and messagesWithBatch in JournalDaoWithReadMessages\n# tuple (PersistentRepr, Lo"
},
{
"path": "core/src/main/mima-filters/4.x.x.backwards.excludes/pr-401-highest-seq-nr.excludes",
"chars": 347,
"preview": "# https://github.com/akka/akka-persistence-jdbc/pull/401/files\nProblemFilters.exclude[IncompatibleSignatureProblem](\"akk"
},
{
"path": "core/src/main/mima-filters/5.0.1.backwards.excludes/pr-570-akka-serialization.excludes",
"chars": 697,
"preview": "# https://github.com/akka/akka-persistence-jdbc/pull/570/files\n# The problem comes from an earlier PR where the class ak"
},
{
"path": "core/src/main/mima-filters/5.0.2.backwards.excludes/issue-585-performance-regression.excludes",
"chars": 1187,
"preview": "# internals\nProblemFilters.exclude[IncompatibleTemplateDefProblem](\"akka.persistence.jdbc.journal.dao.BaseDao\")\nProblemF"
},
{
"path": "core/src/main/mima-filters/5.1.0.backwards.excludes/issue-557-logical-delete.excludes",
"chars": 539,
"preview": "ProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.config.BaseDaoConfig.logicalDelete\")\nProblemFi"
},
{
"path": "core/src/main/mima-filters/5.4.0.backwards.excludes/issue-710-tag-fk.excludes",
"chars": 1060,
"preview": "ProblemFilters.exclude[IncompatibleSignatureProblem](\"akka.persistence.jdbc.journal.dao.JournalTables#EventTags.eventId\""
},
{
"path": "core/src/main/mima-filters/5.4.0.backwards.excludes/issue-775-slick-3.50.excludes",
"chars": 4014,
"preview": "ProblemFilters.exclude[IncompatibleMethTypeProblem](\"akka.persistence.jdbc.db.EagerSlickDatabase.apply\")\nProblemFilters."
},
{
"path": "core/src/main/mima-filters/5.5.0.backwards.excludes/issue-891-durable-store.excludes",
"chars": 258,
"preview": "# internal api changes\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.state.JdbcDurableStateS"
},
{
"path": "core/src/main/mima-filters/5.5.2.backwards.excludes/pr-928-cleanup-tool.excludes",
"chars": 263,
"preview": "# internal api changes\nProblemFilters.exclude[ReversedMissingMethodProblem](\"akka.persistence.jdbc.journal.dao.JournalDa"
},
{
"path": "core/src/main/resources/reference.conf",
"chars": 31949,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\nakka-persistence-"
},
{
"path": "core/src/main/resources/schema/h2/h2-create-schema-legacy.sql",
"chars": 1174,
"preview": "CREATE TABLE IF NOT EXISTS PUBLIC.\"journal\" (\n \"ordering\" BIGINT AUTO_INCREMENT,\n \"persistence_id\" VARCHAR(255) NOT NU"
},
{
"path": "core/src/main/resources/schema/h2/h2-create-schema.sql",
"chars": 2120,
"preview": "CREATE TABLE IF NOT EXISTS \"event_journal\" (\n \"ordering\" BIGINT UNIQUE NOT NULL AUTO_INCREMENT,\n \"deleted\" BOOLEAN"
},
{
"path": "core/src/main/resources/schema/h2/h2-drop-schema-legacy.sql",
"chars": 124,
"preview": "DROP TABLE IF EXISTS PUBLIC.\"journal\";\nDROP TABLE IF EXISTS PUBLIC.\"snapshot\";\nDROP TABLE IF EXISTS PUBLIC.\"durable_stat"
},
{
"path": "core/src/main/resources/schema/h2/h2-drop-schema.sql",
"chars": 223,
"preview": "DROP TABLE IF EXISTS PUBLIC.\"event_tag\";\nDROP TABLE IF EXISTS PUBLIC.\"event_journal\";\nDROP TABLE IF EXISTS PUBLIC.\"snaps"
},
{
"path": "core/src/main/resources/schema/mysql/mysql-create-schema-legacy.sql",
"chars": 563,
"preview": "CREATE TABLE IF NOT EXISTS journal (\n ordering SERIAL,\n persistence_id VARCHAR(255) NOT NULL,\n sequence_number BIGINT"
},
{
"path": "core/src/main/resources/schema/mysql/mysql-create-schema.sql",
"chars": 1344,
"preview": "CREATE TABLE IF NOT EXISTS event_journal (\n ordering SERIAL,\n deleted BOOLEAN DEFAULT false NOT NULL,\n persiste"
},
{
"path": "core/src/main/resources/schema/mysql/mysql-drop-schema-legacy.sql",
"chars": 61,
"preview": "DROP TABLE IF EXISTS journal;\nDROP TABLE IF EXISTS snapshot;\n"
},
{
"path": "core/src/main/resources/schema/mysql/mysql-drop-schema.sql",
"chars": 99,
"preview": "DROP TABLE IF EXISTS event_tag;\nDROP TABLE IF EXISTS event_journal;\nDROP TABLE IF EXISTS snapshot;\n"
},
{
"path": "core/src/main/resources/schema/mysql/mysql-event-tag-migration.sql",
"chars": 1367,
"preview": "-- **************** first step ****************\n-- add new column\nALTER TABLE event_tag\n ADD persistence_id VARCHAR("
},
{
"path": "core/src/main/resources/schema/oracle/oracle-create-schema-legacy.sql",
"chars": 1237,
"preview": "CREATE SEQUENCE \"ordering_seq\" START WITH 1 INCREMENT BY 1 NOMAXVALUE\n/\n\nCREATE TABLE \"journal\" (\n \"ordering\" NUMERIC,\n"
},
{
"path": "core/src/main/resources/schema/oracle/oracle-create-schema.sql",
"chars": 2079,
"preview": "CREATE SEQUENCE EVENT_JOURNAL__ORDERING_SEQ START WITH 1 INCREMENT BY 1 NOMAXVALUE\n/\n\nCREATE TABLE EVENT_JOURNAL (\n O"
},
{
"path": "core/src/main/resources/schema/oracle/oracle-drop-schema-legacy.sql",
"chars": 388,
"preview": "-- (ddl lock timeout in seconds) this allows tests which are still writing to the db to finish gracefully\nALTER SESSION "
},
{
"path": "core/src/main/resources/schema/oracle/oracle-drop-schema.sql",
"chars": 305,
"preview": "ALTER SESSION SET ddl_lock_timeout = 15\n/\n\nDROP TABLE EVENT_TAG CASCADE CONSTRAINT\n/\n\nDROP TABLE EVENT_JOURNAL CASCADE C"
},
{
"path": "core/src/main/resources/schema/oracle/oracle-event-tag-migration.sql",
"chars": 1593,
"preview": "-- **************** first step ****************\n-- add new column\nALTER TABLE EVENT_TAG\n ADD (PERSISTENCE_ID VARCHAR2"
},
{
"path": "core/src/main/resources/schema/postgres/postgres-create-schema-legacy.sql",
"chars": 1121,
"preview": "CREATE TABLE IF NOT EXISTS public.journal (\n ordering BIGSERIAL,\n persistence_id VARCHAR(255) NOT NULL,\n sequence_num"
},
{
"path": "core/src/main/resources/schema/postgres/postgres-create-schema.sql",
"chars": 1901,
"preview": "CREATE TABLE IF NOT EXISTS public.event_journal (\n ordering BIGSERIAL,\n persistence_id VARCHAR(255) NOT NULL,\n sequen"
},
{
"path": "core/src/main/resources/schema/postgres/postgres-drop-schema-legacy.sql",
"chars": 118,
"preview": "DROP TABLE IF EXISTS public.journal;\nDROP TABLE IF EXISTS public.snapshot;\nDROP TABLE IF EXISTS public.durable_state;\n"
},
{
"path": "core/src/main/resources/schema/postgres/postgres-drop-schema.sql",
"chars": 164,
"preview": "DROP TABLE IF EXISTS public.event_tag;\nDROP TABLE IF EXISTS public.event_journal;\nDROP TABLE IF EXISTS public.snapshot;\n"
},
{
"path": "core/src/main/resources/schema/postgres/postgres-event-tag-migration.sql",
"chars": 1222,
"preview": "-- **************** first step ****************\n-- add new column\nALTER TABLE public.event_tag\n ADD persistence_id V"
},
{
"path": "core/src/main/resources/schema/sqlserver/sqlserver-create-schema-legacy.sql",
"chars": 845,
"preview": "\nIF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID(N'\"journal\"') AND type in (N'U'))\nbegin\nCREATE TA"
},
{
"path": "core/src/main/resources/schema/sqlserver/sqlserver-create-schema-varchar.sql",
"chars": 2058,
"preview": "/*\nAkka Persistence JDBC versions from 5.0.0 through 5.1.0 used this schema. The only difference from the\npost-5.0.4 sc"
},
{
"path": "core/src/main/resources/schema/sqlserver/sqlserver-create-schema.sql",
"chars": 1554,
"preview": "CREATE TABLE event_journal (\n \"ordering\" BIGINT IDENTITY(1,1) NOT NULL,\n \"deleted\" BIT DEFAULT 0 NOT NULL,\n \"pe"
},
{
"path": "core/src/main/resources/schema/sqlserver/sqlserver-drop-schema-legacy.sql",
"chars": 61,
"preview": "DROP TABLE IF EXISTS journal;\nDROP TABLE IF EXISTS snapshot;\n"
},
{
"path": "core/src/main/resources/schema/sqlserver/sqlserver-drop-schema.sql",
"chars": 99,
"preview": "DROP TABLE IF EXISTS event_tag;\nDROP TABLE IF EXISTS event_journal;\nDROP TABLE IF EXISTS snapshot;\n"
},
{
"path": "core/src/main/resources/schema/sqlserver/sqlserver-event-tag-migration.sql",
"chars": 2029,
"preview": "-- **************** first step ****************\n-- add new column\nALTER TABLE event_tag\n ADD persistence_id VARCHAR("
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/AkkaSerialization.scala",
"chars": 2280,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/JournalRow.scala",
"chars": 357,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/cleanup/javadsl/EventSourcedCleanup.scala",
"chars": 2475,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/cleanup/scaladsl/EventSourcedCleanup.scala",
"chars": 3448,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/config/AkkaPersistenceConfig.scala",
"chars": 11640,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/db/SlickDatabase.scala",
"chars": 3202,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/db/SlickExtension.scala",
"chars": 3628,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/JdbcAsyncWriteJournal.scala",
"chars": 5259,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/BaseDao.scala",
"chars": 2096,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/BaseJournalDaoWithReadMessages.scala",
"chars": 2852,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/DefaultJournalDao.scala",
"chars": 5781,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/FlowControl.scala",
"chars": 666,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/H2Compat.scala",
"chars": 581,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDao.scala",
"chars": 1299,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDaoInstantiation.scala",
"chars": 1434,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDaoWithReadMessages.scala",
"chars": 1256,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDaoWithUpdates.scala",
"chars": 671,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalQueries.scala",
"chars": 5102,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalTables.scala",
"chars": 4403,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/ByteArrayJournalDao.scala",
"chars": 5205,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/ByteArrayJournalSerializer.scala",
"chars": 1242,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/JournalQueries.scala",
"chars": 3593,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/JournalTables.scala",
"chars": 1601,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/package.scala",
"chars": 691,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/query/JdbcReadJournalProvider.scala",
"chars": 675,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/query/JournalSequenceActor.scala",
"chars": 8307,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/query/dao/DefaultReadJournalDao.scala",
"chars": 2440,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/query/dao/ReadJournalDao.scala",
"chars": 1457,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/query/dao/ReadJournalQueries.scala",
"chars": 2770,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/query/dao/legacy/ByteArrayReadJournalDao.scala",
"chars": 6067,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/query/dao/legacy/ReadJournalQueries.scala",
"chars": 2273,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/query/javadsl/JdbcReadJournal.scala",
"chars": 5995,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/query/package.scala",
"chars": 588,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/query/scaladsl/JdbcReadJournal.scala",
"chars": 15140,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/serialization/PersistentReprSerializer.scala",
"chars": 2044,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/serialization/SnapshotSerializer.scala",
"chars": 407,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/snapshot/JdbcSnapshotStore.scala",
"chars": 3651,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/DefaultSnapshotDao.scala",
"chars": 5314,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/SnapshotDao.scala",
"chars": 1287,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/SnapshotDaoInstantiation.scala",
"chars": 1445,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/SnapshotQueries.scala",
"chars": 3275,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/SnapshotTables.scala",
"chars": 3008,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/legacy/ByteArraySnapshotDao.scala",
"chars": 3950,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/legacy/ByteArraySnapshotSerializer.scala",
"chars": 1195,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/legacy/SnapshotQueries.scala",
"chars": 3301,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/legacy/SnapshotTables.scala",
"chars": 2229,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/state/DurableStateQueries.scala",
"chars": 5783,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/state/DurableStateTables.scala",
"chars": 2282,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/state/JdbcDurableStateStoreProvider.scala",
"chars": 1962,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/state/OffsetOps.scala",
"chars": 592,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/state/SequenceNextValUpdater.scala",
"chars": 1850,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/state/javadsl/JdbcDurableStateStore.scala",
"chars": 2517,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/state/scaladsl/DurableStateSequenceActor.scala",
"chars": 14387,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/state/scaladsl/JdbcDurableStateStore.scala",
"chars": 8525,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/testkit/internal/SchemaType.scala",
"chars": 752,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/testkit/internal/SchemaUtilsImpl.scala",
"chars": 6180,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/testkit/javadsl/SchemaUtils.scala",
"chars": 6766,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/testkit/scaladsl/SchemaUtils.scala",
"chars": 6626,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/util/BlockingOps.scala",
"chars": 616,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/util/ByteArrayOps.scala",
"chars": 499,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/util/ConfigOps.scala",
"chars": 742,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/util/InputStreamOps.scala",
"chars": 948,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/util/PluginVersionChecker.scala",
"chars": 761,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/util/StringOps.scala",
"chars": 363,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/main/scala/akka/persistence/jdbc/util/TrySeq.scala",
"chars": 731,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/LICENSE",
"chars": 23557,
"preview": "LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT\r\n\r\nTHIS LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT (THIS \"AGREEMEN"
},
{
"path": "core/src/test/java/akka/persistence/jdbc/JavadslSnippets.java",
"chars": 3268,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java",
"chars": 5507,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/resources/general.conf",
"chars": 2117,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// This file cont"
},
{
"path": "core/src/test/resources/h2-application.conf",
"chars": 1326,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// general.conf i"
},
{
"path": "core/src/test/resources/h2-default-mode-application.conf",
"chars": 1018,
"preview": "# Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n\n// general.conf is included only for shared settings used "
},
{
"path": "core/src/test/resources/h2-shared-db-application.conf",
"chars": 1172,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general."
},
{
"path": "core/src/test/resources/h2-two-read-journals-application.conf",
"chars": 308,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"h2-appli"
},
{
"path": "core/src/test/resources/jndi-application.conf",
"chars": 986,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// general.conf i"
},
{
"path": "core/src/test/resources/jndi-shared-db-application.conf",
"chars": 981,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general."
},
{
"path": "core/src/test/resources/logback-test.xml",
"chars": 1908,
"preview": "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<configuration debug=\"false\">\n\n <appender name=\"console\" class=\"ch.qos.logbac"
},
{
"path": "core/src/test/resources/mysql-application.conf",
"chars": 1689,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// general.conf i"
},
{
"path": "core/src/test/resources/mysql-shared-db-application.conf",
"chars": 1768,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general."
},
{
"path": "core/src/test/resources/oracle-application.conf",
"chars": 1255,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// general.conf i"
},
{
"path": "core/src/test/resources/oracle-schema-overrides.conf",
"chars": 1769,
"preview": "# Oracle does not support returning a column with a case senstive name so all the column names and table names need\n# to"
},
{
"path": "core/src/test/resources/oracle-shared-db-application.conf",
"chars": 1327,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general."
},
{
"path": "core/src/test/resources/postgres-application.conf",
"chars": 1351,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// general.conf i"
},
{
"path": "core/src/test/resources/postgres-shared-db-application.conf",
"chars": 1495,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general."
},
{
"path": "core/src/test/resources/sqlserver-application.conf",
"chars": 1364,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general."
},
{
"path": "core/src/test/resources/sqlserver-shared-db-application.conf",
"chars": 1348,
"preview": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general."
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/ScaladslSnippets.scala",
"chars": 3007,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/SharedActorSystemTestSpec.scala",
"chars": 1732,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/SimpleSpec.scala",
"chars": 1042,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/SingleActorSystemPerTestSpec.scala",
"chars": 2929,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/TablesTestSpec.scala",
"chars": 14798,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/cleanup/scaladsl/EventSourcedCleanupTest.scala",
"chars": 1633,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/configuration/AkkaPersistenceConfigTest.scala",
"chars": 21147,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/configuration/ConfigOpsTest.scala",
"chars": 959,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/configuration/JNDIConfigTest.scala",
"chars": 1979,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/journal/JdbcJournalPerfSpec.scala",
"chars": 3696,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/journal/JdbcJournalSpec.scala",
"chars": 1872,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/journal/dao/ByteArrayJournalSerializerTest.scala",
"chars": 1550,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/journal/dao/JournalTablesTest.scala",
"chars": 1637,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/journal/dao/TagsSerializationTest.scala",
"chars": 1277,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/journal/dao/TrySeqTest.scala",
"chars": 1755,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/AllPersistenceIdsTest.scala",
"chars": 1630,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/CurrentEventsByPersistenceIdTest.scala",
"chars": 7518,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/CurrentEventsByTagTest.scala",
"chars": 8276,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/CurrentPersistenceIdsTest.scala",
"chars": 1240,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/EventAdapterTest.scala",
"chars": 7223,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/EventsByPersistenceIdTest.scala",
"chars": 11104,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/EventsByTagMigrationTest.scala",
"chars": 10636,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/EventsByTagTest.scala",
"chars": 16554,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/EventsByUnfrequentTagTest.scala",
"chars": 2692,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/HardDeleteQueryTest.scala",
"chars": 3881,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/JournalDaoStreamMessagesMemoryTest.scala",
"chars": 5526,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/JournalSequenceActorTest.scala",
"chars": 11622,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/MultipleReadJournalTest.scala",
"chars": 1144,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/QueryTestSpec.scala",
"chars": 16032,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/TaggingEventAdapter.scala",
"chars": 749,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/dao/ReadJournalTablesTest.scala",
"chars": 1612,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/query/dao/TestProbeReadJournalDao.scala",
"chars": 2529,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/serialization/StoreOnlySerializableMessagesTest.scala",
"chars": 4662,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/snapshot/JdbcSnapshotStoreSpec.scala",
"chars": 1716,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/snapshot/dao/legacy/SnapshotTablesTest.scala",
"chars": 1678,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/state/Payloads.scala",
"chars": 932,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala",
"chars": 4651,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/state/scaladsl/DataGenerationHelper.scala",
"chars": 2518,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/state/scaladsl/DurableStateSequenceActorTest.scala",
"chars": 17226,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/state/scaladsl/DurableStateStorePluginSpec.scala",
"chars": 2066,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/state/scaladsl/JdbcDurableStateSpec.scala",
"chars": 15284,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/state/scaladsl/StateSpecBase.scala",
"chars": 4241,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/state/scaladsl/TestProbeDurableStateStoreQuery.scala",
"chars": 2065,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/util/ClasspathResources.scala",
"chars": 659,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "core/src/test/scala/akka/persistence/jdbc/util/DropCreate.scala",
"chars": 1457,
"preview": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. "
},
{
"path": "doc/deadlock.md",
"chars": 5003,
"preview": "\n\n# Slick Scheduling Algorithm\nFor the [new scheduling algorithm in #1461](https://github.com/slick/slick/pull/1461) to "
},
{
"path": "docs/LICENSE",
"chars": 23557,
"preview": "LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT\r\n\r\nTHIS LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT (THIS \"AGREEMEN"
},
{
"path": "docs/release-train-issue-template.md",
"chars": 3866,
"preview": "Release Akka Persistence JDBC $VERSION$\n\n<!--\n# Release Train Issue Template for Akka Persistence JDBC\n\n(Liberally copie"
},
{
"path": "docs/src/main/paradox/_template/projectSpecificFooter.st",
"chars": 272,
"preview": "<script type=\"text/javascript\" src=\"$page.base$assets/js/warnOldVersion.js\"></script>\n<script type=\"text/javascript\">//<"
},
{
"path": "docs/src/main/paradox/assets/js/warnOldVersion.js",
"chars": 1085,
"preview": "function initOldVersionWarnings($, thisVersion, projectUrl) {\n if (projectUrl && projectUrl !== \"\") {\n var sch"
},
{
"path": "docs/src/main/paradox/configuration.md",
"chars": 6250,
"preview": "# Configuration\n\nThe plugin relies on @extref[Slick](slick:) to do create the SQL dialect for the database in use, there"
},
{
"path": "docs/src/main/paradox/custom-dao.md",
"chars": 2999,
"preview": "# Custom DAO Implementation\n\nThe plugin supports loading a custom DAO for the journal and snapshot. You should implement"
},
{
"path": "docs/src/main/paradox/durable-state-store.md",
"chars": 2887,
"preview": "# DurableStateStore\n## How to get the DurableStateStore\n\nThe `DurableStateStore` for JDBC plugin is obtained through the"
},
{
"path": "docs/src/main/paradox/index.md",
"chars": 484,
"preview": "# Akka Persistence JDBC\n\nThe Akka Persistence JDBC plugin allows for using JDBC-compliant databases as backend for @extr"
},
{
"path": "docs/src/main/paradox/migration.md",
"chars": 3485,
"preview": "# Migration\n\n## Migrating to version 5.4.0\n\nRelease `5.4.0` change the schema of `event_tag` table.\n\nThe previous versio"
},
{
"path": "docs/src/main/paradox/overview.md",
"chars": 3738,
"preview": "# Overview\n\nThe Akka Persistence JDBC plugin allows for using JDBC-compliant databases as backend for @extref:[Akka Pers"
},
{
"path": "docs/src/main/paradox/query.md",
"chars": 3292,
"preview": "# Persistence Query\n\n## How to get the ReadJournal\n\nThe `ReadJournal` is retrieved via the `akka.persistence.query.Persi"
},
{
"path": "docs/src/main/paradox/snapshots.md",
"chars": 1089,
"preview": "---\nproject.description: Snapshot builds via the Sonatype snapshot repository.\n---\n# Snapshots\n\nSnapshots are published "
},
{
"path": "integration/LICENSE",
"chars": 23557,
"preview": "LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT\r\n\r\nTHIS LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT (THIS \"AGREEMEN"
},
{
"path": "integration/src/test/scala/akka/persistence/jdbc/integration/AllPersistenceIdsTest.scala",
"chars": 662,
"preview": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{\n AllPersistenceIdsTest,\n MysqlCleaner,"
},
{
"path": "integration/src/test/scala/akka/persistence/jdbc/integration/CurrentEventsByPersistenceIdTest.scala",
"chars": 946,
"preview": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{\n CurrentEventsByPersistenceIdTest,\n My"
},
{
"path": "integration/src/test/scala/akka/persistence/jdbc/integration/CurrentEventsByTagTest.scala",
"chars": 856,
"preview": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{\n CurrentEventsByTagTest,\n MysqlCleaner"
},
{
"path": "integration/src/test/scala/akka/persistence/jdbc/integration/CurrentPersistenceIdsTest.scala",
"chars": 859,
"preview": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{\n CurrentPersistenceIdsTest,\n MysqlClea"
}
]
// ... and 71 more files (download for full content)
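The entries above form a simple machine-readable index: each object carries the file's repository `path`, its size in characters (`chars`), and a truncated `preview` of its contents. A minimal sketch of how such an index could be consumed follows — for example, grouping the database DDL scripts under `core/src/main/resources/schema/` by dialect. This is not part of the repository; the file name `files-index.json` is a hypothetical placeholder for wherever the JSON array above is saved.

```python
# Sketch only: read the file-index JSON array shown above and group the schema
# DDL scripts by database dialect. Assumes the array was saved as
# "files-index.json" (hypothetical name) next to this script.
import json
from collections import defaultdict
from pathlib import PurePosixPath

with open("files-index.json", encoding="utf-8") as f:
    entries = json.load(f)  # list of {"path": ..., "chars": ..., "preview": ...}

schema_scripts = defaultdict(list)
for entry in entries:
    path = PurePosixPath(entry["path"])
    # Schema DDL lives under core/src/main/resources/schema/<dialect>/
    if "resources/schema/" in entry["path"]:
        dialect = path.parent.name  # e.g. "postgres", "mysql", "sqlserver"
        schema_scripts[dialect].append((path.name, entry["chars"]))

for dialect, scripts in sorted(schema_scripts.items()):
    print(dialect)
    for name, chars in sorted(scripts):
        print(f"  {name} ({chars} chars)")
```

Under these assumptions the output would list, per dialect, the create/drop/migration scripts catalogued above (e.g. `postgres-create-schema.sql`, `postgres-drop-schema-legacy.sql`) together with their sizes.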