[
  {
    "path": ".fossa.yml",
    "content": "version: 3\n\n# https://github.com/fossas/fossa-cli/blob/master/docs/references/files/fossa-yml.md\n\npaths:\n  exclude:\n    - ./integration\n    - ./migrator-integration\n"
  },
  {
    "path": ".github/workflows/checks.yml",
    "content": "name: Basic checks\n\non:\n  pull_request:\n  push:\n    branches:\n      - main\n    tags-ignore: [ v.* ]\n\npermissions:\n  contents: read\n\njobs:\n  check-code-style:\n    name: Check Code Style\n    runs-on: Akka-Default\n    steps:\n      - name: Checkout\n        # https://github.com/actions/checkout/releases\n        uses: actions/checkout@v6\n        with:\n          # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves\n          fetch-depth: 0\n\n      - name: Checkout GitHub merge\n        if: github.event.pull_request\n        run: |-\n          git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch\n          git checkout scratch\n\n      - name: Cache Coursier cache\n        # https://github.com/coursier/cache-action/releases\n        uses: coursier/cache-action@v8.1.0\n\n      - name: Set up JDK 11\n        # https://github.com/coursier/setup-action/releases\n        uses: coursier/setup-action@v3.0.0\n        with:\n          jvm: temurin:1.11.0\n\n      - name: Run akka/github-actions-scripts\n        uses: akka/github-actions-scripts/setup_global_resolver@main\n\n      - name: Code style check and binary-compatibility check\n        # Run locally with: sbt 'verifyCodeStyle ; mimaReportBinaryIssues'\n        run: sbt \"; verifyCodeStyle; mimaReportBinaryIssues\"\n\n  check-code-compilation:\n    name: Check Code Compilation\n    runs-on: Akka-Default\n    steps:\n      - name: Checkout\n        # https://github.com/actions/checkout/releases\n        uses: actions/checkout@v6\n        with:\n          fetch-depth: 0\n\n      - name: Checkout GitHub merge\n        if: github.event.pull_request\n        run: |-\n          git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch\n          git checkout scratch\n\n      - name: Cache Coursier cache\n        # https://github.com/coursier/cache-action/releases\n        uses: 
coursier/cache-action@v8.1.0\n\n      - name: Set up JDK 11\n        # https://github.com/coursier/setup-action/releases\n        uses: coursier/setup-action@v3.0.0\n        with:\n          jvm: temurin:1.11.0\n\n      - name: Run akka/github-actions-scripts\n        uses: akka/github-actions-scripts/setup_global_resolver@main\n\n      - name: Compile all code with fatal warnings for Java 11 and Scala 2.13\n        # Run locally with: sbt Test/compile\n        run: sbt \"; Test/compile\"\n\n      - name: Compile all code with Scala 3.3\n        run: sbt \"++3.3; Test/compile\"\n\n  check-docs:\n    name: Check Docs\n    runs-on: Akka-Default\n    steps:\n      - name: Checkout\n        # https://github.com/actions/checkout/releases\n        uses: actions/checkout@v6\n        with:\n          fetch-depth: 0\n\n      - name: Checkout GitHub merge\n        if: github.event.pull_request\n        run: |-\n          git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch\n          git checkout scratch\n\n      - name: Cache Coursier cache\n        # https://github.com/coursier/cache-action/releases\n        uses: coursier/cache-action@v8.1.0\n\n      - name: Set up JDK 11\n        # https://github.com/coursier/setup-action/releases\n        uses: coursier/setup-action@v3.0.0\n        with:\n          jvm: temurin:1.11.0\n\n      - name: Run akka/github-actions-scripts\n        uses: akka/github-actions-scripts/setup_global_resolver@main\n\n      - name: Create all API docs for artifacts/website and all reference docs\n        run: sbt docs/paradox\n"
  },
  {
    "path": ".github/workflows/fossa.yml",
    "content": "name: Dependency License Scanning\n\non:\n  workflow_dispatch:\n  schedule:\n    - cron: '0 0 * * 0' # At 00:00 on Sunday\n\npermissions:\n  contents: read\n\njobs:\n  fossa:\n    name: Fossa\n    runs-on: Akka-Default\n    if: github.repository == 'akka/akka-persistence-jdbc'\n    steps:\n      - name: Checkout\n        # https://github.com/actions/checkout/releases\n        uses: actions/checkout@v6\n        with:\n          # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves\n          fetch-depth: 0\n\n      - name: Cache Coursier cache\n        # https://github.com/coursier/cache-action/releases\n        uses: coursier/cache-action@v8.1.0\n\n      - name: Set up JDK 11\n        # https://github.com/coursier/setup-action/releases\n        uses: coursier/setup-action@v3.0.0\n        with:\n          jvm: temurin:1.11.0\n\n      - name: Run akka/github-actions-scripts\n        uses: akka/github-actions-scripts/setup_global_resolver@main\n\n      - name: FOSSA policy check\n        run: |-\n          curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install-latest.sh | bash\n          fossa analyze && fossa test\n        env:\n          FOSSA_API_KEY: \"${{secrets.FOSSA_API_KEY}}\"\n"
  },
  {
    "path": ".github/workflows/link-validator.yml",
    "content": "name: Link Validator\n\non:\n  workflow_dispatch:\n  pull_request:\n  schedule:\n    - cron:  '40 6 1 * *'\n\npermissions:\n  contents: read\n\njobs:\n  validate-links:\n    runs-on: Akka-Default\n    if: github.repository == 'akka/akka-persistence-jdbc'\n    steps:\n      - name: Checkout\n        # https://github.com/actions/checkout/releases\n        uses: actions/checkout@v6\n        with:\n          # See https://github.com/actions/checkout/issues/299#issuecomment-677674415\n          ref: ${{ github.event.pull_request.head.sha }}\n          fetch-depth: 100\n\n      - name: Fetch tags\n        run: git fetch --depth=100 origin +refs/tags/*:refs/tags/*\n\n      - name: Cache Coursier cache\n        # https://github.com/coursier/cache-action/releases\n        uses: coursier/cache-action@v8.1.0\n\n      - name: Set up JDK 25\n        # https://github.com/coursier/setup-action/releases\n        uses: coursier/setup-action@v3.0.0\n        with:\n          jvm: temurin:1.25\n          apps: cs\n\n      - name: Run akka/github-actions-scripts\n        uses: akka/github-actions-scripts/setup_global_resolver@main\n\n      - name: sbt site\n        run: sbt docs/makeSite\n\n      - name: Run Link Validator\n        run: cs launch net.runne::site-link-validator:0.2.3 -- scripts/link-validator.conf\n"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "name: Release\n\non:\n  push:\n    branches:\n      - main\n    tags: [\"v*\"]\n\npermissions:\n  contents: read\n\njobs:\n  release:\n    # runs on main repo only\n    if: github.event.repository.fork == false\n    name: Release\n    # the release environment provides access to secrets required in the release process\n    # https://github.com/akka/akka-persistence-jdbc/settings/environments/164872635/edit\n    environment: release\n    runs-on: Akka-Default\n    steps:\n      - name: Checkout\n        # https://github.com/actions/checkout/releases\n        uses: actions/checkout@v6\n        with:\n          # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves\n          fetch-depth: 0\n\n      - name: Checkout GitHub merge\n        if: github.event.pull_request\n        run: |-\n          git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch\n          git checkout scratch\n\n      - name: Cache Coursier cache\n        # https://github.com/coursier/cache-action/releases\n        uses: coursier/cache-action@v8.1.0\n\n      - name: Set up JDK 11\n        # https://github.com/coursier/setup-action/releases\n        uses: coursier/setup-action@v3.0.0\n        with:\n          jvm: temurin:1.11.0.17\n\n      - name: Run akka/github-actions-scripts\n        uses: akka/github-actions-scripts/setup_global_resolver@main\n\n      - name: Publish artifacts for all Scala versions\n        env:\n          PGP_SECRET: ${{ secrets.PGP_SECRET }}\n          PGP_PASSPHRASE: ${{ secrets.PGP_PASSPHRASE }}\n          PUBLISH_USER: ${{ secrets.PUBLISH_USER }}\n          PUBLISH_PASSWORD: ${{ secrets.PUBLISH_PASSWORD }}\n        run: sbt +publishSigned\n\n  documentation:\n    name: Documentation\n    runs-on: Akka-Default\n    if: github.event.repository.fork == false\n    steps:\n      - name: Checkout\n        # https://github.com/actions/checkout/releases\n        uses: 
actions/checkout@v6\n        with:\n          # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves\n          fetch-depth: 0\n\n      - name: Set up JDK 25\n        # https://github.com/coursier/setup-action/releases\n        uses: coursier/setup-action@v3.0.0\n        with:\n          jvm: temurin:1.25\n\n      - name: Run akka/github-actions-scripts\n        uses: akka/github-actions-scripts/setup_global_resolver@main\n\n      - name: Publish\n        run: |-\n          eval \"$(ssh-agent -s)\"\n          echo $AKKA_RSYNC_GUSTAV | base64 -d > .github/id_rsa\n          chmod 600 .github/id_rsa\n          ssh-add .github/id_rsa\n          sbt publishRsync\n        env:\n          AKKA_RSYNC_GUSTAV: ${{ secrets.AKKA_RSYNC_GUSTAV }}\n"
  },
  {
    "path": ".github/workflows/test.yml",
    "content": "name: Integration Tests\n\non:\n  pull_request:\n  push:\n    branches:\n      - main\n    tags-ignore: [ v.* ]\n\npermissions:\n  contents: read\n\njobs:\n  integration-test:\n    runs-on: Akka-Default\n    strategy:\n      fail-fast: false\n      matrix:\n        db:\n        - name: \"H2\"\n          test: \"test\"\n        - name: \"MySQL\"\n          test: '\"integration/testOnly akka.persistence.jdbc.integration.MySQL*\"'\n          script: 'launch-mysql.sh'\n          hasOldDao: true\n        - name: \"Oracle\"\n          test: '\"integration/testOnly akka.persistence.jdbc.integration.Oracle*\"'\n          script: 'launch-oracle.sh'\n          hasOldDao: true\n        - name: \"Postgres\"\n          test: '\"integration/testOnly akka.persistence.jdbc.integration.Postgres*\"'\n          script: 'launch-postgres.sh'\n          hasOldDao: true\n\n        - name: \"SqlServer\"\n          test: '\"integration/testOnly akka.persistence.jdbc.integration.SqlServer*\"'\n          script: 'launch-sqlserver.sh'\n          hasOldDao: true\n\n    name: Integration Test ${{ matrix.db.name }}\n\n    steps:\n      - name: Checkout\n        # https://github.com/actions/checkout/releases\n        uses: actions/checkout@v6\n        with:\n          fetch-depth: 0\n\n      - name: Checkout GitHub merge\n        if: github.event.pull_request\n        run: |-\n          git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch\n          git checkout scratch\n\n      - name: Cache Coursier cache\n        # https://github.com/coursier/cache-action/releases\n        uses: coursier/cache-action@v8.1.0\n\n      - name: Set up JDK 11\n        # https://github.com/coursier/setup-action/releases\n        uses: coursier/setup-action@v3.0.0\n        with:\n          jvm: temurin:1.11.0\n\n      - name: Run akka/github-actions-scripts\n        uses: akka/github-actions-scripts/setup_global_resolver@main\n\n      - name: Start DB in docker container\n       
 if: ${{ matrix.db.script }}\n        run: |-\n          ./scripts/${{ matrix.db.script }}\n\n      - name: Run Integration tests for ${{ matrix.db.name }}\n        run: sbt ${{ matrix.db.test }}\n\n      - name: Run Integration tests for ${{ matrix.db.name }} (old dao)\n        if: ${{ matrix.db.hasOldDao }}\n        run: sbt ${{ matrix.db.test }} -Djdbc-journal.dao=akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao -Djdbc-snapshot-store.dao=akka.persistence.jdbc.snapshot.dao.legacy.ByteArraySnapshotDao -Djdbc-read-journal.dao=akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao\n\n\n      - name: Print logs on failure\n        if: ${{ failure() }}\n        run: find . -name \"*.log\" -exec ./scripts/cat-log.sh {} \\;\n
  },
  {
    "path": ".github/workflows/weekly.yml",
    "content": "name: Weekly Integration Tests\n\non:\n  schedule:\n    - cron: \"0 0 * * 1\"\n  workflow_dispatch:\n\npermissions:\n  contents: read\n\njobs:\n  integration-test:\n    name: Weekly Integration Test ${{ matrix.db.name }}, ${{ matrix.db.jdk }}\n    runs-on: Akka-Default\n    strategy:\n      fail-fast: false\n      matrix:\n        db:\n          - name: \"H2\"\n            test: \"test\"\n            jdk: 'temurin:1.21'\n          - name: \"H2\"\n            test: \"test\"\n            jdk: 'temurin:1.25'\n          - name: \"MySQL\"\n            test: '\"integration/testOnly akka.persistence.jdbc.integration.MySQL*\"'\n            script: 'launch-mysql.sh'\n            jdk: 'temurin:1.21'\n          - name: \"Oracle\"\n            test: '\"integration/testOnly akka.persistence.jdbc.integration.Oracle*\"'\n            script: 'launch-oracle.sh'\n            jdk: 'temurin:1.21'\n          - name: \"Postgres\"\n            test: '\"integration/testOnly akka.persistence.jdbc.integration.Postgres*\"'\n            script: 'launch-postgres.sh'\n            jdk: 'temurin:1.21'\n          - name: \"SqlServer\"\n            test: '\"integration/testOnly akka.persistence.jdbc.integration.SqlServer*\"'\n            script: 'launch-sqlserver.sh'\n            jdk: 'temurin:1.21'\n\n    steps:\n      - name: Checkout\n        # https://github.com/actions/checkout/releases\n        uses: actions/checkout@v6\n        with:\n          fetch-depth: 0\n\n      - name: Checkout GitHub merge\n        if: github.event.pull_request\n        run: |-\n          git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch\n          git checkout scratch\n\n      - name: Cache Coursier cache\n        # https://github.com/coursier/cache-action/releases\n        uses: coursier/cache-action@v8.1.0\n\n      - name: Set up ${{ matrix.db.jdk }}\n        # https://github.com/coursier/setup-action/releases\n        uses: coursier/setup-action@v3.0.0\n        with:\n    
      jvm: ${{ matrix.db.jdk }}\n\n      - name: Run akka/github-actions-scripts\n        uses: akka/github-actions-scripts/setup_global_resolver@main\n\n      - name: Start DB in docker container\n        if: ${{ matrix.db.script }}\n        run: |-\n          ./scripts/${{ matrix.db.script }}\n\n      - name: Run Integration tests for  ${{ matrix.db.name }}\n        run: sbt ${{ matrix.db.test }}\n\n      - name: Print logs on failure\n        if: ${{ failure() }}\n        run: find . -name \"*.log\" -exec ./scripts/cat-log.sh {} \\;\n"
  },
  {
    "path": ".gitignore",
    "content": "/RUNNING_PID\nlogs\ntarget\n.idea\n*.iml\n*.iws\n.settings\n.classpath\n.project\n.worksheet\n.bsp\n*.code-workspace\n.bloop\n.metals\nmetals.sbt\n.DS_Store\n"
  },
  {
    "path": ".sbtopts",
    "content": "-J-Xms512M\n-J-Xmx4096M\n-J-XX:MaxGCPauseMillis=200\n"
  },
  {
    "path": ".scala-steward.conf",
    "content": "pullRequests.frequency = \"@monthly\"\n\nupdates.ignore = [\n  { groupId = \"org.scalameta\", artifactId = \"scalafmt-core\" }\n  { groupId = \"org.scalameta\", artifactId = \"sbt-scalafmt\" }\n  // explicit updates\n  { groupId = \"com.typesafe.akka\" }\n]\n\ncommits.message = \"bump: ${artifactName} ${nextVersion} (was ${currentVersion})\"\n\nupdatePullRequests = never\n"
  },
  {
    "path": ".scalafmt.conf",
    "content": "version = 3.0.8\n\nstyle = defaultWithAlign\n\ndocstrings.style           = Asterisk\ndocstrings.wrap            = no\nindentOperator.preset      = spray\nmaxColumn                  = 120\nrewrite.rules              = [RedundantParens, SortImports, AvoidInfix]\nunindentTopLevelOperators  = true\nalign.tokens               = [{code = \"=>\", owner = \"Case\"}]\nalign.openParenDefnSite    = false\nalign.openParenCallSite    = false\noptIn.breakChainOnFirstMethodDot = false\noptIn.configStyleArguments = false\ndanglingParentheses.defnSite = false\ndanglingParentheses.callSite = false\nspaces.inImportCurlyBraces = true\nrewrite.neverInfix.excludeFilters = [\n  and\n  min\n  max\n  until\n  to\n  by\n  eq\n  ne\n  \"should.*\"\n  \"contain.*\"\n  \"must.*\"\n  in\n  ignore\n  be\n  taggedAs\n  thrownBy\n  synchronized\n  have\n  when\n  size\n  only\n  noneOf\n  oneElementOf\n  noElementsOf\n  atLeastOneElementOf\n  atMostOneElementOf\n  allElementsOf\n  inOrderElementsOf\n  theSameElementsAs\n]\nrewriteTokens = {\n  \"⇒\": \"=>\"\n  \"→\": \"->\"\n  \"←\": \"<-\"\n}\nnewlines.afterCurlyLambda = preserve\nnewlines.implicitParamListModifierPrefer = before\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing to Akka Persistence JDBC\n\n## General Workflow\n\nThis is the process for committing code into master.\n\n1. Make sure you have signed the Lightbend CLA, if not, [sign it online](https://www.lightbend.com/contribute/cla/akka/current).\n2. Before starting to work on a feature or a fix, make sure that there is a ticket for your work in the [issue tracker](https://github.com/akka/akka-persistence-jdbc/issues). If not, create it first.\n3. Perform your work according to the [pull request requirements](#pull-request-requirements).\n4. When the feature or fix is completed you should open a [Pull Request](https://help.github.com/articles/using-pull-requests) on [GitHub](https://github.com/akka/akka-persistence-jdbc/pulls).\n5. The Pull Request should be reviewed by other maintainers (as many as feasible/practical). Note that the maintainers can consist of outside contributors, both within and outside Lightbend. Outside contributors are encouraged to participate in the review process, it is not a closed process.\n6. After the review you should fix the issues (review comments, CI failures) by pushing a new commit for new review, iterating until the reviewers give their thumbs up and CI tests pass.\n7. If the branch merge conflicts with its target, rebase your branch onto the target branch.\n\nIn case of questions about the contribution process or for discussion of specific issues please visit the [akka forum](https://discuss.akka.io/c/akka/).\n\n## Pull Request Requirements\n\nFor a Pull Request to be considered at all it has to meet these requirements:\n\n1. Pull Request branch should be given a unique descriptive name that explains its intent.\n2. Code in the branch should live up to the current code standard:\n   - Not violate [DRY](http://programmer.97things.oreilly.com/wiki/index.php/Don%27t_Repeat_Yourself).\n   - [Boy Scout Rule](http://programmer.97things.oreilly.com/wiki/index.php/The_Boy_Scout_Rule) needs to have been applied.\n3. 
Regardless if the code introduces new features or fixes bugs or regressions, it must have comprehensive tests.\n4. The code must be well documented (see the [Documentation](#documentation) section below).\n5. The commit messages must properly describe the changes, see [further below](#creating-commits-and-writing-commit-messages).\n6. Do not use ``@author`` tags since it does not encourage [Collective Code Ownership](http://www.extremeprogramming.org/rules/collective.html). Contributors get the credit they deserve in the release notes.\n\nIf these requirements are not met then the code should **not** be merged into master, or even reviewed - regardless of how good or important it is. No exceptions.\n\n## Documentation\n\nDocumentation should be written in two forms:\n\n1. API documentation in the form of scaladoc/javadoc comments on the Scala and Java user API.\n2. Guide documentation in [docs](docs/) subproject using [Paradox](https://github.com/lightbend/paradox) documentation tool. This documentation should give a short introduction of how a given connector should be used.\n\n## External Dependencies\n\nAll the external runtime dependencies for the project, including transitive dependencies, must have an open source license that is equal to, or compatible with, [Apache 2](https://www.apache.org/licenses/LICENSE-2.0).\n\nThis must be ensured by manually verifying the license for all the dependencies for the project:\n\n1. Whenever a committer to the project changes a version of a dependency (including Scala) in the build file.\n2. Whenever a committer to the project adds a new dependency.\n3. 
Whenever a new release is cut (public or private for a customer).\n\nEvery external dependency listed in the build file must have a trailing comment with the license name of the dependency.\n\nWhich licenses are compatible with Apache 2 are defined in [this doc](https://www.apache.org/legal/3party.html#category-a), where you can see that the licenses that are listed under ``Category A`` automatically compatible with Apache 2, while the ones listed under ``Category B`` needs additional action:\n\n> Each license in this category requires some degree of [reciprocity](https://www.apache.org/legal/3party.html#define-reciprocal); therefore, additional action must be taken in order to minimize the chance that a user of an Apache product will create a derivative work of a reciprocally-licensed portion of an Apache product without being aware of the applicable requirements.\n\n## Creating Commits And Writing Commit Messages\n\nFollow these guidelines when creating public commits and writing commit messages.\n\n1. If your work spans multiple local commits (for example; if you do safe point commits while working in a feature branch or work in a branch for long time doing merges/rebases etc.) then please do not commit it all but rewrite the history by squashing the commits into a single big commit which you write a good commit message for (like discussed in the following sections). For more info read this article: [Git Workflow](https://sandofsky.com/blog/git-workflow.html). Every commit should be able to be used in isolation, cherry picked etc.\n\n2. First line should be a descriptive sentence what the commit is doing, including the ticket number. It should be possible to fully understand what the commit does—but not necessarily how it does it—by just reading this single line. 
We follow the “imperative present tense” style for commit messages ([more info here](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html)).\n\n   It is **not ok** to only list the ticket number, type \"minor fix\" or similar.\n   If the commit is a small fix, then you are done. If not, go to 3.\n\n3. Following the single line description should be a blank line followed by an enumerated list with the details of the commit.\n\n4. Add keywords for your commit (depending on the degree of automation we reach, the list may change over time):\n    * ``Review by @gituser`` - if you want to notify someone on the team. The others can, and are encouraged to participate.\n\nExample:\n\n    Add eventsByTag query #123\n\n    * Details 1\n    * Details 2\n    * Details 3\n\n## How To Enforce These Guidelines?\n\n1. [Scalafmt](https://scalameta.org/scalafmt/) enforces some of the code style rules.\n2. [sbt-header plugin](https://github.com/sbt/sbt-header) manages consistent copyright headers in every source file.\n"
  },
  {
    "path": "LICENSE",
    "content": "Business Source License 1.1\n\nParameters\n\nLicensor:             Lightbend, Inc.\nLicensed Work:        Akka Persistence JDBC 5.5.4\n                      This license applies to all sub directories and files\n                      UNLESS another license file is present in a sub\n                      directory, then that other license applies to all files\n                      in its directory and sub directories.\n                      The Licensed Work is (c) 2025 Lightbend Inc.\nAdditional Use Grant:\n    If you develop an application using a version of Play Framework that\n    utilizes binary versions of akka-streams and its dependencies, you may\n    use such binary versions of akka-streams and its dependencies in the\n    development of your application only as they are incorporated into\n    Play Framework and solely to implement the functionality provided by\n    Play Framework; provided that, they are only used in the following way:\n    Connecting to a Play Framework websocket and/or Play Framework\n    request/response bodies for server and play-ws client.\n\nChange Date:          2028-10-30\n\nChange License:       Apache License, Version 2.0\n\nFor information about alternative licensing arrangements for the Software,\nplease visit: https://akka.io\n\n-----------------------------------------------------------------------------\n\nBusiness Source License 1.1\n\nLicense text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.\n“Business Source License” is a trademark of MariaDB Corporation Ab.\n\nTerms\n\nThe Licensor hereby grants you the right to copy, modify, create derivative\nworks, redistribute, and make non-production use of the Licensed Work. 
The\nLicensor may make an Additional Use Grant, above, permitting limited\nproduction use.\n\nEffective on the Change Date, or the fourth anniversary of the first publicly\navailable distribution of a specific version of the Licensed Work under this\nLicense, whichever comes first, the Licensor hereby grants you rights under\nthe terms of the Change License, and the rights granted in the paragraph\nabove terminate.\n\nIf your use of the Licensed Work does not comply with the requirements\ncurrently in effect as described in this License, you must purchase a\ncommercial license from the Licensor, its affiliated entities, or authorized\nresellers, or you must refrain from using the Licensed Work.\n\nAll copies of the original and modified Licensed Work, and derivative works\nof the Licensed Work, are subject to this License. This License applies\nseparately for each version of the Licensed Work and the Change Date may vary\nfor each version of the Licensed Work released by Licensor.\n\nYou must conspicuously display this License on each original or modified copy\nof the Licensed Work. If you receive the Licensed Work in original or\nmodified form from a third party, the terms and conditions set forth in this\nLicense apply to your use of that work.\n\nAny use of the Licensed Work in violation of this License will automatically\nterminate your rights under this License for the current and all other\nversions of the Licensed Work.\n\nThis License does not grant you any right in any trademark or logo of\nLicensor or its affiliates (provided that you may use a trademark or logo of\nLicensor as expressly required by this License).\n\nTO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON\nAN “AS IS” BASIS. 
LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,\nEXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND\nTITLE.\n\nMariaDB hereby grants you permission to use this License’s text to license\nyour works, and to refer to it using the trademark “Business Source License”,\nas long as you comply with the Covenants of Licensor below.\n\nCovenants of Licensor\n\nIn consideration of the right to use this License’s text and the “Business\nSource License” name and trademark, Licensor covenants to MariaDB, and to all\nother recipients of the licensed work to be provided by Licensor:\n\n1. To specify as the Change License the GPL Version 2.0 or any later version,\n   or a license that is compatible with GPL Version 2.0 or a later version,\n   where “compatible” means that software provided under the Change License can\n   be included in a program with software provided under GPL Version 2.0 or a\n   later version. Licensor may specify additional Change Licenses without\n   limitation.\n\n2. To either: (a) specify an additional grant of rights to use that does not\n   impose any additional restriction on the right granted in this License, as\n   the Additional Use Grant; or (b) insert the text “None”.\n\n3. To specify a Change Date.\n\n4. Not to modify this License in any other way.\n"
  },
  {
    "path": "README.md",
    "content": "Akka\n====\n*Akka is a powerful platform that simplifies building and operating highly responsive, resilient, and scalable services.*\n\n\nThe platform consists of\n* the [**Akka SDK**](https://doc.akka.io/) for straightforward, rapid development with AI assist and automatic clustering. Services built with the Akka SDK are automatically clustered and can be deployed on any infrastructure.\n* and [**Akka Automated Operations**](https://doc.akka.io/operations/akka-platform.html), a managed solution that handles everything for Akka SDK services from auto-elasticity to multi-region high availability running safely within your VPC.\n\nThe **Akka SDK** and **Akka Automated Operations** are built upon the foundational [**Akka libraries**](https://doc.akka.io/libraries/akka-dependencies/current/), providing the building blocks for distributed systems.\n\n\nJDBC plugin for Akka Persistence\n================================\n\nakka-persistence-jdbc writes journal and snapshot entries to a configured JDBC store. It implements the full akka-persistence-query API and is therefore very useful for implementing DDD-style \napplication models using Akka for creating reactive applications.\n\nPlease note that the H2 database is not recommended to be used as a production database, and support for H2 is primarily for testing purposes.\n\nThe Akka Persistence JDBC was originally created by @dnvriend.\n\nReference Documentation\n-----------------------\n\nThe reference documentation for all Akka libraries is available via [doc.akka.io/libraries/](https://doc.akka.io/libraries/), details for the Akka JDBC plugin\nfor [Scala](https://doc.akka.io/libraries/akka-persistence-jdbc/current/?language=scala) and [Java](https://doc.akka.io/libraries/akka-persistence-jdbc/current/?language=java).\n\nThe current versions of all Akka libraries are listed on the [Akka Dependencies](https://doc.akka.io/libraries/akka-dependencies/current/) page. 
Releases of the Akka JDBC plugin in this repository are listed on the [GitHub releases](https://github.com/akka/akka-persistence-jdbc/releases) page.\n\n\n## Build Token\n\nTo build locally, you need to fetch a token at https://account.akka.io/token that you have to place into `~/.sbt/1.0/akka-commercial.sbt` file like this:\n```\nThisBuild / resolvers += \"lightbend-akka\".at(\"your token resolver here\")\n```\n\n## Contributing\n\nContributions are *very* welcome! The Akka team appreciates community contributions by both those new to Akka and those more experienced.\n\nIf you find an issue that you'd like to see fixed, the quickest way to make that happen is to implement the fix and submit a pull request.\n\nRefer to the [CONTRIBUTING.md](CONTRIBUTING.md) file for more details about the workflow, and general hints on how to prepare your pull request.\n\nYou can also ask for clarifications or guidance in GitHub issues directly, or in the [akka forum](https://discuss.akka.io/c/akka/).\n\n## License\n\nAkka is licensed under the Business Source License 1.1, please see the [Akka License FAQ](https://www.lightbend.com/akka/license-faq).\n\nTests and documentation are under a separate license, see the LICENSE file in each documentation and test root directory for details.\n"
  },
  {
    "path": "RELEASING.md",
    "content": "## Releasing\n\nUse this command to create a release issue of [Release Train Issue Template](docs/release-train-issue-template.md) and follow the steps.\n\n```bash\n~/akka-persistence-jdbc> scripts/create-release-issue.sh `version-to-be-released`\n```\n\n### Releasing only updated docs\n\nIt is possible to release a revised documentation to the already existing release.\n\n1. Create a new branch from a release tag. If a revised documentation is for the `v0.3` release, then the name of the new branch should be `docs/v0.3`.\n1. Add and commit `version.sbt` file that pins the version to the one, that is being revised. Also set `isSnapshot` to `false` for the stable documentation links. For example:\n    ```scala\n    ThisBuild / version := \"4.0.0\"\n    ThisBuild / isSnapshot := false\n    ```\n1. Make all of the required changes to the documentation.\n1. Build documentation locally with `CI` settings:\n    ```sh\n    env CI=true sbt docs/previewSite\n    ```\n1. If the generated documentation looks good, send it to Gustav:\n    ```sh\n    env CI=true sbt docs/publishRsync\n    ```\n1. Do not forget to push the new branch back to GitHub.\n1. Commit the changes to Gustav's local git repo\n\n### Releasing a Snapshot\n\nSnapshots are released automatically when commits are pushed to master.\n"
  },
  {
    "path": "build.sbt",
    "content": "import com.lightbend.paradox.apidoc.ApidocPlugin.autoImport.apidocRootPackage\nimport com.geirsson.CiReleasePlugin\n\nlazy val `akka-persistence-jdbc` = project\n  .in(file(\".\"))\n  .enablePlugins(ScalaUnidocPlugin)\n  .disablePlugins(MimaPlugin, SitePlugin, CiReleasePlugin)\n  .aggregate(core, docs, migrator)\n  .settings(name := \"akka-persistence-jdbc-root\", publish / skip := true)\n\nlazy val core = project\n  .in(file(\"core\"))\n  .enablePlugins(MimaPlugin)\n  .disablePlugins(SitePlugin, CiReleasePlugin)\n  .settings(\n    name := \"akka-persistence-jdbc\",\n    AutomaticModuleName.settings(\"akka.persistence.jdbc\"),\n    libraryDependencies ++= Dependencies.Libraries,\n    // Workaround for https://github.com/slick/slick/issues/2933\n    libraryDependencies ++=\n      (if (scalaVersion.value.startsWith(\"2.13\")) Seq(\"org.scala-lang\" % \"scala-reflect\" % scalaVersion.value)\n       else Nil),\n    mimaReportSignatureProblems := true,\n    mimaPreviousArtifacts := {\n      if (scalaVersion.value.startsWith(\"3\")) {\n        Set.empty\n      } else {\n        Set(\n          organization.value %% name.value % previousStableVersion.value.getOrElse(\n            throw new Error(\"Unable to determine previous version for MiMa\")))\n      }\n    })\n\nlazy val integration = project\n  .in(file(\"integration\"))\n  .settings(IntegrationTests.settings)\n  .settings(name := \"akka-persistence-jdbc-integration\", libraryDependencies ++= Dependencies.Libraries)\n  .disablePlugins(MimaPlugin, SitePlugin, CiReleasePlugin)\n  .dependsOn(core % \"compile->compile;test->test\")\n\nlazy val migrator = project\n  .in(file(\"migrator\"))\n  .disablePlugins(SitePlugin, MimaPlugin, CiReleasePlugin)\n  .settings(\n    name := \"akka-persistence-jdbc-migrator\",\n    AutomaticModuleName.settings(\"akka.persistence.jdbc.migrator\"),\n    libraryDependencies ++= Dependencies.Migration ++ Dependencies.Libraries,\n    // TODO remove this when ready to publish 
it\n    publish / skip := true)\n  .dependsOn(core % \"compile->compile;test->test\")\n\nlazy val `migrator-integration` = project\n  .in(file(\"migrator-integration\"))\n  .settings(IntegrationTests.settings)\n  .settings(name := \"akka-persistence-jdbc-migrator-integration\", libraryDependencies ++= Dependencies.Libraries)\n  .disablePlugins(MimaPlugin, SitePlugin, CiReleasePlugin)\n  .dependsOn(migrator)\n\nlazy val docs = project\n  .enablePlugins(ProjectAutoPlugin, AkkaParadoxPlugin, ParadoxSitePlugin, PreprocessPlugin, PublishRsyncPlugin)\n  .disablePlugins(MimaPlugin, CiReleasePlugin)\n  .settings(\n    name := \"Akka Persistence plugin for JDBC\",\n    publish / skip := true,\n    makeSite := makeSite.dependsOn(LocalRootProject / ScalaUnidoc / doc).value,\n    previewPath := (Paradox / siteSubdirName).value,\n    Preprocess / siteSubdirName := s\"api/akka-persistence-jdbc/${if (isSnapshot.value) \"snapshot\"\n    else version.value}\",\n    Preprocess / sourceDirectory := (LocalRootProject / ScalaUnidoc / unidoc / target).value,\n    Paradox / siteSubdirName := s\"libraries/akka-persistence-jdbc/${if (isSnapshot.value) \"snapshot\" else version.value}\",\n    Compile / paradoxProperties ++= Map(\n      \"project.url\" -> \"https://doc.akka.io/libraries/akka-persistence-jdbc/current/\",\n      \"github.base_url\" -> \"https://github.com/akka/akka-persistence-jdbc/\",\n      \"canonical.base_url\" -> \"https://doc.akka.io/libraries/akka-persistence-jdbc/current\",\n      \"akka.version\" -> Dependencies.AkkaVersion,\n      \"slick.version\" -> Dependencies.SlickVersion,\n      \"extref.github.base_url\" -> s\"https://github.com/akka/akka-persistence-jdbc/blob/${if (isSnapshot.value) \"master\"\n      else \"v\" + version.value}/%s\",\n      // Slick\n      \"extref.slick.base_url\" -> s\"https://scala-slick.org/doc/${Dependencies.SlickVersion}/%s\",\n      // Akka\n      \"extref.akka.base_url\" -> 
s\"https://doc.akka.io/libraries/akka-core/${Dependencies.AkkaBinaryVersion}/%s\",\n      \"scaladoc.akka.base_url\" -> s\"https://doc.akka.io/api/akka-core/${Dependencies.AkkaBinaryVersion}/\",\n      \"javadoc.akka.base_url\" -> s\"https://doc.akka.io/japi/akka-core/${Dependencies.AkkaBinaryVersion}/\",\n      \"javadoc.akka.link_style\" -> \"direct\",\n      // Java\n      \"javadoc.base_url\" -> \"https://docs.oracle.com/javase/8/docs/api/\",\n      // Scala\n      \"scaladoc.scala.base_url\" -> s\"https://www.scala-lang.org/api/${scalaBinaryVersion.value}.x/\",\n      \"scaladoc.akka.persistence.jdbc.base_url\" -> s\"/${(Preprocess / siteSubdirName).value}/\"),\n    paradoxGroups := Map(\"Language\" -> Seq(\"Java\", \"Scala\")),\n    resolvers += Resolver.jcenterRepo,\n    publishRsyncArtifacts += makeSite.value -> \"www/\",\n    publishRsyncHost := \"akkarepo@gustav.akka.io\",\n    apidocRootPackage := \"akka\")\n\nGlobal / onLoad := (Global / onLoad).value.andThen { s =>\n  val v = version.value\n  if (dynverGitDescribeOutput.value.hasNoTags)\n    throw new MessageOnlyException(\n      s\"Failed to derive version from git tags. Maybe run `git fetch --unshallow`? Derived version: $v\")\n  s\n}\n\nTaskKey[Unit](\"verifyCodeFmt\") := {\n  scalafmtCheckAll.all(ScopeFilter(inAnyProject)).result.value.toEither.left.foreach { _ =>\n    throw new MessageOnlyException(\n      \"Unformatted Scala code found. Please run 'scalafmtAll' and commit the reformatted code\")\n  }\n  (Compile / scalafmtSbtCheck).result.value.toEither.left.foreach { _ =>\n    throw new MessageOnlyException(\n      \"Unformatted sbt code found. 
Please run 'scalafmtSbt' and commit the reformatted code\")\n  }\n}\n\naddCommandAlias(\"verifyCodeStyle\", \"headerCheck; verifyCodeFmt\")\n\nval isJdk11orHigher: Boolean = {\n  val result = VersionNumber(sys.props(\"java.specification.version\")).matchesSemVer(SemanticSelector(\">=11\"))\n  if (!result)\n    throw new IllegalArgumentException(\"JDK 11 or higher is required\")\n  result\n}\n"
  },
  {
    "path": "core/src/main/mima-filters/3.5.3.backwards.excludes/issue-322-messagesWithBatch.excludes",
    "content": "# #322 Adding messagesWithBatch to Dao traits\nProblemFilters.exclude[InheritedNewAbstractMethodProblem](\"akka.persistence.jdbc.journal.dao.JournalDao.messagesWithBatch\")\nProblemFilters.exclude[InheritedNewAbstractMethodProblem](\"akka.persistence.jdbc.journal.dao.H2JournalDao.messagesWithBatch\")\nProblemFilters.exclude[InheritedNewAbstractMethodProblem](\"akka.persistence.jdbc.journal.dao.JournalDaoWithUpdates.messagesWithBatch\")\nProblemFilters.exclude[InheritedNewAbstractMethodProblem](\"akka.persistence.jdbc.query.dao.BaseByteArrayReadJournalDao.ec\")\nProblemFilters.exclude[InheritedNewAbstractMethodProblem](\"akka.persistence.jdbc.query.dao.BaseByteArrayReadJournalDao.mat\")\nProblemFilters.exclude[InheritedNewAbstractMethodProblem](\"akka.persistence.jdbc.query.dao.H2ReadJournalDao.messagesWithBatch\")\nProblemFilters.exclude[InheritedNewAbstractMethodProblem](\"akka.persistence.jdbc.query.dao.OracleReadJournalDao.messagesWithBatch\")\nProblemFilters.exclude[InheritedNewAbstractMethodProblem](\"akka.persistence.jdbc.query.dao.ReadJournalDao.messagesWithBatch\")\n"
  },
  {
    "path": "core/src/main/mima-filters/3.5.3.backwards.excludes/issue-91-ordering-offset.excludes",
    "content": "# #91 changing signature of messages and messagesWithBatch in JournalDaoWithReadMessages\n#     tuple (PersistentRepr, Long) to include the ordering number\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.serialization.FlowPersistentReprSerializer.deserializeFlowWithoutTags\")\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.journal.dao.ByteArrayJournalSerializer.deserializeFlowWithoutTags\")\nProblemFilters.exclude[MissingClassProblem](\"akka.persistence.jdbc.query.scaladsl.JdbcReadJournal$ContinueDelayed$\")\nProblemFilters.exclude[MissingClassProblem](\"akka.persistence.jdbc.query.scaladsl.JdbcReadJournal$FlowControl\")\nProblemFilters.exclude[MissingClassProblem](\"akka.persistence.jdbc.query.scaladsl.JdbcReadJournal$Stop$\")\nProblemFilters.exclude[MissingClassProblem](\"akka.persistence.jdbc.query.scaladsl.JdbcReadJournal$Continue$\")\n"
  },
  {
    "path": "core/src/main/mima-filters/4.x.x.backwards.excludes/pr-401-highest-seq-nr.excludes",
    "content": "# https://github.com/akka/akka-persistence-jdbc/pull/401/files\nProblemFilters.exclude[IncompatibleSignatureProblem](\"akka.persistence.jdbc.journal.dao.JournalQueries.highestSequenceNrForPersistenceId\")\nProblemFilters.exclude[IncompatibleSignatureProblem](\"akka.persistence.jdbc.journal.dao.JournalQueries.highestMarkedSequenceNrForPersistenceId\")\n"
  },
  {
    "path": "core/src/main/mima-filters/5.0.1.backwards.excludes/pr-570-akka-serialization.excludes",
    "content": "# https://github.com/akka/akka-persistence-jdbc/pull/570/files\n# The problem comes from an earlier PR where the class akka.persistence.jdbc.journal.dao.AkkaSerialization\n# was moved to akka.persistence.jdbc.AkkaSerialization as it was also being used from durable state\nProblemFilters.exclude[MissingClassProblem](\"akka.persistence.jdbc.journal.dao.AkkaSerialization\")\nProblemFilters.exclude[MissingClassProblem](\"akka.persistence.jdbc.journal.dao.AkkaSerialization$\")\nProblemFilters.exclude[MissingClassProblem](\"akka.persistence.jdbc.journal.dao.AkkaSerialization$AkkaSerialized\")\nProblemFilters.exclude[MissingClassProblem](\"akka.persistence.jdbc.journal.dao.AkkaSerialization$AkkaSerialized$\")"
  },
  {
    "path": "core/src/main/mima-filters/5.0.2.backwards.excludes/issue-585-performance-regression.excludes",
    "content": "# internals\nProblemFilters.exclude[IncompatibleTemplateDefProblem](\"akka.persistence.jdbc.journal.dao.BaseDao\")\nProblemFilters.exclude[MissingTypesProblem](\"akka.persistence.jdbc.journal.dao.DefaultJournalDao\")\nProblemFilters.exclude[IncompatibleSignatureProblem](\"akka.persistence.jdbc.journal.dao.DefaultJournalDao.queueWriteJournalRows\")\nProblemFilters.exclude[IncompatibleSignatureProblem](\"akka.persistence.jdbc.journal.dao.DefaultJournalDao.writeQueue\")\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.journal.dao.JournalQueries.insertAndReturn\")\nProblemFilters.exclude[IncompatibleSignatureProblem](\"akka.persistence.jdbc.journal.dao.JournalQueries.writeJournalRows\")\nProblemFilters.exclude[MissingTypesProblem](\"akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao\")\nProblemFilters.exclude[MissingTypesProblem](\"akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao\")\nProblemFilters.exclude[IncompatibleSignatureProblem](\"akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.queueWriteJournalRows\")\nProblemFilters.exclude[IncompatibleSignatureProblem](\"akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.writeQueue\")"
  },
  {
    "path": "core/src/main/mima-filters/5.1.0.backwards.excludes/issue-557-logical-delete.excludes",
    "content": "ProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.config.BaseDaoConfig.logicalDelete\")\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.config.ReadJournalConfig.includeDeleted\")\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao.logWarnAboutLogicalDeletionDeprecation\")\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.logWarnAboutLogicalDeletionDeprecation\")\n"
  },
  {
    "path": "core/src/main/mima-filters/5.4.0.backwards.excludes/issue-710-tag-fk.excludes",
    "content": "ProblemFilters.exclude[IncompatibleSignatureProblem](\"akka.persistence.jdbc.journal.dao.JournalTables#EventTags.eventId\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.journal.dao.JournalTables#TagRow.eventId\")\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.journal.dao.JournalTables#TagRow.copy\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.journal.dao.JournalTables#TagRow.copy$default$1\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.journal.dao.JournalTables#TagRow.copy$default$2\")\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.journal.dao.JournalTables#TagRow.this\")\nProblemFilters.exclude[MissingTypesProblem](\"akka.persistence.jdbc.journal.dao.JournalTables$TagRow$\")\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.journal.dao.JournalTables#TagRow.apply\")\nProblemFilters.exclude[IncompatibleSignatureProblem](\"akka.persistence.jdbc.journal.dao.JournalTables#TagRow.unapply\")"
  },
  {
    "path": "core/src/main/mima-filters/5.4.0.backwards.excludes/issue-775-slick-3.50.excludes",
    "content": "ProblemFilters.exclude[IncompatibleMethTypeProblem](\"akka.persistence.jdbc.db.EagerSlickDatabase.apply\")\nProblemFilters.exclude[IncompatibleSignatureProblem](\"akka.persistence.jdbc.db.EagerSlickDatabase.unapply\")\nProblemFilters.exclude[IncompatibleSignatureProblem](\"akka.persistence.jdbc.db.EagerSlickDatabase.tupled\")\nProblemFilters.exclude[IncompatibleSignatureProblem](\"akka.persistence.jdbc.db.EagerSlickDatabase.curried\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.db.EagerSlickDatabase.database\")\nProblemFilters.exclude[IncompatibleMethTypeProblem](\"akka.persistence.jdbc.db.EagerSlickDatabase.copy\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.db.EagerSlickDatabase.copy$default$1\")\nProblemFilters.exclude[IncompatibleMethTypeProblem](\"akka.persistence.jdbc.db.EagerSlickDatabase.this\")\n\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.db.LazySlickDatabase.database\")\n\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.db.SlickDatabase.forConfig\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.db.SlickDatabase.database\")\nProblemFilters.exclude[ReversedMissingMethodProblem](\"akka.persistence.jdbc.db.SlickDatabase.database\")\n\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.journal.JdbcAsyncWriteJournal.db\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.journal.dao.DefaultJournalDao.db\")\nProblemFilters.exclude[IncompatibleMethTypeProblem](\"akka.persistence.jdbc.journal.dao.DefaultJournalDao.this\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao.db\")\nProblemFilters.exclude[ReversedMissingMethodProblem](\"akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao.db\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"
akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.db\")\nProblemFilters.exclude[IncompatibleMethTypeProblem](\"akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.this\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.query.dao.DefaultReadJournalDao.db\")\nProblemFilters.exclude[IncompatibleMethTypeProblem](\"akka.persistence.jdbc.query.dao.DefaultReadJournalDao.this\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.query.dao.legacy.BaseByteArrayReadJournalDao.db\")\nProblemFilters.exclude[ReversedMissingMethodProblem](\"akka.persistence.jdbc.query.dao.legacy.BaseByteArrayReadJournalDao.db\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao.db\")\nProblemFilters.exclude[IncompatibleMethTypeProblem](\"akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao.this\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.query.dao.legacy.OracleReadJournalDao.db\")\nProblemFilters.exclude[ReversedMissingMethodProblem](\"akka.persistence.jdbc.query.dao.legacy.OracleReadJournalDao.db\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.snapshot.JdbcSnapshotStore.db\")\nProblemFilters.exclude[IncompatibleMethTypeProblem](\"akka.persistence.jdbc.snapshot.dao.DefaultSnapshotDao.this\")\nProblemFilters.exclude[IncompatibleMethTypeProblem](\"akka.persistence.jdbc.snapshot.dao.legacy.ByteArraySnapshotDao.this\")\nProblemFilters.exclude[IncompatibleResultTypeProblem](\"akka.persistence.jdbc.state.JdbcDurableStateStoreProvider.db\")\nProblemFilters.exclude[IncompatibleMethTypeProblem](\"akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore.this\")\n\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.query.JournalSequenceActor.receive\")\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.query.JournalSequenceActor.re
ceive$default$4\")\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.query.JournalSequenceActor.findGaps\")\n\n\n\n\n"
  },
  {
    "path": "core/src/main/mima-filters/5.5.0.backwards.excludes/issue-891-durable-store.excludes",
    "content": "# internal api changes\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.state.JdbcDurableStateStoreProvider.this\")\nProblemFilters.exclude[DirectMissingMethodProblem](\"akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore.this\")\n"
  },
  {
    "path": "core/src/main/mima-filters/5.5.2.backwards.excludes/pr-928-cleanup-tool.excludes",
    "content": "# internal api changes\nProblemFilters.exclude[ReversedMissingMethodProblem](\"akka.persistence.jdbc.journal.dao.JournalDao.deleteEventsTo\")\nProblemFilters.exclude[NewMixinForwarderProblem](\"akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao.delete\")\n"
  },
  {
    "path": "core/src/main/resources/reference.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\nakka-persistence-jdbc {\n\n  # The tag separator to use when tagging events with more than one tag.\n  # This property affects jdbc-journal.tagSeparator and jdbc-read-journal.tagSeparator.\n  tagSeparator = \",\"\n\n  database-provider-fqcn = \"akka.persistence.jdbc.db.DefaultSlickDatabaseProvider\"\n\n  shared-databases {\n    // Shared databases can be defined here.\n    // This reference config contains a partial example if a shared database which is enabled by configuring \"slick\" as the shared db\n    // this example is ignored by default as long as no profile is default\n    slick {\n\n      # This property indicates which profile must be used by Slick.\n      # Possible values are:\n      #  - slick.jdbc.PostgresProfile$\n      #  - slick.jdbc.MySQLProfile$\n      #  - slick.jdbc.H2Profile$\n      #  - slick.jdbc.SQLServerProfile$\n      #  - slick.jdbc.OracleProfile$\n      # (uncomment and set the property below to match your needs)\n      # profile = \"slick.jdbc.PostgresProfile$\"\n\n      db {\n        connectionPool = \"HikariCP\"\n\n        # The JDBC URL for the chosen database\n        # (uncomment and set the property below to match your needs)\n        # url = \"jdbc:postgresql://localhost:5432/akka-plugin\"\n\n        # The database username\n        # (uncomment and set the property below to match your needs)\n        # user = \"akka-plugin\"\n\n        # The username's password\n        # (uncomment and set the property below to match your needs)\n        # password = \"akka-plugin\"\n\n        # The JDBC driver to use\n        # (uncomment and set the property below to match your needs)\n        # driver = \"org.postgresql.Driver\"\n\n        # hikariCP settings; see: https://github.com/brettwooldridge/HikariCP\n        # Slick will use an async executor with a fixed size queue of 10.000 objects\n        # The async 
executor is a connection pool for asynchronous execution of blocking I/O actions.\n        # This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.\n        queueSize = 10000 // number of objects that can be queued by the async executor\n\n        # This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection\n        # from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown.\n        # 1000ms is the minimum value. Default: 180000 (3 minutes)\n        connectionTimeout = 180000\n\n        # This property controls the maximum amount of time that a connection will be tested for aliveness.\n        # This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000\n        validationTimeout = 5000\n\n        # 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool.\n        # Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation\n        # of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections\n        # are never removed from the pool. Default: 600000 (10 minutes)\n        idleTimeout = 600000\n\n        # 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout\n        # it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired,\n        # only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds\n        # less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime),\n        # subject of course to the idleTimeout setting. 
Default: 1800000 (30 minutes)\n        maxLifetime = 1800000\n\n        # This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a\n        # possible connection leak. A value of 0 means leak detection is disabled.\n        # Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0\n        leakDetectionThreshold = 0\n\n        # ensures that the database does not get dropped while we are using it\n        keepAliveConnection = on\n\n        # See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing\n        # Keep in mind that the number of threads must equal the maximum number of connections.\n        numThreads = 20\n        maxConnections = 20\n        minConnections = 20\n      }\n    }\n  }\n}\n\n# the akka-persistence-journal in use\njdbc-journal {\n  class = \"akka.persistence.jdbc.journal.JdbcAsyncWriteJournal\"\n\n  tables {\n\n    # Only used in pre 5.0.0 Dao\n    legacy_journal {\n      tableName = \"journal\"\n      schemaName = \"\"\n\n      columnNames {\n        ordering = \"ordering\"\n        deleted = \"deleted\"\n        persistenceId = \"persistence_id\"\n        sequenceNumber = \"sequence_number\"\n        created = \"created\"\n        tags = \"tags\"\n        message = \"message\"\n      }\n    }\n\n    event_journal {\n      tableName = \"event_journal\"\n      schemaName = \"\"\n\n      columnNames {\n        ordering = \"ordering\"\n        deleted = \"deleted\"\n        persistenceId = \"persistence_id\"\n        sequenceNumber = \"sequence_number\"\n        writer = \"writer\"\n        writeTimestamp = \"write_timestamp\"\n        adapterManifest = \"adapter_manifest\"\n        eventPayload = \"event_payload\"\n        eventSerId = \"event_ser_id\"\n        eventSerManifest = \"event_ser_manifest\"\n        metaPayload = \"meta_payload\"\n        metaSerId = \"meta_ser_id\"\n        
metaSerManifest = \"meta_ser_manifest\"\n      }\n    }\n\n    event_tag {\n      tableName = \"event_tag\"\n      schemaName = \"\"\n\n      columnNames {\n        # use for older foreign key.\n        eventId = \"event_id\"\n        persistenceId = \"persistence_id\"\n        sequenceNumber = \"sequence_number\"\n        tag = \"tag\"\n      }\n\n      # For rolling updates the event_tag table migration.\n      # switch those to enable new region key write and read.\n      legacy-tag-key = true\n    }\n\n    # Otherwise it would be a pinned dispatcher, see https://github.com/akka/akka/issues/31058\n    plugin-dispatcher = \"akka.actor.default-dispatcher\"\n  }\n\n  # The tag separator to use when tagging events with more than one tag.\n  # should not be configured directly, but through property akka-persistence-jdbc.tagSeparator\n  # in order to keep consistent behavior over write/read sides\n  # Only used for the legacy schema\n  tagSeparator = ${akka-persistence-jdbc.tagSeparator}\n\n  # If you have data from pre 5.0.0 use the legacy akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao\n  # Dao. Migration to the new dao will be added in the future.\n  dao = \"akka.persistence.jdbc.journal.dao.DefaultJournalDao\"\n\n  # The size of the buffer used when queueing up events for batch writing. This number must be bigger then the number\n  # of events that may be written concurrently. 
In other words this number must be bigger than the number of persistent\n  # actors that are actively persisting at the same time.\n  bufferSize = 1000\n  # The maximum size of the batches in which journal rows will be inserted\n  batchSize = 400\n  # The maximum size of the batches in which journal rows will be read when recovering\n  replayBatchSize = 400\n  # The maximum number of batch-inserts that may be running concurrently\n  parallelism = 8\n\n  # This setting can be used to configure usage of a shared database.\n  # To disable usage of a shared database, set to null or an empty string.\n  # When set to a non empty string, this setting does two things:\n  # - The actor which manages the write-journal will not automatically close the db when the actor stops (since it is shared)\n  # - If akka-persistence-jdbc.database-provider-fqcn is set to akka.persistence.jdbc.db.DefaultSlickDatabaseProvider\n  #   then the shared database with the given name will be used. (shared databases are configured as part of akka-persistence-jdbc.shared-databases)\n  #   Please note that the database will only be shared with the other journals if the use-shared-db is also set\n  #   to the same value for these other journals.\n  use-shared-db = null\n\n  slick {\n\n    # This property indicates which profile must be used by Slick.\n    # Possible values are:\n    #  - slick.jdbc.PostgresProfile$\n    #  - slick.jdbc.MySQLProfile$\n    #  - slick.jdbc.H2Profile$\n    #  - slick.jdbc.SQLServerProfile$\n    #  - slick.jdbc.OracleProfile$\n    # (uncomment and set the property below to match your needs)\n    # profile = \"slick.jdbc.PostgresProfile$\"\n\n    db {\n      connectionPool = \"HikariCP\"\n\n      # The JDBC URL for the chosen database\n      # (uncomment and set the property below to match your needs)\n      # url = \"jdbc:postgresql://localhost:5432/akka-plugin\"\n\n      # The database username\n      # (uncomment and set the property below to match your needs)\n      # 
user = \"akka-plugin\"\n\n      # The username's password\n      # (uncomment and set the property below to match your needs)\n      # password = \"akka-plugin\"\n\n      # The JDBC driver to use\n      # (uncomment and set the property below to match your needs)\n      # driver = \"org.postgresql.Driver\"\n\n      # hikariCP settings; see: https://github.com/brettwooldridge/HikariCP\n      # Slick will use an async executor with a fixed size queue of 10.000 objects\n      # The async executor is a connection pool for asynchronous execution of blocking I/O actions.\n      # This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.\n      queueSize = 10000 // number of objects that can be queued by the async executor\n\n      # This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection\n      # from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown.\n      # 1000ms is the minimum value. Default: 180000 (3 minutes)\n      connectionTimeout = 180000\n\n      # This property controls the maximum amount of time that a connection will be tested for aliveness.\n      # This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000\n      validationTimeout = 5000\n\n      # 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool.\n      # Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation\n      # of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections\n      # are never removed from the pool. Default: 600000 (10 minutes)\n      idleTimeout = 600000\n\n      # 30 minutes: This property controls the maximum lifetime of a connection in the pool. 
When a connection reaches this timeout\n      # it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired,\n      # only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds\n      # less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime),\n      # subject of course to the idleTimeout setting. Default: 1800000 (30 minutes)\n      maxLifetime = 1800000\n\n      # This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a\n      # possible connection leak. A value of 0 means leak detection is disabled.\n      # Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0\n      leakDetectionThreshold = 0\n\n      # ensures that the database does not get dropped while we are using it\n      keepAliveConnection = on\n\n      # See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing\n      # Keep in mind that the number of threads must equal the maximum number of connections.\n      numThreads = 20\n      maxConnections = 20\n      minConnections = 20\n    }\n  }\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  class = \"akka.persistence.jdbc.snapshot.JdbcSnapshotStore\"\n\n  tables {\n    legacy_snapshot {\n      tableName = \"snapshot\"\n      schemaName = \"\"\n      columnNames {\n        persistenceId = \"persistence_id\"\n        sequenceNumber = \"sequence_number\"\n        created = \"created\"\n        snapshot = \"snapshot\"\n      }\n    }\n\n    snapshot {\n      tableName = \"snapshot\"\n      schemaName = \"\"\n      columnNames {\n        persistenceId = \"persistence_id\"\n        sequenceNumber = \"sequence_number\"\n        created = \"created\"\n\n        snapshotPayload = 
\"snapshot_payload\"\n        snapshotSerId = \"snapshot_ser_id\"\n        snapshotSerManifest = \"snapshot_ser_manifest\"\n\n        metaPayload = \"meta_payload\"\n        metaSerId = \"meta_ser_id\"\n        metaSerManifest = \"meta_ser_manifest\"\n      }\n    }\n\n    # Otherwise it would be a pinned dispatcher, see https://github.com/akka/akka/issues/31058\n    plugin-dispatcher = \"akka.actor.default-dispatcher\"\n  }\n\n  # This setting can be used to configure usage of a shared database.\n  # To disable usage of a shared database, set to null or an empty string.\n  # When set to a non empty string, this setting does two things:\n  # - The actor which manages the snapshot-journal will not automatically close the db when the actor stops (since it is shared)\n  # - If akka-persistence-jdbc.database-provider-fqcn is set to akka.persistence.jdbc.db.DefaultSlickDatabaseProvider\n  #   then the shared database with the given name will be used. (shared databases are configured as part of akka-persistence-jdbc.shared-databases)\n  #   Please note that the database will only be shared with the other journals if the use-shared-db is also set\n  #   to the same value for these other journals.\n  use-shared-db = null\n\n  dao = \"akka.persistence.jdbc.snapshot.dao.DefaultSnapshotDao\"\n\n  slick {\n\n    # This property indicates which profile must be used by Slick.\n    # Possible values are:\n    #  - slick.jdbc.PostgresProfile$\n    #  - slick.jdbc.MySQLProfile$\n    #  - slick.jdbc.H2Profile$\n    #  - slick.jdbc.SQLServerProfile$\n    #  - slick.jdbc.OracleProfile$\n    # (uncomment and set the property below to match your needs)\n    # profile = \"slick.jdbc.PostgresProfile$\"\n\n    db {\n      connectionPool = \"HikariCP\"\n\n      # The JDBC URL for the chosen database\n      # (uncomment and set the property below to match your needs)\n      # url = \"jdbc:postgresql://localhost:5432/akka-plugin\"\n\n      # The database username\n      # (uncomment and set 
the property below to match your needs)\n      # user = \"akka-plugin\"\n\n      # The username's password\n      # (uncomment and set the property below to match your needs)\n      # password = \"akka-plugin\"\n\n      # The JDBC driver to use\n      # (uncomment and set the property below to match your needs)\n      # driver = \"org.postgresql.Driver\"\n\n      # hikariCP settings; see: https://github.com/brettwooldridge/HikariCP\n      # Slick will use an async executor with a fixed size queue of 10.000 objects\n      # The async executor is a connection pool for asynchronous execution of blocking I/O actions.\n      # This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.\n      queueSize = 10000 // number of objects that can be queued by the async executor\n\n      # This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection\n      # from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown.\n      # 1000ms is the minimum value. Default: 180000 (3 minutes)\n      connectionTimeout = 180000\n\n      # This property controls the maximum amount of time that a connection will be tested for aliveness.\n      # This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000\n      validationTimeout = 5000\n\n      # 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool.\n      # Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation\n      # of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections\n      # are never removed from the pool. 
Default: 600000 (10 minutes)\n      idleTimeout = 600000\n\n      # 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout\n      # it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired,\n      # only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds\n      # less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime),\n      # subject of course to the idleTimeout setting. Default: 1800000 (30 minutes)\n      maxLifetime = 1800000\n\n      # This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a\n      # possible connection leak. A value of 0 means leak detection is disabled.\n      # Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0\n      leakDetectionThreshold = 0\n\n      # ensures that the database does not get dropped while we are using it\n      keepAliveConnection = on\n\n      # See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing\n      # Keep in mind that the number of threads must equal the maximum number of connections.\n      numThreads = 20\n      maxConnections = 20\n      minConnections = 20\n    }\n  }\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  class = \"akka.persistence.jdbc.query.JdbcReadJournalProvider\"\n\n  # Absolute path to the write journal plugin configuration section.\n  # Read journal uses event adapters from the write plugin\n  # to adapt events.\n  write-plugin = \"jdbc-journal\"\n\n  # New events are retrieved (polled) with this interval.\n  refresh-interval = \"1s\"\n\n  # How many events to fetch in one query (replay) and keep buffered until they\n  # are delivered downstreams.\n  
max-buffer-size = \"500\"\n\n  # Number of 'max-buffer-size's to limit each events by tag query to\n  #\n  # Events by tag will fetch batches of elements limiting both using the DB LIMIT support and\n  # the \"ordering\" column of the journal. When executing a query starting from the beginning of the\n  # journal, for example adding a new projection to an existing application with a large number\n  # of already persisted events this can cause performance problems in some databases.\n  #\n  # This factor limits the \"slices\" of ordering the journal is queried for into smaller chunks,\n  # issuing more queries where each query covers a smaller slice of the journal instead of one\n  # covering the entire journal.\n  #\n  # Note that setting this too low will have a performance overhead in many queries being issued where\n  # each query returns no or very few entries, but what number is too low depends on how many tags are\n  # used and how well those are distributed, setting this value requires application specific benchmarking\n  # to find a good number.\n  #\n  # 0 means disable the factor and query the entire journal and limit to max-buffer-size elements\n  events-by-tag-buffer-sizes-per-query = 0\n\n  # If enabled, automatically close the database connection when the actor system is terminated\n  add-shutdown-hook = true\n\n  # This setting can be used to configure usage of a shared database.\n  # To disable usage of a shared database, set to null or an empty string.\n  # This setting only has effect if akka-persistence-jdbc.database-provider-fqcn is set to\n  # akka.persistence.jdbc.db.DefaultSlickDatabaseProvider. When this setting is set to a non empty string\n  # then the shared database with the given name will be used. 
(shared databases are configured as part of akka-persistence-jdbc.shared-databases)\n  # Please note that the database will only be shared with the other journals if the use-shared-db is also set\n  # to the same value for these other journals.\n  use-shared-db = null\n\n  dao = \"akka.persistence.jdbc.query.dao.DefaultReadJournalDao\"\n\n  # Settings for determining if ids (ordering column) in the journal are out of sequence.\n  journal-sequence-retrieval {\n    # The maximum number of ids that will be retrieved in each batch\n    batch-size = 10000\n    # In case a number in the sequence is missing, this is the amount of retries that will be done to see\n    # if the number is still found. Note that the time after which a number in the sequence is assumed missing is\n    # equal to maxTries * queryDelay\n    # (maxTries may not be zero)\n    max-tries = 10\n    # How often the actor will query for new data\n    query-delay = 1 second\n    # The maximum backoff time before trying to query again in case of database failures\n    max-backoff-query-delay = 1 minute\n    # The ask timeout to use when querying the journal sequence actor, the actor should normally respond very quickly,\n    # since it always replies with its current internal state\n    ask-timeout = 1 second\n  }\n\n  tables {\n      legacy_journal = ${jdbc-journal.tables.legacy_journal}\n      event_journal = ${jdbc-journal.tables.event_journal}\n      event_tag = ${jdbc-journal.tables.event_tag}\n  }\n\n  # The tag separator to use when tagging events with more than one tag.\n  # should not be configured directly, but through property akka-persistence-jdbc.tagSeparator\n  # in order to keep consistent behavior over write/read sides\n  tagSeparator = ${akka-persistence-jdbc.tagSeparator}\n\n  slick {\n\n    # This property indicates which profile must be used by Slick.\n    # Possible values are:\n    #  - slick.jdbc.PostgresProfile$\n    #  - slick.jdbc.MySQLProfile$\n    #  - slick.jdbc.H2Profile$\n  
  #  - slick.jdbc.SQLServerProfile$\n    #  - slick.jdbc.OracleProfile$\n    # (uncomment and set the property below to match your needs)\n    # profile = \"slick.jdbc.PostgresProfile$\"\n\n    db {\n      connectionPool = \"HikariCP\"\n\n      # The JDBC URL for the chosen database\n      # (uncomment and set the property below to match your needs)\n      # url = \"jdbc:postgresql://localhost:5432/akka-plugin\"\n\n      # The database username\n      # (uncomment and set the property below to match your needs)\n      # user = \"akka-plugin\"\n\n      # The username's password\n      # (uncomment and set the property below to match your needs)\n      # password = \"akka-plugin\"\n\n      # The JDBC driver to use\n      # (uncomment and set the property below to match your needs)\n      # driver = \"org.postgresql.Driver\"\n\n      # hikariCP settings; see: https://github.com/brettwooldridge/HikariCP\n      # Slick will use an async executor with a fixed size queue of 10.000 objects\n      # The async executor is a connection pool for asynchronous execution of blocking I/O actions.\n      # This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.\n      queueSize = 10000 // number of objects that can be queued by the async executor\n\n      # This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection\n      # from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown.\n      # 1000ms is the minimum value. Default: 180000 (3 minutes)\n      connectionTimeout = 180000\n\n      # This property controls the maximum amount of time that a connection will be tested for aliveness.\n      # This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). 
Default: 5000\n      validationTimeout = 5000\n\n      # 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool.\n      # Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation\n      # of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections\n      # are never removed from the pool. Default: 600000 (10 minutes)\n      idleTimeout = 600000\n\n      # 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout\n      # it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired,\n      # only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds\n      # less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime),\n      # subject of course to the idleTimeout setting. Default: 1800000 (30 minutes)\n      maxLifetime = 1800000\n\n      # This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a\n      # possible connection leak. A value of 0 means leak detection is disabled.\n      # Lowest acceptable value for enabling leak detection is 2000 (2 secs). 
Default: 0\n      leakDetectionThreshold = 0\n\n      # ensures that the database does not get dropped while we are using it\n      keepAliveConnection = on\n\n      # See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing\n      # Keep in mind that the number of threads must equal the maximum number of connections.\n      numThreads = 20\n      maxConnections = 20\n      minConnections = 20\n    }\n  }\n}\n\n# the akka-persistence-durable-state-store in use\njdbc-durable-state-store {\n  class = \"akka.persistence.jdbc.state.JdbcDurableStateStoreProvider\"\n\n  # number of records fetched from the store at once\n  batchSize = 500\n  # New states are retrieved (polled) with this interval.\n  refreshInterval = \"1s\"\n\n  tables {\n    durable_state {\n      ## The table and column names are not always read and used in SQL statements. If you change\n      ## these values you may need to edit some source code\n      ## https://github.com/akka/akka-persistence-jdbc/issues/573\n      tableName = \"durable_state\"\n      schemaName = \"\"\n      columnNames {\n        globalOffset = \"global_offset\"\n        persistenceId = \"persistence_id\"\n        revision = \"revision\"\n        statePayload = \"state_payload\"\n        stateSerId = \"state_serial_id\"\n        stateSerManifest = \"state_serial_manifest\"\n        tag = \"tag\"\n        stateTimestamp = \"state_timestamp\"\n      }\n    }\n  }\n\n  # Settings for determining if global_offset column in the durable-state are out of sequence.\n  durable-state-sequence-retrieval {\n    # The maximum number of ids that will be retrieved in each batch\n    batch-size = 10000\n    # In case a number in the sequence is missing, this is the amount of retries that will be done to see\n    # if the number is still found. 
Note that the time after which a number in the sequence is assumed missing is\n    # equal to maxTries * queryDelay\n    # (maxTries may not be zero)\n    max-tries = 5\n    # How often the actor will query for new data\n    query-delay = 1 second\n    # The maximum backoff time before trying to query again in case of database failures\n    max-backoff-query-delay = 1 minute\n    # The ask timeout to use when querying the durable-state sequence actor, the actor should normally respond very quickly,\n    # since it always replies with its current internal state\n    ask-timeout = 1 second\n    # cache of revision numbers per persistence id\n    revision-cache-capacity = 10000\n  }\n\n  slick {\n\n    # This property indicates which profile must be used by Slick.\n    # Possible values are:\n    #  - slick.jdbc.PostgresProfile$\n    #  - slick.jdbc.MySQLProfile$\n    #  - slick.jdbc.H2Profile$\n    #  - slick.jdbc.SQLServerProfile$\n    #  - slick.jdbc.OracleProfile$\n    # (uncomment and set the property below to match your needs)\n    # profile = \"slick.jdbc.PostgresProfile$\"\n\n    db {\n      connectionPool = \"HikariCP\"\n\n      # The JDBC URL for the chosen database\n      # (uncomment and set the property below to match your needs)\n      # url = \"jdbc:postgresql://localhost:5432/akka-plugin\"\n\n      # The database username\n      # (uncomment and set the property below to match your needs)\n      # user = \"akka-plugin\"\n\n      # The username's password\n      # (uncomment and set the property below to match your needs)\n      # password = \"akka-plugin\"\n\n      # The JDBC driver to use\n      # (uncomment and set the property below to match your needs)\n      # driver = \"org.postgresql.Driver\"\n\n      # hikariCP settings; see: https://github.com/brettwooldridge/HikariCP\n      # Slick will use an async executor with a fixed size queue of 10.000 objects\n      # The async executor is a connection pool for asynchronous execution of blocking I/O 
actions.\n      # This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.\n      queueSize = 10000 // number of objects that can be queued by the async executor\n\n      # This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection\n      # from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown.\n      # 1000ms is the minimum value. Default: 180000 (3 minutes)\n      connectionTimeout = 180000\n\n      # This property controls the maximum amount of time that a connection will be tested for aliveness.\n      # This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000\n      validationTimeout = 5000\n\n      # 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool.\n      # Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation\n      # of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections\n      # are never removed from the pool. Default: 600000 (10 minutes)\n      idleTimeout = 600000\n\n      # 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout\n      # it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired,\n      # only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds\n      # less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime),\n      # subject of course to the idleTimeout setting. 
Default: 1800000 (30 minutes)\n      maxLifetime = 1800000\n\n      # This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a\n      # possible connection leak. A value of 0 means leak detection is disabled.\n      # Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0\n      leakDetectionThreshold = 0\n\n      # ensures that the database does not get dropped while we are using it\n      keepAliveConnection = on\n\n      # See some tips on thread/connection pool sizing on https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing\n      # Keep in mind that the number of threads must equal the maximum number of connections.\n      numThreads = 20\n      maxConnections = 20\n      minConnections = 20\n    }\n  }\n}\n"
  },
  {
    "path": "core/src/main/resources/schema/h2/h2-create-schema-legacy.sql",
    "content": "CREATE TABLE IF NOT EXISTS PUBLIC.\"journal\" (\n  \"ordering\" BIGINT AUTO_INCREMENT,\n  \"persistence_id\" VARCHAR(255) NOT NULL,\n  \"sequence_number\" BIGINT NOT NULL,\n  \"deleted\" BOOLEAN DEFAULT FALSE NOT NULL,\n  \"tags\" VARCHAR(255) DEFAULT NULL,\n  \"message\" BYTEA NOT NULL,\n  PRIMARY KEY(\"persistence_id\", \"sequence_number\")\n);\nCREATE UNIQUE INDEX IF NOT EXISTS  \"journal_ordering_idx\" ON PUBLIC.\"journal\"(\"ordering\");\n\nCREATE TABLE IF NOT EXISTS PUBLIC.\"snapshot\" (\n  \"persistence_id\" VARCHAR(255) NOT NULL,\n  \"sequence_number\" BIGINT NOT NULL,\n  \"created\" BIGINT NOT NULL,\n  \"snapshot\" BYTEA NOT NULL,\n  PRIMARY KEY(\"persistence_id\", \"sequence_number\")\n);\n\n\nCREATE TABLE IF NOT EXISTS \"durable_state\" (\n    \"global_offset\" BIGINT NOT NULL AUTO_INCREMENT,\n    \"persistence_id\" VARCHAR(255) NOT NULL,\n    \"revision\" BIGINT NOT NULL,\n    \"state_payload\" BLOB NOT NULL,\n    \"state_serial_id\" INTEGER NOT NULL,\n    \"state_serial_manifest\" VARCHAR,\n    \"tag\" VARCHAR,\n    \"state_timestamp\" BIGINT NOT NULL,\n    PRIMARY KEY(\"persistence_id\")\n    );\n\nCREATE INDEX \"state_tag_idx\" on \"durable_state\" (\"tag\");\nCREATE INDEX \"state_global_offset_idx\" on \"durable_state\" (\"global_offset\");\n"
  },
  {
    "path": "core/src/main/resources/schema/h2/h2-create-schema.sql",
    "content": "CREATE TABLE IF NOT EXISTS \"event_journal\" (\n    \"ordering\" BIGINT UNIQUE NOT NULL AUTO_INCREMENT,\n    \"deleted\" BOOLEAN DEFAULT false NOT NULL,\n    \"persistence_id\" VARCHAR(255) NOT NULL,\n    \"sequence_number\" BIGINT NOT NULL,\n    \"writer\" VARCHAR NOT NULL,\n    \"write_timestamp\" BIGINT NOT NULL,\n    \"adapter_manifest\" VARCHAR NOT NULL,\n    \"event_payload\" BLOB NOT NULL,\n    \"event_ser_id\" INTEGER NOT NULL,\n    \"event_ser_manifest\" VARCHAR NOT NULL,\n    \"meta_payload\" BLOB,\n    \"meta_ser_id\" INTEGER,\n    \"meta_ser_manifest\" VARCHAR,\n    PRIMARY KEY(\"persistence_id\",\"sequence_number\")\n    );\n\nCREATE UNIQUE INDEX \"event_journal_ordering_idx\" on \"event_journal\" (\"ordering\");\n\nCREATE TABLE IF NOT EXISTS \"event_tag\" (\n    \"event_id\" BIGINT,\n    \"persistence_id\" VARCHAR(255),\n    \"sequence_number\" BIGINT,\n    \"tag\" VARCHAR NOT NULL,\n    PRIMARY KEY(\"persistence_id\", \"sequence_number\", \"tag\"),\n    CONSTRAINT fk_event_journal\n      FOREIGN KEY(\"persistence_id\", \"sequence_number\")\n      REFERENCES \"event_journal\"(\"persistence_id\", \"sequence_number\")\n      ON DELETE CASCADE\n);\n\nCREATE TABLE IF NOT EXISTS \"snapshot\" (\n    \"persistence_id\" VARCHAR(255) NOT NULL,\n    \"sequence_number\" BIGINT NOT NULL,\n    \"created\" BIGINT NOT NULL,\"snapshot_ser_id\" INTEGER NOT NULL,\n    \"snapshot_ser_manifest\" VARCHAR NOT NULL,\n    \"snapshot_payload\" BLOB NOT NULL,\n    \"meta_ser_id\" INTEGER,\n    \"meta_ser_manifest\" VARCHAR,\n    \"meta_payload\" BLOB,\n    PRIMARY KEY(\"persistence_id\",\"sequence_number\")\n    );\n\nCREATE SEQUENCE IF NOT EXISTS \"global_offset_seq\";\n\nCREATE TABLE IF NOT EXISTS \"durable_state\" (\n    \"global_offset\" BIGINT DEFAULT NEXT VALUE FOR \"global_offset_seq\",\n    \"persistence_id\" VARCHAR(255) NOT NULL,\n    \"revision\" BIGINT NOT NULL,\n    \"state_payload\" BLOB NOT NULL,\n    \"state_serial_id\" INTEGER NOT NULL,\n    
\"state_serial_manifest\" VARCHAR,\n    \"tag\" VARCHAR,\n    \"state_timestamp\" BIGINT NOT NULL,\n    PRIMARY KEY(\"persistence_id\")\n    );\nCREATE INDEX IF NOT EXISTS \"state_tag_idx\" on \"durable_state\" (\"tag\");\nCREATE INDEX IF NOT EXISTS \"state_global_offset_idx\" on \"durable_state\" (\"global_offset\");\n"
  },
  {
    "path": "core/src/main/resources/schema/h2/h2-drop-schema-legacy.sql",
    "content": "DROP TABLE IF EXISTS PUBLIC.\"journal\";\nDROP TABLE IF EXISTS PUBLIC.\"snapshot\";\nDROP TABLE IF EXISTS PUBLIC.\"durable_state\";\n"
  },
  {
    "path": "core/src/main/resources/schema/h2/h2-drop-schema.sql",
    "content": "DROP TABLE IF EXISTS PUBLIC.\"event_tag\";\nDROP TABLE IF EXISTS PUBLIC.\"event_journal\";\nDROP TABLE IF EXISTS PUBLIC.\"snapshot\";\nDROP TABLE IF EXISTS PUBLIC.\"durable_state\";\nDROP SEQUENCE IF EXISTS PUBLIC.\"global_offset_seq\";\n"
  },
  {
    "path": "core/src/main/resources/schema/mysql/mysql-create-schema-legacy.sql",
    "content": "CREATE TABLE IF NOT EXISTS journal (\n  ordering SERIAL,\n  persistence_id VARCHAR(255) NOT NULL,\n  sequence_number BIGINT NOT NULL,\n  deleted BOOLEAN DEFAULT FALSE NOT NULL,\n  tags VARCHAR(255) DEFAULT NULL,\n  message BLOB NOT NULL,\n  PRIMARY KEY(persistence_id, sequence_number)\n);\nCREATE UNIQUE INDEX journal_ordering_idx ON journal(ordering);\n\nCREATE TABLE IF NOT EXISTS snapshot (\n  persistence_id VARCHAR(255) NOT NULL,\n  sequence_number BIGINT NOT NULL,\n  created BIGINT NOT NULL,\n  snapshot BLOB NOT NULL,\n  PRIMARY KEY (persistence_id, sequence_number)\n);\n"
  },
  {
    "path": "core/src/main/resources/schema/mysql/mysql-create-schema.sql",
    "content": "CREATE TABLE IF NOT EXISTS event_journal (\n    ordering SERIAL,\n    deleted BOOLEAN DEFAULT false NOT NULL,\n    persistence_id VARCHAR(255) NOT NULL,\n    sequence_number BIGINT NOT NULL,\n    writer TEXT NOT NULL,\n    write_timestamp BIGINT NOT NULL,\n    adapter_manifest TEXT NOT NULL,\n    event_payload BLOB NOT NULL,\n    event_ser_id INTEGER NOT NULL,\n    event_ser_manifest TEXT NOT NULL,\n    meta_payload BLOB,\n    meta_ser_id INTEGER,meta_ser_manifest TEXT,\n    PRIMARY KEY(persistence_id,sequence_number)\n);\n\nCREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering);\n\nCREATE TABLE IF NOT EXISTS event_tag (\n    event_id BIGINT UNSIGNED,\n    persistence_id VARCHAR(255),\n    sequence_number BIGINT,\n    tag VARCHAR(255) NOT NULL,\n    PRIMARY KEY(persistence_id, sequence_number, tag),\n    FOREIGN KEY (persistence_id, sequence_number)\n        REFERENCES event_journal(persistence_id, sequence_number)\n        ON DELETE CASCADE\n    );\n\nCREATE TABLE IF NOT EXISTS snapshot (\n    persistence_id VARCHAR(255) NOT NULL,\n    sequence_number BIGINT NOT NULL,\n    created BIGINT NOT NULL,\n    snapshot_ser_id INTEGER NOT NULL,\n    snapshot_ser_manifest TEXT NOT NULL,\n    snapshot_payload BLOB NOT NULL,\n    meta_ser_id INTEGER,\n    meta_ser_manifest TEXT,\n    meta_payload BLOB,\n  PRIMARY KEY (persistence_id, sequence_number));\n"
  },
  {
    "path": "core/src/main/resources/schema/mysql/mysql-drop-schema-legacy.sql",
    "content": "DROP TABLE IF EXISTS journal;\nDROP TABLE IF EXISTS snapshot;\n"
  },
  {
    "path": "core/src/main/resources/schema/mysql/mysql-drop-schema.sql",
    "content": "DROP TABLE IF EXISTS event_tag;\nDROP TABLE IF EXISTS event_journal;\nDROP TABLE IF EXISTS snapshot;\n"
  },
  {
    "path": "core/src/main/resources/schema/mysql/mysql-event-tag-migration.sql",
    "content": "-- **************** first step ****************\n-- add new column\nALTER TABLE event_tag\n    ADD persistence_id  VARCHAR(255),\n    ADD sequence_number BIGINT;\n-- **************** second step ****************\n-- migrate rows\nUPDATE event_tag\nINNER JOIN event_journal ON event_tag.event_id = event_journal.ordering\nSET event_tag.persistence_id = event_journal.persistence_id,\n    event_tag.sequence_number = event_journal.sequence_number;\n-- drop old FK constraint\nSELECT CONSTRAINT_NAME\nINTO @fk_constraint_name\nFROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS\nWHERE TABLE_NAME = 'event_tag';\nSET @alter_query = CONCAT('ALTER TABLE event_tag DROP FOREIGN KEY ', @fk_constraint_name);\nPREPARE stmt FROM @alter_query;\nEXECUTE stmt;\nDEALLOCATE PREPARE stmt;\n-- drop old PK  constraint\nALTER TABLE event_tag\nDROP PRIMARY KEY;\n-- create new PK constraint for PK column.\nALTER TABLE event_tag\n    ADD CONSTRAINT\n        PRIMARY KEY (persistence_id, sequence_number, tag);\n-- create new FK constraint for PK column.\nALTER TABLE event_tag\n    ADD CONSTRAINT fk_event_journal_on_pk\n        FOREIGN KEY (persistence_id, sequence_number)\n            REFERENCES event_journal (persistence_id, sequence_number)\n            ON DELETE CASCADE;\n-- alter the event_id to nullable, so we can skip the InsertAndReturn.\nALTER TABLE event_tag\n    MODIFY COLUMN event_id BIGINT UNSIGNED NULL;"
  },
  {
    "path": "core/src/main/resources/schema/oracle/oracle-create-schema-legacy.sql",
    "content": "CREATE SEQUENCE \"ordering_seq\" START WITH 1 INCREMENT BY 1 NOMAXVALUE\n/\n\nCREATE TABLE \"journal\" (\n  \"ordering\" NUMERIC,\n  \"deleted\" char check (\"deleted\" in (0,1)) NOT NULL,\n  \"persistence_id\" VARCHAR(255) NOT NULL,\n  \"sequence_number\" NUMERIC NOT NULL,\n  \"tags\" VARCHAR(255) DEFAULT NULL,\n  \"message\" BLOB NOT NULL,\n  PRIMARY KEY(\"persistence_id\", \"sequence_number\")\n)\n/\n\nCREATE UNIQUE INDEX \"journal_ordering_idx\" ON \"journal\"(\"ordering\")\n/\n\nCREATE OR REPLACE TRIGGER \"ordering_seq_trigger\"\nBEFORE INSERT ON \"journal\"\nFOR EACH ROW\nBEGIN\n  SELECT \"ordering_seq\".NEXTVAL INTO :NEW.\"ordering\" FROM DUAL;\nEND;\n/\n\nCREATE OR REPLACE PROCEDURE \"reset_sequence\"\nIS\n  l_value NUMBER;\nBEGIN\n  EXECUTE IMMEDIATE 'SELECT \"ordering_seq\".nextval FROM dual' INTO l_value;\n  EXECUTE IMMEDIATE 'ALTER SEQUENCE \"ordering_seq\" INCREMENT BY -' || l_value || ' MINVALUE 0';\n  EXECUTE IMMEDIATE 'SELECT \"ordering_seq\".nextval FROM dual' INTO l_value;\n  EXECUTE IMMEDIATE 'ALTER SEQUENCE \"ordering_seq\" INCREMENT BY 1 MINVALUE 0';\nEND;\n/\n\nCREATE TABLE \"snapshot\" (\n  \"persistence_id\" VARCHAR(255) NOT NULL,\n  \"sequence_number\" NUMERIC NOT NULL,\n  \"created\" NUMERIC NOT NULL,\n  \"snapshot\" BLOB NOT NULL,\n  PRIMARY KEY (\"persistence_id\", \"sequence_number\")\n)\n/"
  },
  {
    "path": "core/src/main/resources/schema/oracle/oracle-create-schema.sql",
    "content": "CREATE SEQUENCE EVENT_JOURNAL__ORDERING_SEQ START WITH 1 INCREMENT BY 1 NOMAXVALUE\n/\n\nCREATE TABLE EVENT_JOURNAL (\n    ORDERING NUMERIC UNIQUE,\n    DELETED CHAR(1) DEFAULT 0 NOT NULL check (DELETED in (0, 1)),\n    PERSISTENCE_ID VARCHAR(255) NOT NULL,\n    SEQUENCE_NUMBER NUMERIC NOT NULL,\n    WRITER VARCHAR(255) NOT NULL,\n    WRITE_TIMESTAMP NUMBER(19) NOT NULL,\n    ADAPTER_MANIFEST VARCHAR(255),\n    EVENT_PAYLOAD BLOB NOT NULL,\n    EVENT_SER_ID NUMBER(10) NOT NULL,\n    EVENT_SER_MANIFEST VARCHAR(255),\n    META_PAYLOAD BLOB,\n    META_SER_ID NUMBER(10),\n    META_SER_MANIFEST VARCHAR(255),\n    PRIMARY KEY(PERSISTENCE_ID, SEQUENCE_NUMBER)\n    )\n/\n\nCREATE OR REPLACE TRIGGER EVENT_JOURNAL__ORDERING_TRG before insert on EVENT_JOURNAL REFERENCING NEW AS NEW FOR EACH ROW WHEN (new.ORDERING is null) begin select EVENT_JOURNAL__ORDERING_seq.nextval into :new.ORDERING from sys.dual; end;\n/\n\nCREATE TABLE EVENT_TAG (\n    EVENT_ID NUMERIC,\n    PERSISTENCE_ID VARCHAR(255),\n    SEQUENCE_NUMBER NUMERIC,\n    TAG VARCHAR(255) NOT NULL,\n    PRIMARY KEY(PERSISTENCE_ID, SEQUENCE_NUMBER, TAG),\n    FOREIGN KEY(PERSISTENCE_ID, SEQUENCE_NUMBER) REFERENCES EVENT_JOURNAL(PERSISTENCE_ID, SEQUENCE_NUMBER)\n    ON DELETE CASCADE\n    )\n/\n\nCREATE TABLE SNAPSHOT (\n    PERSISTENCE_ID VARCHAR(255) NOT NULL,\n    SEQUENCE_NUMBER NUMERIC NOT NULL,\n    CREATED NUMERIC NOT NULL,\n    SNAPSHOT_SER_ID NUMBER(10) NOT NULL,\n    SNAPSHOT_SER_MANIFEST VARCHAR(255),\n    SNAPSHOT_PAYLOAD BLOB NOT NULL,\n    META_SER_ID NUMBER(10),\n    META_SER_MANIFEST VARCHAR(255),\n    META_PAYLOAD BLOB,\n    PRIMARY KEY(PERSISTENCE_ID,SEQUENCE_NUMBER)\n    )\n/\n\nCREATE OR REPLACE PROCEDURE \"reset_sequence\"\nIS\n  l_value NUMBER;\nBEGIN\n  EXECUTE IMMEDIATE 'SELECT EVENT_JOURNAL__ORDERING_SEQ.nextval FROM dual' INTO l_value;\n  EXECUTE IMMEDIATE 'ALTER SEQUENCE EVENT_JOURNAL__ORDERING_SEQ INCREMENT BY -' || l_value || ' MINVALUE 0';\n  EXECUTE IMMEDIATE 'SELECT 
EVENT_JOURNAL__ORDERING_SEQ.nextval FROM dual' INTO l_value;\n  EXECUTE IMMEDIATE 'ALTER SEQUENCE EVENT_JOURNAL__ORDERING_SEQ INCREMENT BY 1 MINVALUE 0';\nEND;\n/\n"
  },
  {
    "path": "core/src/main/resources/schema/oracle/oracle-drop-schema-legacy.sql",
    "content": "-- (ddl lock timeout in seconds) this allows tests which are still writing to the db to finish gracefully\nALTER SESSION SET ddl_lock_timeout = 150\n/\n\nDROP TABLE \"journal\" CASCADE CONSTRAINT\n/\n\nDROP TABLE \"snapshot\" CASCADE CONSTRAINT\n/\n\nDROP TABLE \"deleted_to\" CASCADE CONSTRAINT\n/\n\nDROP TRIGGER \"ordering_seq_trigger\"\n/\n\nDROP PROCEDURE \"reset_sequence\"\n/\n\nDROP SEQUENCE \"ordering_seq\"\n/\n"
  },
  {
    "path": "core/src/main/resources/schema/oracle/oracle-drop-schema.sql",
    "content": "ALTER SESSION SET ddl_lock_timeout = 15\n/\n\nDROP TABLE EVENT_TAG CASCADE CONSTRAINT\n/\n\nDROP TABLE EVENT_JOURNAL CASCADE CONSTRAINT\n/\n\nDROP TABLE SNAPSHOT CASCADE CONSTRAINT\n/\n\nDROP SEQUENCE EVENT_JOURNAL__ORDERING_SEQ\n/\n\nDROP TRIGGER EVENT_JOURNAL__ORDERING_TRG\n/\n"
  },
  {
    "path": "core/src/main/resources/schema/oracle/oracle-event-tag-migration.sql",
    "content": "-- **************** first step ****************\n-- add new column\nALTER TABLE EVENT_TAG\n    ADD (PERSISTENCE_ID VARCHAR2(255),\n         SEQUENCE_NUMBER NUMERIC);\n-- **************** second step ****************\n-- migrate rows\nUPDATE EVENT_TAG\nSET PERSISTENCE_ID  = (SELECT PERSISTENCE_ID\n                       FROM EVENT_JOURNAL\n                       WHERE EVENT_TAG.EVENT_ID = EVENT_JOURNAL.ORDERING),\n    SEQUENCE_NUMBER = (SELECT SEQUENCE_NUMBER\n                       FROM EVENT_JOURNAL\n                       WHERE EVENT_TAG.EVENT_ID = EVENT_JOURNAL.ORDERING);\n-- drop old FK constraint\nDECLARE\nv_constraint_name VARCHAR2(255);\nBEGIN\nSELECT CONSTRAINT_NAME\nINTO v_constraint_name\nFROM USER_CONSTRAINTS\nWHERE TABLE_NAME = 'EVENT_TAG'\n  AND CONSTRAINT_TYPE = 'R';\n\nIF v_constraint_name IS NOT NULL THEN\n        EXECUTE IMMEDIATE 'ALTER TABLE EVENT_TAG DROP CONSTRAINT ' || v_constraint_name;\nEND IF;\n\nCOMMIT;\nEXCEPTION\n    WHEN OTHERS THEN\n        ROLLBACK;\n        RAISE;\nEND;\n/\n\n-- drop old PK  constraint\nALTER TABLE EVENT_TAG\nDROP PRIMARY KEY;\n-- create new PK constraint for PK column.\nALTER TABLE EVENT_TAG\n    ADD CONSTRAINT \"pk_event_tag\"\n        PRIMARY KEY (PERSISTENCE_ID, SEQUENCE_NUMBER, TAG);\n-- create new FK constraint for PK column.\nALTER TABLE EVENT_TAG\n    ADD CONSTRAINT fk_EVENT_JOURNAL_on_pk\n        FOREIGN KEY (PERSISTENCE_ID, SEQUENCE_NUMBER)\n            REFERENCES EVENT_JOURNAL (PERSISTENCE_ID, SEQUENCE_NUMBER)\n            ON DELETE CASCADE;\n-- alter the EVENT_ID to nullable, so we can skip the InsertAndReturn.\nALTER TABLE EVENT_TAG\n    MODIFY EVENT_ID NULL;"
  },
  {
    "path": "core/src/main/resources/schema/postgres/postgres-create-schema-legacy.sql",
    "content": "CREATE TABLE IF NOT EXISTS public.journal (\n  ordering BIGSERIAL,\n  persistence_id VARCHAR(255) NOT NULL,\n  sequence_number BIGINT NOT NULL,\n  deleted BOOLEAN DEFAULT FALSE NOT NULL,\n  tags VARCHAR(255) DEFAULT NULL,\n  message BYTEA NOT NULL,\n  PRIMARY KEY(persistence_id, sequence_number)\n);\nCREATE UNIQUE INDEX IF NOT EXISTS journal_ordering_idx ON public.journal(ordering);\n\nCREATE TABLE IF NOT EXISTS public.snapshot (\n  persistence_id VARCHAR(255) NOT NULL,\n  sequence_number BIGINT NOT NULL,\n  created BIGINT NOT NULL,\n  snapshot BYTEA NOT NULL,\n  PRIMARY KEY(persistence_id, sequence_number)\n);\n\nCREATE TABLE IF NOT EXISTS public.durable_state (\n    global_offset BIGSERIAL,\n    persistence_id VARCHAR(255) NOT NULL,\n    revision BIGINT NOT NULL,\n    state_payload BYTEA NOT NULL,\n    state_serial_id INTEGER NOT NULL,\n    state_serial_manifest VARCHAR(255),\n    tag VARCHAR,\n    state_timestamp BIGINT NOT NULL,\n    PRIMARY KEY(persistence_id)\n    );\nCREATE INDEX CONCURRENTLY state_tag_idx on public.durable_state (tag);\nCREATE INDEX CONCURRENTLY state_global_offset_idx on public.durable_state (global_offset);\n"
  },
  {
    "path": "core/src/main/resources/schema/postgres/postgres-create-schema.sql",
    "content": "CREATE TABLE IF NOT EXISTS public.event_journal (\n  ordering BIGSERIAL,\n  persistence_id VARCHAR(255) NOT NULL,\n  sequence_number BIGINT NOT NULL,\n  deleted BOOLEAN DEFAULT FALSE NOT NULL,\n\n  writer VARCHAR(255) NOT NULL,\n  write_timestamp BIGINT,\n  adapter_manifest VARCHAR(255),\n\n  event_ser_id INTEGER NOT NULL,\n  event_ser_manifest VARCHAR(255) NOT NULL,\n  event_payload BYTEA NOT NULL,\n\n  meta_ser_id INTEGER,\n  meta_ser_manifest VARCHAR(255),\n  meta_payload BYTEA,\n\n  PRIMARY KEY(persistence_id, sequence_number)\n);\n\nCREATE UNIQUE INDEX event_journal_ordering_idx ON public.event_journal(ordering);\n\nCREATE TABLE IF NOT EXISTS public.event_tag(\n    event_id BIGINT,\n    persistence_id VARCHAR(255),\n    sequence_number BIGINT,\n    tag VARCHAR(256),\n    PRIMARY KEY(persistence_id, sequence_number, tag),\n    CONSTRAINT fk_event_journal\n      FOREIGN KEY(persistence_id, sequence_number)\n      REFERENCES event_journal(persistence_id, sequence_number)\n      ON DELETE CASCADE\n);\n\nCREATE TABLE IF NOT EXISTS public.snapshot (\n  persistence_id VARCHAR(255) NOT NULL,\n  sequence_number BIGINT NOT NULL,\n  created BIGINT NOT NULL,\n\n  snapshot_ser_id INTEGER NOT NULL,\n  snapshot_ser_manifest VARCHAR(255) NOT NULL,\n  snapshot_payload BYTEA NOT NULL,\n\n  meta_ser_id INTEGER,\n  meta_ser_manifest VARCHAR(255),\n  meta_payload BYTEA,\n\n  PRIMARY KEY(persistence_id, sequence_number)\n);\n\nCREATE TABLE IF NOT EXISTS public.durable_state (\n    global_offset BIGSERIAL,\n    persistence_id VARCHAR(255) NOT NULL,\n    revision BIGINT NOT NULL,\n    state_payload BYTEA NOT NULL,\n    state_serial_id INTEGER NOT NULL,\n    state_serial_manifest VARCHAR(255),\n    tag VARCHAR,\n    state_timestamp BIGINT NOT NULL,\n    PRIMARY KEY(persistence_id)\n    );\nCREATE INDEX CONCURRENTLY state_tag_idx on public.durable_state (tag);\nCREATE INDEX CONCURRENTLY state_global_offset_idx on public.durable_state (global_offset);\n"
  },
  {
    "path": "core/src/main/resources/schema/postgres/postgres-drop-schema-legacy.sql",
    "content": "DROP TABLE IF EXISTS public.journal;\nDROP TABLE IF EXISTS public.snapshot;\nDROP TABLE IF EXISTS public.durable_state;\n"
  },
  {
    "path": "core/src/main/resources/schema/postgres/postgres-drop-schema.sql",
    "content": "DROP TABLE IF EXISTS public.event_tag;\nDROP TABLE IF EXISTS public.event_journal;\nDROP TABLE IF EXISTS public.snapshot;\nDROP TABLE IF EXISTS public.durable_state;\n\n"
  },
  {
    "path": "core/src/main/resources/schema/postgres/postgres-event-tag-migration.sql",
    "content": "-- **************** first step ****************\n-- add new column\nALTER TABLE public.event_tag\n    ADD persistence_id  VARCHAR(255),\n    ADD sequence_number BIGINT;\n-- **************** second step ****************\n-- migrate rows\nUPDATE public.event_tag\nSET persistence_id  = public.event_journal.persistence_id,\n    sequence_number = public.event_journal.sequence_number\nFROM event_journal\nWHERE public.event_tag.event_id = public.event_journal.ordering;\n-- drop old FK constraint\nALTER TABLE public.event_tag\nDROP CONSTRAINT \"fk_event_journal\";\n-- drop old PK  constraint\nALTER TABLE public.event_tag\nDROP CONSTRAINT \"event_tag_pkey\";\n-- create new PK constraint for PK column.\nALTER TABLE public.event_tag\n    ADD CONSTRAINT \"pk_event_tag\"\n        PRIMARY KEY (persistence_id, sequence_number, tag);\n-- create new FK constraint for PK column.\nALTER TABLE public.event_tag\n    ADD CONSTRAINT \"fk_event_journal_on_pk\"\n        FOREIGN KEY (persistence_id, sequence_number)\n            REFERENCES public.event_journal (persistence_id, sequence_number)\n            ON DELETE CASCADE;\n-- alter the event_id to nullable, so we can skip the InsertAndReturn.\nALTER TABLE public.event_tag\n    ALTER COLUMN event_id DROP NOT NULL;"
  },
  {
    "path": "core/src/main/resources/schema/sqlserver/sqlserver-create-schema-legacy.sql",
    "content": "\nIF  NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID(N'\"journal\"') AND type in (N'U'))\nbegin\nCREATE TABLE journal (\n  \"ordering\" BIGINT IDENTITY(1,1) NOT NULL,\n  \"deleted\" BIT DEFAULT 0 NOT NULL,\n  \"persistence_id\" VARCHAR(255) NOT NULL,\n  \"sequence_number\" NUMERIC(10,0) NOT NULL,\n  \"tags\" VARCHAR(255) NULL DEFAULT NULL,\n  \"message\" VARBINARY(max) NOT NULL,\n  PRIMARY KEY (\"persistence_id\", \"sequence_number\")\n)\nCREATE UNIQUE INDEX journal_ordering_idx ON journal (ordering)\nend;\n\n\nIF  NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID(N'\"snapshot\"') AND type in (N'U'))\nCREATE TABLE snapshot (\n  \"persistence_id\" VARCHAR(255) NOT NULL,\n  \"sequence_number\" NUMERIC(10,0) NOT NULL,\n  \"created\" NUMERIC NOT NULL,\n  \"snapshot\" VARBINARY(max) NOT NULL,\n  PRIMARY KEY (\"persistence_id\", \"sequence_number\")\n);\nend;\n"
  },
  {
    "path": "core/src/main/resources/schema/sqlserver/sqlserver-create-schema-varchar.sql",
    "content": "/*\nAkka Persistence JDBC versions from 5.0.0 through 5.1.0 used this schema.  The only difference from the\npost-5.0.4 schema is the use of VARCHAR instead of NVARCHAR for string fields.  It is strongly\nrecommended that new uses of Akka Persistence JDBC 5.0.0 and later use the NVARCHAR schema.  This schema is\nstill usable with post-5.0.4 versions of Akka Persistence JDBC, though will not support Unicode persistence IDs,\nmanifests, or tags.\n\nAdditionally, if using this schema, it is highly recommended to not have the SQL Server JDBC client send\nstrings as Unicode, by appending ;sendStringParametersAsUnicode=false to the JDBC connection string.\n*/\n\nCREATE TABLE event_journal(\n    \"ordering\" BIGINT IDENTITY(1,1) NOT NULL,\n    \"deleted\" BIT DEFAULT 0 NOT NULL,\n    \"persistence_id\" VARCHAR(255) NOT NULL,\n    \"sequence_number\" NUMERIC(10,0) NOT NULL,\n    \"writer\" VARCHAR(255) NOT NULL,\n    \"write_timestamp\" BIGINT NOT NULL,\n    \"adapter_manifest\" VARCHAR(MAX) NOT NULL,\n    \"event_payload\" VARBINARY(MAX) NOT NULL,\n    \"event_ser_id\" INTEGER NOT NULL,\n    \"event_ser_manifest\" VARCHAR(MAX) NOT NULL,\n    \"meta_payload\" VARBINARY(MAX),\n    \"meta_ser_id\" INTEGER,\n    \"meta_ser_manifest\" VARCHAR(MAX)\n    PRIMARY KEY (\"persistence_id\", \"sequence_number\")\n);\n\nCREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering);\n\nCREATE TABLE event_tag (\n    \"event_id\" BIGINT NOT NULL,\n    \"tag\" VARCHAR(255) NOT NULL\n    PRIMARY KEY (\"event_id\",\"tag\")\n    constraint \"fk_event_journal\"\n        foreign key(\"event_id\")\n        references \"dbo\".\"event_journal\"(\"ordering\")\n        on delete CASCADE\n);\n\nCREATE TABLE \"snapshot\" (\n    \"persistence_id\" VARCHAR(255) NOT NULL,\n    \"sequence_number\" NUMERIC(10,0) NOT NULL,\n    \"created\" BIGINT NOT NULL,\n    \"snapshot_ser_id\" INTEGER NOT NULL,\n    \"snapshot_ser_manifest\" VARCHAR(255) NOT NULL,\n    \"snapshot_payload\" 
VARBINARY(MAX) NOT NULL,\n    \"meta_ser_id\" INTEGER,\n    \"meta_ser_manifest\" VARCHAR(255),\n    \"meta_payload\" VARBINARY(MAX),\n    PRIMARY KEY (\"persistence_id\", \"sequence_number\")\n  )\n\n"
  },
  {
    "path": "core/src/main/resources/schema/sqlserver/sqlserver-create-schema.sql",
    "content": "CREATE TABLE event_journal (\n    \"ordering\" BIGINT IDENTITY(1,1) NOT NULL,\n    \"deleted\" BIT DEFAULT 0 NOT NULL,\n    \"persistence_id\" NVARCHAR(255) NOT NULL,\n    \"sequence_number\" NUMERIC(10,0) NOT NULL,\n    \"writer\" NVARCHAR(255) NOT NULL,\n    \"write_timestamp\" BIGINT NOT NULL,\n    \"adapter_manifest\" NVARCHAR(MAX) NOT NULL,\n    \"event_payload\" VARBINARY(MAX) NOT NULL,\n    \"event_ser_id\" INTEGER NOT NULL,\n    \"event_ser_manifest\" NVARCHAR(MAX) NOT NULL,\n    \"meta_payload\" VARBINARY(MAX),\n    \"meta_ser_id\" INTEGER,\n    \"meta_ser_manifest\" NVARCHAR(MAX)\n    PRIMARY KEY (\"persistence_id\", \"sequence_number\")\n);\n\nCREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering);\n\nCREATE TABLE event_tag (\n    \"event_id\" BIGINT,\n    \"persistence_id\" NVARCHAR(255),\n    \"sequence_number\" NUMERIC(10,0),\n    \"tag\" NVARCHAR(255) NOT NULL\n    PRIMARY KEY (\"persistence_id\", \"sequence_number\",\"tag\"),\n    constraint \"fk_event_journal\"\n        foreign key(\"persistence_id\", \"sequence_number\")\n        references \"dbo\".\"event_journal\"(\"persistence_id\", \"sequence_number\")\n        on delete CASCADE\n);\n\nCREATE TABLE \"snapshot\" (\n    \"persistence_id\" NVARCHAR(255) NOT NULL,\n    \"sequence_number\" NUMERIC(10,0) NOT NULL,\n    \"created\" BIGINT NOT NULL,\n    \"snapshot_ser_id\" INTEGER NOT NULL,\n    \"snapshot_ser_manifest\" NVARCHAR(255) NOT NULL,\n    \"snapshot_payload\" VARBINARY(MAX) NOT NULL,\n    \"meta_ser_id\" INTEGER,\n    \"meta_ser_manifest\" NVARCHAR(255),\n    \"meta_payload\" VARBINARY(MAX),\n    PRIMARY KEY (\"persistence_id\", \"sequence_number\")\n  )\n\n"
  },
  {
    "path": "core/src/main/resources/schema/sqlserver/sqlserver-drop-schema-legacy.sql",
    "content": "DROP TABLE IF EXISTS journal;\nDROP TABLE IF EXISTS snapshot;\n"
  },
  {
    "path": "core/src/main/resources/schema/sqlserver/sqlserver-drop-schema.sql",
    "content": "DROP TABLE IF EXISTS event_tag;\nDROP TABLE IF EXISTS event_journal;\nDROP TABLE IF EXISTS snapshot;\n"
  },
  {
    "path": "core/src/main/resources/schema/sqlserver/sqlserver-event-tag-migration.sql",
    "content": "-- **************** first step ****************\n-- add new column\nALTER TABLE event_tag\n    ADD persistence_id  VARCHAR(255),\n    ADD sequence_number BIGINT;\n-- **************** second step ****************\n-- migrate rows\nUPDATE event_tag\nSET persistence_id  = event_journal.persistence_id,\n    sequence_number = event_journal.sequence_number\nFROM event_journal\nWHERE event_tag.event_id = event_journal.ordering;\n-- drop old FK constraint\nDECLARE @fkConstraintName NVARCHAR(MAX);\nDECLARE @dropFKConstraintQuery NVARCHAR(MAX);\n\nSELECT @fkConstraintName = CONSTRAINT_NAME\nFROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS\nWHERE TABLE_NAME = 'event_tag'\n  AND CONSTRAINT_TYPE = 'FOREIGN KEY';\n\nIF @fkConstraintName IS NOT NULL\nBEGIN\n        SET @dropFKConstraintQuery = 'ALTER TABLE event_tag DROP CONSTRAINT ' + QUOTENAME(@fkConstraintName);\nEXEC sp_executesql @dropFKConstraintQuery;\nEND\n-- drop old PK  constraint\nDECLARE @constraintName NVARCHAR(MAX);\nDECLARE @dropConstraintQuery NVARCHAR(MAX);\n\nSELECT @constraintName = CONSTRAINT_NAME\nFROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS\nWHERE TABLE_NAME = 'event_tag'\n  AND CONSTRAINT_TYPE = 'PRIMARY KEY';\n\nIF @constraintName IS NOT NULL\nBEGIN\n        SET @dropConstraintQuery = 'ALTER TABLE event_tag DROP CONSTRAINT ' + QUOTENAME(@constraintName);\nEXEC sp_executesql @dropConstraintQuery;\nEND\n-- create new PK constraint for PK column.\nALTER TABLE event_tag\nALTER COLUMN persistence_id NVARCHAR(255) NOT NULL\nALTER TABLE event_tag\nALTER COLUMN sequence_number NUMERIC(10, 0) NOT NULL\nALTER TABLE event_tag\n    ADD CONSTRAINT \"pk_event_tag\"\n        PRIMARY KEY (persistence_id, sequence_number, TAG)\n-- create new FK constraint for PK column.\nALTER TABLE event_tag\n    ADD CONSTRAINT \"fk_event_journal_on_pk\"\n        FOREIGN KEY (persistence_id, sequence_number)\n            REFERENCES event_journal (persistence_id, sequence_number)\n            ON DELETE CASCADE\n-- alter the event_id 
to nullable, so we can skip the InsertAndReturn.\nALTER TABLE event_tag\nALTER COLUMN event_id BIGINT NULL"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/AkkaSerialization.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc\n\nimport akka.annotation.InternalApi\nimport akka.persistence.PersistentRepr\nimport akka.persistence.jdbc.state.DurableStateTables\nimport akka.persistence.jdbc.journal.dao.JournalTables.JournalAkkaSerializationRow\nimport akka.serialization.{ Serialization, Serializers }\n\nimport scala.util.{ Success, Try }\n\n/**\n * INTERNAL API\n */\n@InternalApi\nobject AkkaSerialization {\n\n  case class AkkaSerialized(serId: Int, serManifest: String, payload: Array[Byte])\n\n  def serialize(serialization: Serialization, payload: Any): Try[AkkaSerialized] = {\n    val p2 = payload.asInstanceOf[AnyRef]\n    val serializer = serialization.findSerializerFor(p2)\n    val serManifest = Serializers.manifestFor(serializer, p2)\n    val serialized = serialization.serialize(p2)\n    serialized.map(payload => AkkaSerialized(serializer.identifier, serManifest, payload))\n  }\n\n  def fromRow(serialization: Serialization)(row: JournalAkkaSerializationRow): Try[(PersistentRepr, Long)] = {\n    serialization.deserialize(row.eventPayload, row.eventSerId, row.eventSerManifest).flatMap { payload =>\n\n      val metadata = for {\n        mPayload <- row.metaPayload\n        mSerId <- row.metaSerId\n      } yield (mPayload, mSerId)\n\n      val repr = PersistentRepr(\n        payload,\n        row.sequenceNumber,\n        row.persistenceId,\n        row.adapterManifest,\n        row.deleted,\n        sender = null,\n        writerUuid = row.writer)\n\n      // This means that failure to deserialize the meta will fail the read, I think this is the correct to do\n      for {\n        withMeta <- metadata match {\n          case None => Success(repr)\n          case Some((payload, id)) =>\n            serialization.deserialize(payload, id, row.metaSerManifest.getOrElse(\"\")).map { meta =>\n              
repr.withMetadata(meta)\n            }\n        }\n      } yield (withMeta.withTimestamp(row.writeTimestamp), row.ordering)\n    }\n  }\n\n  def fromDurableStateRow(serialization: Serialization)(row: DurableStateTables.DurableStateRow): Try[AnyRef] = {\n    serialization.deserialize(row.statePayload, row.stateSerId, row.stateSerManifest.getOrElse(\"\"))\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/JournalRow.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc\n\nfinal case class JournalRow(\n    ordering: Long,\n    deleted: Boolean,\n    persistenceId: String,\n    sequenceNumber: Long,\n    message: Array[Byte],\n    tags: Option[String] = None)\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/cleanup/javadsl/EventSourcedCleanup.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.cleanup.javadsl\n\nimport java.util.concurrent.CompletionStage\nimport scala.jdk.FutureConverters._\n\nimport akka.Done\nimport akka.actor.ClassicActorSystemProvider\nimport akka.annotation.ApiMayChange\nimport akka.persistence.jdbc.cleanup.scaladsl\n\n/**\n * Java API: Tool for deleting events and/or snapshots for a `persistenceId` without using persistent actors.\n *\n * When running an operation with `EventSourcedCleanup` that deletes all events for a persistence id, the actor with\n * that persistence id must not be running! If the actor is restarted it would in that case be recovered to the wrong\n * state since the stored events have been deleted. Delete events before snapshot can still be used while the actor is\n * running.\n *\n * If `resetSequenceNumber` is `true` then the creating entity with the same `persistenceId` will start from 0.\n * Otherwise it will continue from the latest highest used sequence number.\n *\n * WARNING: reusing the same `persistenceId` after resetting the sequence number should be avoided, since it might be\n * confusing to reuse the same sequence number for new events.\n */\n@ApiMayChange\nfinal class EventSourcedCleanup private (delegate: scaladsl.EventSourcedCleanup) {\n\n  def this(systemProvider: ClassicActorSystemProvider, journalConfigPath: String, snapshotConfigPath: String) =\n    this(new scaladsl.EventSourcedCleanup(systemProvider, journalConfigPath, snapshotConfigPath))\n\n  def this(systemProvider: ClassicActorSystemProvider) =\n    this(systemProvider, \"jdbc-journal\", \"jdbc-snapshot-store\")\n\n  /**\n   * Delete all events related to one single `persistenceId`. 
Snapshots are not deleted.\n   */\n  def deleteAllEvents(persistenceId: String, resetSequenceNumber: Boolean): CompletionStage[Done] =\n    delegate.deleteAllEvents(persistenceId, resetSequenceNumber).asJava\n\n  /**\n   * Delete snapshots related to one single `persistenceId`. Events are not deleted.\n   */\n  def deleteSnapshot(persistenceId: String): CompletionStage[Done] =\n    delegate.deleteSnapshot(persistenceId).asJava\n\n  /**\n   * Delete everything related to one single `persistenceId`. All events and snapshots are deleted.\n   */\n  def deleteAll(persistenceId: String, resetSequenceNumber: Boolean): CompletionStage[Done] =\n    delegate.deleteAll(persistenceId, resetSequenceNumber).asJava\n\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/cleanup/scaladsl/EventSourcedCleanup.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.cleanup.scaladsl\n\nimport scala.concurrent.{ ExecutionContext, Future }\nimport akka.Done\nimport akka.actor.{ ActorSystem, ClassicActorSystemProvider }\nimport akka.annotation.ApiMayChange\nimport akka.persistence.jdbc.config.{ JournalConfig, SnapshotConfig }\nimport akka.persistence.jdbc.db.SlickExtension\nimport akka.persistence.jdbc.journal.dao.JournalDaoInstantiation\nimport akka.persistence.jdbc.snapshot.dao.SnapshotDaoInstantiation\nimport akka.stream.{ Materializer, SystemMaterializer }\n\n/**\n * Scala API: Tool for deleting events and/or snapshots for a `persistenceId` without using persistent actors.\n *\n * When running an operation with `EventSourcedCleanup` that deletes all events for a persistence id, the actor with\n * that persistence id must not be running! If the actor is restarted it would in that case be recovered to the wrong\n * state since the stored events have been deleted. 
Delete events before snapshot can still be used while the actor is\n * running.\n *\n * If `resetSequenceNumber` is `true` then the creating entity with the same `persistenceId` will start from 0.\n * Otherwise it will continue from the latest highest used sequence number.\n *\n * WARNING: reusing the same `persistenceId` after resetting the sequence number should be avoided, since it might be\n * confusing to reuse the same sequence number for new events.\n */\n@ApiMayChange\nfinal class EventSourcedCleanup(\n    systemProvider: ClassicActorSystemProvider,\n    journalConfigPath: String,\n    snapshotConfigPath: String) {\n\n  def this(systemProvider: ClassicActorSystemProvider) =\n    this(systemProvider, \"jdbc-journal\", \"jdbc-snapshot-store\")\n\n  private implicit val system: ActorSystem = systemProvider.classicSystem\n  private implicit val executionContext: ExecutionContext = system.dispatchers.defaultGlobalDispatcher\n  private implicit val mat: Materializer = SystemMaterializer(system).materializer\n  private val slick = SlickExtension(system)\n\n  private val journalConfig = system.settings.config.getConfig(journalConfigPath)\n  private val journalDao =\n    JournalDaoInstantiation.journalDao(new JournalConfig(journalConfig), slick.database(journalConfig))\n\n  private val snapshotConfig = system.settings.config.getConfig(snapshotConfigPath)\n  private val snapshotDao =\n    SnapshotDaoInstantiation.snapshotDao(new SnapshotConfig(snapshotConfig), slick.database(snapshotConfig))\n\n  /**\n   * Delete all events related to one single `persistenceId`. Snapshots are not deleted.\n   */\n  def deleteAllEvents(persistenceId: String, resetSequenceNumber: Boolean): Future[Done] = {\n    journalDao.deleteEventsTo(persistenceId, toSequenceNr = Long.MaxValue, resetSequenceNumber).map(_ => Done)\n  }\n\n  /**\n   * Delete snapshots related to one single `persistenceId`. 
Events are not deleted.\n   */\n  def deleteSnapshot(persistenceId: String): Future[Done] = {\n    snapshotDao.deleteUpToMaxSequenceNr(persistenceId, Long.MaxValue).map(_ => Done)\n  }\n\n  /**\n   * Delete everything related to one single `persistenceId`. All events and snapshots are deleted.\n   */\n  def deleteAll(persistenceId: String, resetSequenceNumber: Boolean): Future[Done] = {\n    for {\n      _ <- deleteAllEvents(persistenceId, resetSequenceNumber)\n      _ <- deleteSnapshot(persistenceId)\n    } yield Done\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/config/AkkaPersistenceConfig.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.config\n\nimport akka.persistence.jdbc.util.ConfigOps._\nimport com.typesafe.config.Config\n\nimport scala.concurrent.duration._\n\nobject ConfigKeys {\n  val useSharedDb = \"use-shared-db\"\n}\n\nclass SlickConfiguration(config: Config) {\n  val jndiName: Option[String] = config.asStringOption(\"jndiName\")\n  val jndiDbName: Option[String] = config.asStringOption(\"jndiDbName\")\n  override def toString: String = s\"SlickConfiguration($jndiName,$jndiDbName)\"\n}\n\nclass LegacyJournalTableColumnNames(config: Config) {\n  private val cfg = config.getConfig(\"tables.legacy_journal.columnNames\")\n  val ordering: String = cfg.getString(\"ordering\")\n  val deleted: String = cfg.getString(\"deleted\")\n  val persistenceId: String = cfg.getString(\"persistenceId\")\n  val sequenceNumber: String = cfg.getString(\"sequenceNumber\")\n  val created: String = cfg.getString(\"created\")\n  val tags: String = cfg.getString(\"tags\")\n  val message: String = cfg.getString(\"message\")\n  override def toString: String = s\"JournalTableColumnNames($persistenceId,$sequenceNumber,$created,$tags,$message)\"\n}\n\nclass EventJournalTableColumnNames(config: Config) {\n  private val cfg = config.getConfig(\"tables.event_journal.columnNames\")\n  val ordering: String = cfg.getString(\"ordering\")\n  val deleted: String = cfg.getString(\"deleted\")\n  val persistenceId: String = cfg.getString(\"persistenceId\")\n  val sequenceNumber: String = cfg.getString(\"sequenceNumber\")\n  val writer: String = cfg.getString(\"writer\")\n  val writeTimestamp: String = cfg.getString(\"writeTimestamp\")\n  val adapterManifest: String = cfg.getString(\"adapterManifest\")\n\n  val eventPayload: String = cfg.getString(\"eventPayload\")\n  val eventSerId: String = cfg.getString(\"eventSerId\")\n  val 
eventSerManifest: String = cfg.getString(\"eventSerManifest\")\n\n  val metaPayload: String = cfg.getString(\"metaPayload\")\n  val metaSerId: String = cfg.getString(\"metaSerId\")\n  val metaSerManifest: String = cfg.getString(\"metaSerManifest\")\n}\n\nclass EventTagTableColumnNames(config: Config) {\n  private val cfg = config.getConfig(\"tables.event_tag.columnNames\")\n  val eventId: String = cfg.getString(\"eventId\") // for compatibility\n  val persistenceId: String = cfg.getString(\"persistenceId\")\n  val sequenceNumber: String = cfg.getString(\"sequenceNumber\")\n  val tag: String = cfg.getString(\"tag\")\n}\n\nclass LegacyJournalTableConfiguration(config: Config) {\n  private val cfg = config.getConfig(\"tables.legacy_journal\")\n  val tableName: String = cfg.getString(\"tableName\")\n  val schemaName: Option[String] = cfg.asStringOption(\"schemaName\")\n  val columnNames: LegacyJournalTableColumnNames = new LegacyJournalTableColumnNames(config)\n  override def toString: String = s\"LegacyJournalTableConfiguration($tableName,$schemaName,$columnNames)\"\n}\n\nclass EventJournalTableConfiguration(config: Config) {\n  private val cfg = config.getConfig(\"tables.event_journal\")\n  val tableName: String = cfg.getString(\"tableName\")\n  val schemaName: Option[String] = cfg.asStringOption(\"schemaName\")\n  val columnNames: EventJournalTableColumnNames = new EventJournalTableColumnNames(config)\n  override def toString: String = s\"EventJournalTableConfiguration($tableName,$schemaName,$columnNames)\"\n}\nclass EventTagTableConfiguration(config: Config) {\n  private val cfg = config.getConfig(\"tables.event_tag\")\n  val legacyTagKey: Boolean = cfg.getBoolean(\"legacy-tag-key\")\n  val tableName: String = cfg.getString(\"tableName\")\n  val schemaName: Option[String] = cfg.asStringOption(\"schemaName\")\n  val columnNames: EventTagTableColumnNames = new EventTagTableColumnNames(config)\n}\nclass LegacySnapshotTableColumnNames(config: Config) {\n  private val 
cfg = config.getConfig(\"tables.legacy_snapshot.columnNames\")\n  val persistenceId: String = cfg.getString(\"persistenceId\")\n  val sequenceNumber: String = cfg.getString(\"sequenceNumber\")\n  val created: String = cfg.getString(\"created\")\n  val snapshot: String = cfg.getString(\"snapshot\")\n  override def toString: String = s\"SnapshotTableColumnNames($persistenceId,$sequenceNumber,$created,$snapshot)\"\n}\n\nclass SnapshotTableColumnNames(config: Config) {\n  private val cfg = config.getConfig(\"tables.snapshot.columnNames\")\n  val persistenceId: String = cfg.getString(\"persistenceId\")\n  val sequenceNumber: String = cfg.getString(\"sequenceNumber\")\n  val created: String = cfg.getString(\"created\")\n\n  val snapshotPayload: String = cfg.getString(\"snapshotPayload\")\n  val snapshotSerId: String = cfg.getString(\"snapshotSerId\")\n  val snapshotSerManifest: String = cfg.getString(\"snapshotSerManifest\")\n\n  val metaPayload: String = cfg.getString(\"metaPayload\")\n  val metaSerId: String = cfg.getString(\"metaSerId\")\n  val metaSerManifest: String = cfg.getString(\"metaSerManifest\")\n}\n\nclass LegacySnapshotTableConfiguration(config: Config) {\n  private val cfg = config.getConfig(\"tables.legacy_snapshot\")\n  val tableName: String = cfg.getString(\"tableName\")\n  val schemaName: Option[String] = cfg.asStringOption(\"schemaName\")\n  val columnNames: LegacySnapshotTableColumnNames = new LegacySnapshotTableColumnNames(config)\n  override def toString: String = s\"LegacySnapshotTableConfiguration($tableName,$schemaName,$columnNames)\"\n}\n\nclass SnapshotTableConfiguration(config: Config) {\n  private val cfg = config.getConfig(\"tables.snapshot\")\n  val tableName: String = cfg.getString(\"tableName\")\n  val schemaName: Option[String] = cfg.asStringOption(\"schemaName\")\n  val columnNames: SnapshotTableColumnNames = new SnapshotTableColumnNames(config)\n  override def toString: String = 
s\"SnapshotTableConfiguration($tableName,$schemaName,$columnNames)\"\n}\n\nclass JournalPluginConfig(config: Config) {\n  val tagSeparator: String = config.getString(\"tagSeparator\")\n  val dao: String = config.getString(\"dao\")\n  override def toString: String = s\"JournalPluginConfig($tagSeparator,$dao)\"\n}\n\nclass BaseDaoConfig(config: Config) {\n  val bufferSize: Int = config.getInt(\"bufferSize\")\n  val batchSize: Int = config.getInt(\"batchSize\")\n  val replayBatchSize: Int = config.getInt(\"replayBatchSize\")\n  val parallelism: Int = config.getInt(\"parallelism\")\n  override def toString: String = s\"BaseDaoConfig($bufferSize,$batchSize,$parallelism)\"\n}\n\nclass ReadJournalPluginConfig(config: Config) {\n  val tagSeparator: String = config.getString(\"tagSeparator\")\n  val dao: String = config.getString(\"dao\")\n  override def toString: String = s\"ReadJournalPluginConfig($tagSeparator,$dao)\"\n}\n\nclass SnapshotPluginConfig(config: Config) {\n  val dao: String = config.getString(\"dao\")\n  override def toString: String = s\"SnapshotPluginConfig($dao)\"\n}\n\n// aggregations\n\nclass JournalConfig(config: Config) {\n  val journalTableConfiguration = new LegacyJournalTableConfiguration(config)\n  val eventJournalTableConfiguration = new EventJournalTableConfiguration(config)\n  val eventTagTableConfiguration = new EventTagTableConfiguration(config)\n  val pluginConfig = new JournalPluginConfig(config)\n  val daoConfig = new BaseDaoConfig(config)\n  val useSharedDb: Option[String] = config.asStringOption(ConfigKeys.useSharedDb)\n  override def toString: String = s\"JournalConfig($journalTableConfiguration,$pluginConfig,$useSharedDb)\"\n}\n\nclass SnapshotConfig(config: Config) {\n  val legacySnapshotTableConfiguration = new LegacySnapshotTableConfiguration(config)\n  val snapshotTableConfiguration = new SnapshotTableConfiguration(config)\n  val pluginConfig = new SnapshotPluginConfig(config)\n  val useSharedDb: Option[String] = 
config.asStringOption(ConfigKeys.useSharedDb)\n  override def toString: String = s\"SnapshotConfig($snapshotTableConfiguration,$pluginConfig,$useSharedDb)\"\n}\n\nobject JournalSequenceRetrievalConfig {\n  def apply(config: Config): JournalSequenceRetrievalConfig =\n    JournalSequenceRetrievalConfig(\n      batchSize = config.getInt(\"journal-sequence-retrieval.batch-size\"),\n      maxTries = config.getInt(\"journal-sequence-retrieval.max-tries\"),\n      queryDelay = config.asFiniteDuration(\"journal-sequence-retrieval.query-delay\"),\n      maxBackoffQueryDelay = config.asFiniteDuration(\"journal-sequence-retrieval.max-backoff-query-delay\"),\n      askTimeout = config.asFiniteDuration(\"journal-sequence-retrieval.ask-timeout\"))\n}\ncase class JournalSequenceRetrievalConfig(\n    batchSize: Int,\n    maxTries: Int,\n    queryDelay: FiniteDuration,\n    maxBackoffQueryDelay: FiniteDuration,\n    askTimeout: FiniteDuration)\n\nclass ReadJournalConfig(config: Config) {\n  val journalTableConfiguration = new LegacyJournalTableConfiguration(config)\n  val eventJournalTableConfiguration = new EventJournalTableConfiguration(config)\n  val eventTagTableConfiguration = new EventTagTableConfiguration(config)\n  val journalSequenceRetrievalConfiguration = JournalSequenceRetrievalConfig(config)\n  val pluginConfig = new ReadJournalPluginConfig(config)\n  val refreshInterval: FiniteDuration = config.asFiniteDuration(\"refresh-interval\")\n  val maxBufferSize: Int = config.getInt(\"max-buffer-size\")\n  val eventsByTagBufferSizesPerQuery: Long = config.getLong(\"events-by-tag-buffer-sizes-per-query\")\n  require(eventsByTagBufferSizesPerQuery >= 0, \"events-by-tag-buffer-sizes-per-query must not be negative\")\n  val addShutdownHook: Boolean = config.getBoolean(\"add-shutdown-hook\")\n\n  override def toString: String =\n    s\"ReadJournalConfig($journalTableConfiguration,$pluginConfig,$refreshInterval,$maxBufferSize,$addShutdownHook)\"\n}\n\nclass 
DurableStateTableColumnNames(config: Config) {\n  private val cfg = config.getConfig(\"tables.durable_state.columnNames\")\n  val globalOffset: String = cfg.getString(\"globalOffset\")\n  val persistenceId: String = cfg.getString(\"persistenceId\")\n  val revision: String = cfg.getString(\"revision\")\n  val statePayload: String = cfg.getString(\"statePayload\")\n  val stateSerId: String = cfg.getString(\"stateSerId\")\n  val stateSerManifest: String = cfg.getString(\"stateSerManifest\")\n  val tag: String = cfg.getString(\"tag\")\n  val stateTimestamp: String = cfg.getString(\"stateTimestamp\")\n}\n\nclass DurableStateTableConfiguration(config: Config) {\n  private val cfg = config.getConfig(\"tables.durable_state\")\n  val tableName: String = cfg.getString(\"tableName\")\n  val refreshInterval: FiniteDuration = config.asFiniteDuration(\"refreshInterval\")\n  val batchSize: Int = config.getInt(\"batchSize\")\n  val schemaName: Option[String] = cfg.asStringOption(\"schemaName\")\n  val columnNames: DurableStateTableColumnNames = new DurableStateTableColumnNames(config)\n  val stateSequenceConfig = DurableStateSequenceRetrievalConfig(config)\n  override def toString: String = s\"DurableStateTableConfiguration($tableName,$schemaName,$columnNames)\"\n}\n\nobject DurableStateSequenceRetrievalConfig {\n  def apply(config: Config): DurableStateSequenceRetrievalConfig =\n    DurableStateSequenceRetrievalConfig(\n      batchSize = config.getInt(\"durable-state-sequence-retrieval.batch-size\"),\n      maxTries = config.getInt(\"durable-state-sequence-retrieval.max-tries\"),\n      queryDelay = config.asFiniteDuration(\"durable-state-sequence-retrieval.query-delay\"),\n      maxBackoffQueryDelay = config.asFiniteDuration(\"durable-state-sequence-retrieval.max-backoff-query-delay\"),\n      askTimeout = config.asFiniteDuration(\"durable-state-sequence-retrieval.ask-timeout\"),\n      revisionCacheCapacity = 
config.getInt(\"durable-state-sequence-retrieval.revision-cache-capacity\"))\n}\ncase class DurableStateSequenceRetrievalConfig(\n    batchSize: Int,\n    maxTries: Int,\n    queryDelay: FiniteDuration,\n    maxBackoffQueryDelay: FiniteDuration,\n    askTimeout: FiniteDuration,\n    revisionCacheCapacity: Int)\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/db/SlickDatabase.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.db\n\nimport akka.actor.ActorSystem\nimport akka.annotation.InternalApi\n\nimport javax.naming.InitialContext\nimport akka.persistence.jdbc.config.SlickConfiguration\nimport com.typesafe.config.Config\nimport slick.basic.DatabaseConfig\nimport slick.jdbc.JdbcProfile\nimport slick.jdbc.JdbcBackend._\n\n/**\n * INTERNAL API\n */\n@deprecated(message = \"Internal API, will be removed in 4.0.0\", since = \"3.4.0\")\nobject SlickDriver {\n\n  /**\n   * INTERNAL API\n   */\n  @deprecated(message = \"Internal API, will be removed in 4.0.0\", since = \"3.4.0\")\n  def forDriverName(config: Config): JdbcProfile =\n    SlickDatabase.profile(config, \"slick\")\n}\n\n/**\n * INTERNAL API\n */\nobject SlickDatabase {\n\n  /**\n   * INTERNAL API\n   */\n  @deprecated(message = \"Internal API, will be removed in 4.0.0\", since = \"3.4.0\")\n  def forConfig(config: Config, slickConfiguration: SlickConfiguration): Database = {\n    database(config, slickConfiguration, \"slick.db\")\n  }\n\n  /**\n   * INTERNAL API\n   */\n  private[jdbc] def profile(config: Config, path: String): JdbcProfile =\n    DatabaseConfig.forConfig[JdbcProfile](path, config).profile\n\n  /**\n   * INTERNAL API\n   */\n  private[jdbc] def database(config: Config, slickConfiguration: SlickConfiguration, path: String): Database = {\n    slickConfiguration.jndiName\n      .map(Database.forName(_, None))\n      .orElse {\n        slickConfiguration.jndiDbName.map(new InitialContext().lookup(_).asInstanceOf[Database])\n      }\n      .getOrElse(Database.forConfig(path, config))\n  }\n\n  /**\n   * INTERNAL API\n   */\n  private[jdbc] def initializeEagerly(\n      config: Config,\n      slickConfiguration: SlickConfiguration,\n      path: String): SlickDatabase = {\n    val dbPath = if (path.isEmpty) \"db\" else 
s\"$path.db\"\n    EagerSlickDatabase(database(config, slickConfiguration, dbPath), profile(config, path))\n  }\n}\n\ntrait SlickDatabase {\n  def database: Database\n  def profile: JdbcProfile\n\n  /**\n   * If true, the requesting side usually a (read/write/snapshot journal)\n   * should shutdown the database when it closes. If false, it should leave\n   * the database connection pool open, since it might still be used elsewhere.\n   */\n  def allowShutdown: Boolean\n}\n\n@InternalApi\ncase class EagerSlickDatabase(database: Database, profile: JdbcProfile) extends SlickDatabase {\n  override def allowShutdown: Boolean = true\n}\n\n/**\n * A LazySlickDatabase lazily initializes a database, it also manages the shutdown of the database\n * @param config The configuration used to create the database\n */\n@InternalApi\nclass LazySlickDatabase(config: Config, system: ActorSystem) extends SlickDatabase {\n  val profile: JdbcProfile = SlickDatabase.profile(config, path = \"\")\n\n  lazy val database: Database = {\n    val db = SlickDatabase.database(config, new SlickConfiguration(config), path = \"db\")\n    system.registerOnTermination {\n      db.close()\n    }\n    db\n  }\n\n  /** This database shutdown is managed by the db holder, so users of this db do not need to bother shutting it down */\n  override def allowShutdown: Boolean = false\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/db/SlickExtension.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.db\n\nimport akka.actor.{ ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider }\nimport akka.persistence.jdbc.config.{ ConfigKeys, SlickConfiguration }\nimport akka.persistence.jdbc.util.ConfigOps._\nimport com.typesafe.config.{ Config, ConfigObject }\n\nimport scala.jdk.CollectionConverters._\nimport scala.util.{ Failure, Success }\n\nobject SlickExtension extends ExtensionId[SlickExtensionImpl] with ExtensionIdProvider {\n  override def lookup: SlickExtension.type = SlickExtension\n  override def createExtension(system: ExtendedActorSystem) = new SlickExtensionImpl(system)\n}\n\nclass SlickExtensionImpl(system: ExtendedActorSystem) extends Extension {\n\n  private val dbProvider: SlickDatabaseProvider = {\n    val fqcn = system.settings.config.getString(\"akka-persistence-jdbc.database-provider-fqcn\")\n    val args = List(classOf[ActorSystem] -> system)\n    system.dynamicAccess.createInstanceFor[SlickDatabaseProvider](fqcn, args) match {\n      case Success(result) => result\n      case Failure(t)      => throw new RuntimeException(\"Failed to create SlickDatabaseProvider\", t)\n    }\n  }\n\n  def database(config: Config): SlickDatabase = dbProvider.database(config)\n}\n\n/**\n * User overridable database provider.\n * Since this provider is called from an akka extension it must be thread safe!\n *\n * A SlickDatabaseProvider is loaded using reflection,\n * The instance is created using the following:\n * - The fully qualified class name as configured in `jdbc-journal.database-provider-fqcn`.\n * - The constructor with one argument of type [[akka.actor.ActorSystem]] is used to create the instance.\n *   Therefore the class must have such a constructor.\n */\ntrait SlickDatabaseProvider {\n\n  /**\n   * Create or retrieve the database\n   * 
@param config The configuration which may be used to create the database. If the database is shared\n   *               then the SlickDatabaseProvider implementation may choose to ignore this parameter.\n   */\n  def database(config: Config): SlickDatabase\n}\n\nclass DefaultSlickDatabaseProvider(system: ActorSystem) extends SlickDatabaseProvider {\n  val sharedDatabases: Map[String, LazySlickDatabase] = system.settings.config\n    .getObject(\"akka-persistence-jdbc.shared-databases\")\n    .asScala\n    .flatMap {\n      case (key, confObj: ConfigObject) =>\n        val conf = confObj.toConfig\n        if (conf.hasPath(\"profile\")) {\n          // Only create the LazySlickDatabase if a profile has actually been configured, this ensures that the example in the reference conf is ignored\n          List(key -> new LazySlickDatabase(conf, system))\n        } else Nil\n      case (key, notAnObject) =>\n        throw new RuntimeException(\n          s\"\"\"Expected \"akka-persistence-jdbc.shared-databases.$key\" to be a config ConfigObject, but got ${notAnObject\n            .valueType()} (${notAnObject.getClass})\"\"\")\n    }\n    .toMap\n\n  private def getSharedDbOrThrow(sharedDbName: String): LazySlickDatabase =\n    sharedDatabases.getOrElse(\n      sharedDbName,\n      throw new RuntimeException(\n        s\"No shared database is configured under akka-persistence-jdbc.shared-databases.$sharedDbName\"))\n\n  def database(config: Config): SlickDatabase = {\n    config.asStringOption(ConfigKeys.useSharedDb) match {\n      case None => SlickDatabase.initializeEagerly(config, new SlickConfiguration(config.getConfig(\"slick\")), \"slick\")\n      case Some(sharedDbName) =>\n        getSharedDbOrThrow(sharedDbName)\n    }\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/JdbcAsyncWriteJournal.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal\n\nimport java.util.{ HashMap => JHMap, Map => JMap }\nimport akka.Done\nimport akka.actor.ActorSystem\nimport akka.persistence.jdbc.config.JournalConfig\nimport akka.persistence.jdbc.journal.JdbcAsyncWriteJournal.{ InPlaceUpdateEvent, WriteFinished }\nimport akka.persistence.jdbc.journal.dao.{ JournalDao, JournalDaoInstantiation, JournalDaoWithUpdates }\nimport akka.persistence.jdbc.db.{ SlickDatabase, SlickExtension }\nimport akka.persistence.journal.AsyncWriteJournal\nimport akka.persistence.{ AtomicWrite, PersistentRepr }\nimport akka.stream.{ Materializer, SystemMaterializer }\nimport com.typesafe.config.Config\nimport slick.jdbc.JdbcBackend._\n\nimport scala.collection.immutable._\nimport scala.concurrent.{ ExecutionContext, Future }\nimport scala.util.{ Failure, Success, Try }\nimport akka.pattern.pipe\nimport akka.persistence.jdbc.util.PluginVersionChecker\n\nobject JdbcAsyncWriteJournal {\n  private case class WriteFinished(pid: String, f: Future[_])\n\n  /**\n   * Extra Plugin API: May be used to issue in-place updates for events.\n   * To be used only for data migrations such as \"encrypt all events\" and similar operations.\n   *\n   * The write payload may be wrapped in a [[akka.persistence.journal.Tagged]],\n   * in which case the new tags will overwrite the existing tags of the event.\n   */\n  final case class InPlaceUpdateEvent(persistenceId: String, seqNr: Long, write: AnyRef)\n}\n\nclass JdbcAsyncWriteJournal(config: Config) extends AsyncWriteJournal {\n\n  implicit val ec: ExecutionContext = context.dispatcher\n  implicit val system: ActorSystem = context.system\n  implicit val mat: Materializer = SystemMaterializer(system).materializer\n  val journalConfig = new JournalConfig(config)\n\n  PluginVersionChecker.check()\n\n  val slickDb: 
SlickDatabase = SlickExtension(system).database(config)\n  def db: Database = slickDb.database\n  val journalDao: JournalDao = JournalDaoInstantiation.journalDao(journalConfig, slickDb)\n\n  // only accessed if we need to perform Updates -- which is very rarely\n  def journalDaoWithUpdates: JournalDaoWithUpdates =\n    journalDao match {\n      case upgraded: JournalDaoWithUpdates => upgraded\n      case _ =>\n        throw new IllegalStateException(s\"The ${journalDao.getClass} does NOT implement [JournalDaoWithUpdates], \" +\n        s\"which is required to perform updates of events! Please configure a valid update capable DAO (e.g. the default [ByteArrayJournalDao].\")\n    }\n\n  // readHighestSequence must be performed after pending write for a persistenceId\n  // when the persistent actor is restarted.\n  private val writeInProgress: JMap[String, Future[_]] = new JHMap\n\n  override def asyncWriteMessages(messages: Seq[AtomicWrite]): Future[Seq[Try[Unit]]] = {\n    // add timestamp to all payloads in all AtomicWrite messages\n    val now = System.currentTimeMillis()\n    val timedMessages =\n      messages.map { atomWrt =>\n        atomWrt.copy(payload = atomWrt.payload.map(pr => pr.withTimestamp(now)))\n      }\n\n    val future = journalDao.asyncWriteMessages(timedMessages)\n    val persistenceId = timedMessages.head.persistenceId\n    writeInProgress.put(persistenceId, future)\n    future.onComplete(_ => self ! 
WriteFinished(persistenceId, future))\n    future\n  }\n\n  override def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] =\n    journalDao.delete(persistenceId, toSequenceNr)\n\n  override def asyncReadHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = {\n    def fetchHighestSeqNr() = journalDao.highestSequenceNr(persistenceId, fromSequenceNr)\n    writeInProgress.get(persistenceId) match {\n      case null                      => fetchHighestSeqNr()\n      case f: Future[Any @unchecked] =>\n        // we must fetch the highest sequence number after the previous write has completed\n        // If the previous write failed then we can ignore this\n        f.recover { case _ => () }.flatMap(_ => fetchHighestSeqNr())\n    }\n  }\n\n  private def asyncUpdateEvent(persistenceId: String, sequenceNr: Long, message: AnyRef): Future[Done] = {\n    journalDaoWithUpdates.update(persistenceId, sequenceNr, message)\n  }\n\n  override def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(\n      recoveryCallback: (PersistentRepr) => Unit): Future[Unit] =\n    journalDao\n      .messagesWithBatch(persistenceId, fromSequenceNr, toSequenceNr, journalConfig.daoConfig.replayBatchSize, None)\n      .take(max)\n      .runForeach {\n        case Success((repr, _)) =>\n          recoveryCallback(repr)\n        case Failure(ex) => throw ex\n      }\n      .map(_ => ())\n\n  override def postStop(): Unit = {\n    if (slickDb.allowShutdown) {\n      // Since a (new) db is created when this actor (re)starts, we must close it when the actor stops\n      db.close()\n    }\n    super.postStop()\n  }\n\n  override def receivePluginInternal: Receive = {\n    case WriteFinished(persistenceId, future) =>\n      writeInProgress.remove(persistenceId, future)\n    case InPlaceUpdateEvent(pid, seq, write) =>\n      asyncUpdateEvent(pid, seq, write).pipeTo(sender())\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/BaseDao.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao\n\nimport akka.persistence.jdbc.config.BaseDaoConfig\nimport akka.stream.scaladsl.{ Keep, Sink, Source, SourceQueueWithComplete }\nimport akka.stream.{ Materializer, OverflowStrategy, QueueOfferResult }\n\nimport scala.collection.immutable.{ Seq, Vector }\nimport scala.concurrent.{ ExecutionContext, Future, Promise }\n\n// Shared with the legacy DAO\nabstract class BaseDao[T] {\n  implicit val mat: Materializer\n  implicit val ec: ExecutionContext\n\n  def baseDaoConfig: BaseDaoConfig\n\n  val writeQueue: SourceQueueWithComplete[(Promise[Unit], Seq[T])] = Source\n    .queue[(Promise[Unit], Seq[T])](baseDaoConfig.bufferSize, OverflowStrategy.dropNew)\n    .batchWeighted[(Seq[Promise[Unit]], Seq[T])](baseDaoConfig.batchSize, _._2.size, tup => Vector(tup._1) -> tup._2) {\n      case ((promises, rows), (newPromise, newRows)) => (promises :+ newPromise) -> (rows ++ newRows)\n    }\n    .mapAsync(baseDaoConfig.parallelism) { case (promises, rows) =>\n      writeJournalRows(rows).map(unit => promises.foreach(_.success(unit))).recover { case t =>\n        promises.foreach(_.failure(t))\n      }\n    }\n    .toMat(Sink.ignore)(Keep.left)\n    .run()\n\n  def writeJournalRows(xs: Seq[T]): Future[Unit]\n\n  def queueWriteJournalRows(xs: Seq[T]): Future[Unit] = {\n    val promise = Promise[Unit]()\n    writeQueue.offer(promise -> xs).flatMap {\n      case QueueOfferResult.Enqueued =>\n        promise.future\n      case QueueOfferResult.Failure(t) =>\n        Future.failed(new Exception(\"Failed to write journal row batch\", t))\n      case QueueOfferResult.Dropped =>\n        Future.failed(new Exception(\n          s\"Failed to enqueue journal row batch write, the queue buffer was full (${baseDaoConfig.bufferSize} elements) please check the jdbc-journal.bufferSize 
setting\"))\n      case QueueOfferResult.QueueClosed =>\n        Future.failed(new Exception(\"Failed to enqueue journal row batch write, the queue was closed\"))\n    }\n  }\n\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/BaseJournalDaoWithReadMessages.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao\n\nimport akka.NotUsed\nimport akka.actor.Scheduler\nimport akka.persistence.PersistentRepr\nimport akka.persistence.jdbc.journal.dao.FlowControl.{ Continue, ContinueDelayed, Stop }\nimport akka.stream.Materializer\nimport akka.stream.scaladsl.{ Sink, Source }\n\nimport scala.concurrent.{ ExecutionContext, Future }\nimport scala.concurrent.duration.FiniteDuration\nimport scala.util.{ Failure, Success, Try }\n\ntrait BaseJournalDaoWithReadMessages extends JournalDaoWithReadMessages {\n\n  implicit val ec: ExecutionContext\n  implicit val mat: Materializer\n\n  override def messagesWithBatch(\n      persistenceId: String,\n      fromSequenceNr: Long,\n      toSequenceNr: Long,\n      batchSize: Int,\n      refreshInterval: Option[(FiniteDuration, Scheduler)]): Source[Try[(PersistentRepr, Long)], NotUsed] = {\n\n    Source\n      .unfoldAsync[(Long, FlowControl), Seq[Try[(PersistentRepr, Long)]]]((Math.max(1, fromSequenceNr), Continue)) {\n        case (from, control) =>\n          def retrieveNextBatch(): Future[Option[((Long, FlowControl), Seq[Try[(PersistentRepr, Long)]])]] = {\n            for {\n              xs <- messages(persistenceId, from, toSequenceNr, batchSize).runWith(Sink.seq)\n            } yield {\n              val hasMoreEvents = xs.size == batchSize\n              // Events are ordered by sequence number, therefore the last one is the largest)\n              val lastSeqNrInBatch: Option[Long] = xs.lastOption match {\n                case Some(Success((repr, _))) => Some(repr.sequenceNr)\n                case Some(Failure(e))         => throw e // fail the returned Future\n                case None                     => None\n              }\n              val hasLastEvent = lastSeqNrInBatch.exists(_ >= toSequenceNr)\n              val 
nextControl: FlowControl =\n                if (hasLastEvent || from > toSequenceNr) Stop\n                else if (hasMoreEvents) Continue\n                else if (refreshInterval.isEmpty) Stop\n                else ContinueDelayed\n\n              val nextFrom: Long = lastSeqNrInBatch match {\n                // Continue querying from the last sequence number (the events are ordered)\n                case Some(lastSeqNr) => lastSeqNr + 1\n                case None            => from\n              }\n              Some(((nextFrom, nextControl), xs))\n            }\n          }\n\n          control match {\n            case Stop     => Future.successful(None)\n            case Continue => retrieveNextBatch()\n            case ContinueDelayed =>\n              val (delay, scheduler) = refreshInterval.get\n              akka.pattern.after(delay, scheduler)(retrieveNextBatch())\n          }\n      }\n      .mapConcat(identity)\n  }\n\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/DefaultJournalDao.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao\n\nimport scala.collection.immutable\nimport scala.collection.immutable.Nil\nimport scala.collection.immutable.Seq\nimport scala.concurrent.ExecutionContext\nimport scala.concurrent.Future\nimport scala.util.Try\n\nimport akka.NotUsed\nimport akka.persistence.jdbc.AkkaSerialization\nimport akka.persistence.jdbc.config.BaseDaoConfig\nimport akka.persistence.jdbc.config.JournalConfig\nimport akka.persistence.jdbc.journal.dao.JournalTables.JournalAkkaSerializationRow\nimport akka.persistence.journal.Tagged\nimport akka.persistence.AtomicWrite\nimport akka.persistence.PersistentRepr\nimport akka.serialization.Serialization\nimport akka.stream.Materializer\nimport akka.stream.scaladsl.Source\nimport slick.jdbc.JdbcBackend.Database\nimport slick.jdbc.JdbcProfile\n\n/**\n * A [[JournalDao]] that uses Akka serialization to serialize the payload and store\n * the manifest and serializer id used.\n */\nclass DefaultJournalDao(\n    val db: Database,\n    val profile: JdbcProfile,\n    val journalConfig: JournalConfig,\n    serialization: Serialization)(implicit val ec: ExecutionContext, val mat: Materializer)\n    extends BaseDao[(JournalAkkaSerializationRow, Set[String])]\n    with BaseJournalDaoWithReadMessages\n    with JournalDao\n    with H2Compat {\n\n  import profile.api._\n\n  override def baseDaoConfig: BaseDaoConfig = journalConfig.daoConfig\n\n  override def writeJournalRows(xs: immutable.Seq[(JournalAkkaSerializationRow, Set[String])]): Future[Unit] = {\n    db.run(queries.writeJournalRows(xs).transactionally).map(_ => ())(ExecutionContext.parasitic)\n  }\n\n  val queries =\n    new JournalQueries(profile, journalConfig.eventJournalTableConfiguration, journalConfig.eventTagTableConfiguration)\n\n  override def deleteEventsTo(persistenceId: String, toSequenceNr: 
Long, resetSequenceNumber: Boolean): Future[Unit] = {\n\n    // note: the passed toSequenceNr will be Long.MaxValue when doing a 'full' journal clean-up\n    // see JournalSpec's test: 'not reset highestSequenceNr after journal cleanup'\n    val actions: DBIOAction[Unit, NoStream, Effect.Write with Effect.Read] = {\n      // If we're resetting the sequence number, no need to determine the highest sequence number.\n      if (resetSequenceNumber) {\n        queries.delete(persistenceId, toSequenceNr).map(_ => ())\n      } else {\n        highestSequenceNrAction(persistenceId)\n          .flatMap {\n            // are we trying to delete the highest or even higher seqNr ?\n            case highestSeqNr if highestSeqNr <= toSequenceNr =>\n              // if so, we delete up to the before last and\n              // mark the last as logically deleted preserving highestSeqNr\n              queries\n                .delete(persistenceId, highestSeqNr - 1)\n                .flatMap(_ => queries.markAsDeleted(persistenceId, highestSeqNr))\n            case _ =>\n              // if not, we delete up to the requested seqNr\n              queries.delete(persistenceId, toSequenceNr)\n          }\n          .map(_ => ())\n      }\n    }\n\n    db.run(actions.transactionally)\n  }\n\n  override def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] =\n    db.run(highestSequenceNrAction(persistenceId))\n\n  private def highestSequenceNrAction(persistenceId: String): DBIOAction[Long, NoStream, Effect.Read] =\n    queries.highestSequenceNrForPersistenceId(persistenceId).result.map(_.getOrElse(0))\n\n  private def highestMarkedSequenceNr(persistenceId: String) =\n    queries.highestMarkedSequenceNrForPersistenceId(persistenceId).result\n\n  override def asyncWriteMessages(messages: immutable.Seq[AtomicWrite]): Future[immutable.Seq[Try[Unit]]] = {\n\n    def serializeAtomicWrite(aw: AtomicWrite): Try[Seq[(JournalAkkaSerializationRow, Set[String])]] = {\n     
 Try(aw.payload.map(serialize))\n    }\n\n    def serialize(pr: PersistentRepr): (JournalAkkaSerializationRow, Set[String]) = {\n\n      val (updatedPr, tags) = pr.payload match {\n        case Tagged(payload, tags) => (pr.withPayload(payload), tags)\n        case _                     => (pr, Set.empty[String])\n      }\n\n      val serializedPayload = AkkaSerialization.serialize(serialization, updatedPr.payload).get\n      val serializedMetadata = updatedPr.metadata.flatMap(m => AkkaSerialization.serialize(serialization, m).toOption)\n      val row = JournalAkkaSerializationRow(\n        Long.MinValue,\n        updatedPr.deleted,\n        updatedPr.persistenceId,\n        updatedPr.sequenceNr,\n        updatedPr.writerUuid,\n        updatedPr.timestamp,\n        updatedPr.manifest,\n        serializedPayload.payload,\n        serializedPayload.serId,\n        serializedPayload.serManifest,\n        serializedMetadata.map(_.payload),\n        serializedMetadata.map(_.serId),\n        serializedMetadata.map(_.serManifest))\n\n      (row, tags)\n    }\n\n    val serializedTries = messages.map(serializeAtomicWrite)\n\n    val rowsToWrite: Seq[(JournalAkkaSerializationRow, Set[String])] = for {\n      serializeTry <- serializedTries\n      row <- serializeTry.getOrElse(Seq.empty)\n    } yield row\n\n    def resultWhenWriteComplete =\n      if (serializedTries.forall(_.isSuccess)) Nil else serializedTries.map(_.map(_ => ()))\n\n    queueWriteJournalRows(rowsToWrite).map(_ => resultWhenWriteComplete)\n  }\n\n  override def messages(\n      persistenceId: String,\n      fromSequenceNr: Long,\n      toSequenceNr: Long,\n      max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = {\n    Source\n      .fromPublisher(\n        db.stream(\n          queries.messagesQuery((persistenceId, fromSequenceNr, toSequenceNr, correctMaxForH2Driver(max))).result))\n      .map(AkkaSerialization.fromRow(serialization)(_))\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/FlowControl.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao\n\nprivate[jdbc] sealed trait FlowControl\n\nprivate[jdbc] object FlowControl {\n\n  /** Keep querying - used when we are sure that there is more events to fetch */\n  case object Continue extends FlowControl\n\n  /**\n   * Keep querying with delay - used when we have consumed all events,\n   * but want to poll for future events\n   */\n  case object ContinueDelayed extends FlowControl\n\n  /** Stop querying - used when we reach the desired offset */\n  case object Stop extends FlowControl\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/H2Compat.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao\n\nimport slick.jdbc.JdbcProfile\n\ntrait H2Compat {\n\n  val profile: JdbcProfile\n\n  private lazy val isH2Driver = profile match {\n    case slick.jdbc.H2Profile => true\n    case _                    => false\n  }\n\n  def correctMaxForH2Driver(max: Long): Long = {\n    if (isH2Driver) {\n      Math.min(max, Int.MaxValue) // H2 only accepts a LIMIT clause as an Integer\n    } else {\n      max\n    }\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDao.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao\n\nimport akka.persistence.AtomicWrite\n\nimport java.time.Instant\nimport scala.collection.immutable.Seq\nimport scala.concurrent.Future\nimport scala.util.Try\n\ntrait JournalDao extends JournalDaoWithReadMessages {\n\n  /**\n   * Deletes all persistent messages up to toSequenceNr (inclusive) for the persistenceId\n   */\n  def delete(persistenceId: String, toSequenceNr: Long): Future[Unit] =\n    deleteEventsTo(persistenceId, toSequenceNr, false)\n\n  /**\n   * Deletes all persistent events up to toSequenceNr (inclusive) for the persistenceId\n   */\n  def deleteEventsTo(persistenceId: String, toSequenceNr: Long, resetSequenceNumber: Boolean): Future[Unit]\n\n  /**\n   * Returns the highest sequence number for the events that are stored for that `persistenceId`. When no events are\n   * found for the `persistenceId`, 0L will be the highest sequence number\n   */\n  def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long]\n\n  /**\n   * @see [[akka.persistence.journal.AsyncWriteJournal.asyncWriteMessages(messages)]]\n   */\n  def asyncWriteMessages(messages: Seq[AtomicWrite]): Future[Seq[Try[Unit]]]\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDaoInstantiation.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao\n\nimport akka.actor.{ ActorSystem, ExtendedActorSystem }\nimport akka.annotation.InternalApi\nimport akka.persistence.jdbc.config.JournalConfig\nimport akka.persistence.jdbc.db.SlickDatabase\nimport akka.serialization.{ Serialization, SerializationExtension }\nimport akka.stream.Materializer\nimport slick.jdbc.JdbcBackend.Database\nimport slick.jdbc.JdbcProfile\n\nimport scala.concurrent.ExecutionContext\nimport scala.util.{ Failure, Success }\n\n@InternalApi\nprivate[jdbc] object JournalDaoInstantiation {\n\n  def journalDao(\n      journalConfig: JournalConfig,\n      slickDb: SlickDatabase)(implicit system: ActorSystem, ec: ExecutionContext, mat: Materializer): JournalDao = {\n    val fqcn = journalConfig.pluginConfig.dao\n    val profile: JdbcProfile = slickDb.profile\n    val args = Seq(\n      (classOf[Database], slickDb.database),\n      (classOf[JdbcProfile], profile),\n      (classOf[JournalConfig], journalConfig),\n      (classOf[Serialization], SerializationExtension(system)),\n      (classOf[ExecutionContext], ec),\n      (classOf[Materializer], mat))\n    system.asInstanceOf[ExtendedActorSystem].dynamicAccess.createInstanceFor[JournalDao](fqcn, args) match {\n      case Success(dao)   => dao\n      case Failure(cause) => throw cause\n    }\n  }\n\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDaoWithReadMessages.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao\n\nimport scala.concurrent.duration.FiniteDuration\nimport scala.util.Try\n\nimport akka.NotUsed\nimport akka.actor.Scheduler\nimport akka.persistence.PersistentRepr\nimport akka.stream.scaladsl.Source\n\ntrait JournalDaoWithReadMessages {\n\n  /**\n   * Returns a Source of PersistentRepr and ordering number for a certain persistenceId.\n   * It includes the events with sequenceNr between `fromSequenceNr` (inclusive) and\n   * `toSequenceNr` (inclusive).\n   */\n  def messages(\n      persistenceId: String,\n      fromSequenceNr: Long,\n      toSequenceNr: Long,\n      max: Long): Source[Try[(PersistentRepr, Long)], NotUsed]\n\n  /**\n   * Returns a Source of PersistentRepr and ordering number for a certain persistenceId.\n   * It includes the events with sequenceNr between `fromSequenceNr` (inclusive) and\n   * `toSequenceNr` (inclusive).\n   */\n  def messagesWithBatch(\n      persistenceId: String,\n      fromSequenceNr: Long,\n      toSequenceNr: Long,\n      batchSize: Int,\n      refreshInterval: Option[(FiniteDuration, Scheduler)]): Source[Try[(PersistentRepr, Long)], NotUsed]\n\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalDaoWithUpdates.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao\n\nimport akka.Done\n\nimport scala.concurrent.Future\n\n/**\n * A [[JournalDao]] with extended capabilities, such as updating payloads and tags of existing events.\n * These operations should be used sparingly, for example for migrating data from un-encrypted to encrypted formats\n */\ntrait JournalDaoWithUpdates extends JournalDao {\n\n  /**\n   * Update (!) an existing event with the passed in data.\n   */\n  def update(persistenceId: String, sequenceNr: Long, payload: AnyRef): Future[Done]\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalQueries.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao\n\nimport akka.persistence.jdbc.config.{ EventJournalTableConfiguration, EventTagTableConfiguration }\nimport akka.persistence.jdbc.journal.dao.JournalTables.{ JournalAkkaSerializationRow, TagRow }\nimport slick.jdbc.JdbcProfile\n\nimport scala.concurrent.ExecutionContext\n\nclass JournalQueries(\n    val profile: JdbcProfile,\n    override val journalTableCfg: EventJournalTableConfiguration,\n    override val tagTableCfg: EventTagTableConfiguration)\n    extends JournalTables {\n\n  import profile.api._\n\n  private val JournalTableC = Compiled(JournalTable)\n  private val insertAndReturn = JournalTable.returning(JournalTable.map(_.ordering))\n  private val TagTableC = Compiled(TagTable)\n\n  def writeJournalRows(xs: Seq[(JournalAkkaSerializationRow, Set[String])])(\n      implicit ec: ExecutionContext): DBIOAction[Any, NoStream, Effect.Write] = {\n    val sorted = xs.sortBy(event => event._1.sequenceNumber)\n    if (sorted.exists(_._2.nonEmpty)) {\n      // only if there are any tags\n      writeEventsAndTags(sorted)\n    } else {\n      // optimization avoid some work when not using tags\n      val events = sorted.map(_._1)\n      JournalTableC ++= events\n    }\n  }\n\n  private def writeEventsAndTags(sorted: Seq[(JournalAkkaSerializationRow, Set[String])])(\n      implicit ec: ExecutionContext): DBIOAction[Any, NoStream, Effect.Write] = {\n    val (events, _) = sorted.unzip\n    if (tagTableCfg.legacyTagKey) {\n      for {\n        ids <- insertAndReturn ++= events\n        tagInserts = ids.zip(sorted).flatMap { case (id, (e, tags)) =>\n          tags.map(tag => TagRow(Some(id), Some(e.persistenceId), Some(e.sequenceNumber), tag))\n        }\n        _ <- TagTableC ++= tagInserts\n      } yield ()\n    } else {\n      val tagInserts = sorted.map { case (e, 
tags) =>\n        tags.map(t => TagRow(None, Some(e.persistenceId), Some(e.sequenceNumber), t))\n      }\n      // optimization using batch insert\n      for {\n        _ <- JournalTableC ++= events\n        _ <- TagTableC ++= tagInserts.flatten\n      } yield ()\n    }\n  }\n\n  private def selectAllJournalForPersistenceIdDesc(persistenceId: Rep[String]) =\n    selectAllJournalForPersistenceId(persistenceId).sortBy(_.sequenceNumber.desc)\n\n  private def selectAllJournalForPersistenceId(persistenceId: Rep[String]) =\n    JournalTable.filter(_.persistenceId === persistenceId).sortBy(_.sequenceNumber.desc)\n\n  def delete(persistenceId: String, toSequenceNr: Long) = {\n    JournalTable.filter(_.persistenceId === persistenceId).filter(_.sequenceNumber <= toSequenceNr).delete\n  }\n\n  private[akka] def markAsDeleted(persistenceId: String, seqNr: Long) =\n    JournalTable\n      .filter(_.persistenceId === persistenceId)\n      .filter(_.sequenceNumber === seqNr)\n      .filter(_.deleted === false)\n      .map(_.deleted)\n      .update(true)\n\n  @deprecated(message = \"Intended to be internal API\", since = \"5.4.2\")\n  def markJournalMessagesAsDeleted(persistenceId: String, maxSequenceNr: Long) =\n    JournalTable\n      .filter(_.persistenceId === persistenceId)\n      .filter(_.sequenceNumber <= maxSequenceNr)\n      .filter(_.deleted === false)\n      .map(_.deleted)\n      .update(true)\n\n  private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] =\n    selectAllJournalForPersistenceId(persistenceId).take(1).map(_.sequenceNumber).max\n\n  private def _highestMarkedSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] =\n    selectAllJournalForPersistenceId(persistenceId).filter(_.deleted === true).take(1).map(_.sequenceNumber).max\n\n  val highestSequenceNrForPersistenceId = Compiled(_highestSequenceNrForPersistenceId _)\n\n  val highestMarkedSequenceNrForPersistenceId = 
Compiled(_highestMarkedSequenceNrForPersistenceId _)\n\n  private def _selectByPersistenceIdAndMaxSequenceNumber(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) =\n    selectAllJournalForPersistenceIdDesc(persistenceId).filter(_.sequenceNumber <= maxSequenceNr)\n\n  val selectByPersistenceIdAndMaxSequenceNumber = Compiled(_selectByPersistenceIdAndMaxSequenceNumber _)\n\n  private def _allPersistenceIdsDistinct: Query[Rep[String], String, Seq] =\n    JournalTable.map(_.persistenceId).distinct\n\n  val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct)\n\n  def journalRowByPersistenceIds(persistenceIds: Iterable[String]): Query[Rep[String], String, Seq] =\n    for {\n      query <- JournalTable.map(_.persistenceId)\n      if query.inSetBind(persistenceIds)\n    } yield query\n\n  private def _messagesQuery(\n      persistenceId: Rep[String],\n      fromSequenceNr: Rep[Long],\n      toSequenceNr: Rep[Long],\n      max: ConstColumn[Long]) =\n    JournalTable\n      .filter(_.persistenceId === persistenceId)\n      .filter(_.deleted === false)\n      .filter(_.sequenceNumber >= fromSequenceNr)\n      .filter(_.sequenceNumber <= toSequenceNr)\n      .sortBy(_.sequenceNumber.asc)\n      .take(max)\n\n  val messagesQuery = Compiled(_messagesQuery _)\n\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/JournalTables.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao\n\nimport akka.annotation.InternalApi\nimport akka.persistence.jdbc.config.{ EventJournalTableConfiguration, EventTagTableConfiguration }\nimport akka.persistence.jdbc.journal.dao.JournalTables.{ JournalAkkaSerializationRow, TagRow }\n\n/**\n * INTERNAL API\n */\n@InternalApi\nobject JournalTables {\n  case class JournalAkkaSerializationRow(\n      ordering: Long,\n      deleted: Boolean,\n      persistenceId: String,\n      sequenceNumber: Long,\n      writer: String,\n      writeTimestamp: Long,\n      adapterManifest: String,\n      eventPayload: Array[Byte],\n      eventSerId: Int,\n      eventSerManifest: String,\n      metaPayload: Option[Array[Byte]],\n      metaSerId: Option[Int],\n      metaSerManifest: Option[String])\n\n  case class TagRow(eventId: Option[Long], persistenceId: Option[String], sequenceNumber: Option[Long], tag: String)\n}\n\n/**\n * For the schema added in 5.0.0\n * INTERNAL API\n */\n@InternalApi\ntrait JournalTables {\n  val profile: slick.jdbc.JdbcProfile\n\n  import profile.api._\n\n  def journalTableCfg: EventJournalTableConfiguration\n  def tagTableCfg: EventTagTableConfiguration\n\n  class JournalEvents(_tableTag: Tag)\n      extends Table[JournalAkkaSerializationRow](\n        _tableTag,\n        _schemaName = journalTableCfg.schemaName,\n        _tableName = journalTableCfg.tableName) {\n    def * =\n      (\n        ordering,\n        deleted,\n        persistenceId,\n        sequenceNumber,\n        writer,\n        timestamp,\n        adapterManifest,\n        eventPayload,\n        eventSerId,\n        eventSerManifest,\n        metaPayload,\n        metaSerId,\n        metaSerManifest).<>((JournalAkkaSerializationRow.apply _).tupled, JournalAkkaSerializationRow.unapply)\n\n    val ordering: Rep[Long] = 
column[Long](journalTableCfg.columnNames.ordering, O.AutoInc)\n    val persistenceId: Rep[String] =\n      column[String](journalTableCfg.columnNames.persistenceId, O.Length(255, varying = true))\n    val sequenceNumber: Rep[Long] = column[Long](journalTableCfg.columnNames.sequenceNumber)\n    val deleted: Rep[Boolean] = column[Boolean](journalTableCfg.columnNames.deleted, O.Default(false))\n\n    val writer: Rep[String] = column[String](journalTableCfg.columnNames.writer)\n    val adapterManifest: Rep[String] = column[String](journalTableCfg.columnNames.adapterManifest)\n    val timestamp: Rep[Long] = column[Long](journalTableCfg.columnNames.writeTimestamp)\n\n    val eventPayload: Rep[Array[Byte]] = column[Array[Byte]](journalTableCfg.columnNames.eventPayload)\n    val eventSerId: Rep[Int] = column[Int](journalTableCfg.columnNames.eventSerId)\n    val eventSerManifest: Rep[String] = column[String](journalTableCfg.columnNames.eventSerManifest)\n\n    val metaPayload: Rep[Option[Array[Byte]]] = column[Option[Array[Byte]]](journalTableCfg.columnNames.metaPayload)\n    val metaSerId: Rep[Option[Int]] = column[Option[Int]](journalTableCfg.columnNames.metaSerId)\n    val metaSerManifest: Rep[Option[String]] = column[Option[String]](journalTableCfg.columnNames.metaSerManifest)\n\n    val pk = primaryKey(s\"${tableName}_pk\", (persistenceId, sequenceNumber))\n    val orderingIdx = index(s\"${tableName}_ordering_idx\", ordering, unique = true)\n  }\n\n  lazy val JournalTable = new TableQuery(tag => new JournalEvents(tag))\n\n  class EventTags(_tableTag: Tag) extends Table[TagRow](_tableTag, tagTableCfg.schemaName, tagTableCfg.tableName) {\n    override def * = (eventId, persistenceId, sequenceNumber, tag).<>((TagRow.apply _).tupled, TagRow.unapply)\n    // allow null value insert.\n    val eventId: Rep[Option[Long]] = column[Option[Long]](tagTableCfg.columnNames.eventId)\n    val persistenceId: Rep[Option[String]] = 
column[Option[String]](tagTableCfg.columnNames.persistenceId)\n    val sequenceNumber: Rep[Option[Long]] = column[Option[Long]](tagTableCfg.columnNames.sequenceNumber)\n    val tag: Rep[String] = column[String](tagTableCfg.columnNames.tag)\n\n    val pk = primaryKey(s\"${tagTableCfg.tableName}_pk\", (persistenceId, sequenceNumber, tag))\n    val journalEvent =\n      foreignKey(s\"fk_${journalTableCfg.tableName}\", (persistenceId, sequenceNumber), JournalTable)(e =>\n        (Rep.Some(e.persistenceId), Rep.Some(e.sequenceNumber)))\n  }\n\n  lazy val TagTable = new TableQuery(tag => new EventTags(tag))\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/ByteArrayJournalDao.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao.legacy\n\nimport akka.persistence.jdbc.config.{ BaseDaoConfig, JournalConfig }\nimport akka.persistence.jdbc.journal.dao.{ BaseDao, BaseJournalDaoWithReadMessages, H2Compat, JournalDaoWithUpdates }\nimport akka.persistence.jdbc.serialization.FlowPersistentReprSerializer\nimport akka.persistence.{ AtomicWrite, PersistentRepr }\nimport akka.serialization.Serialization\nimport akka.stream.Materializer\nimport akka.stream.scaladsl.Source\nimport akka.{ Done, NotUsed }\nimport org.slf4j.LoggerFactory\nimport slick.jdbc.JdbcBackend.Database\nimport slick.jdbc.JdbcProfile\n\nimport scala.annotation.nowarn\nimport scala.collection.immutable.{ Nil, Seq }\nimport scala.concurrent.{ ExecutionContext, Future }\nimport scala.util.{ Failure, Success, Try }\n\nclass ByteArrayJournalDao(\n    val db: Database,\n    val profile: JdbcProfile,\n    val journalConfig: JournalConfig,\n    serialization: Serialization)(implicit val ec: ExecutionContext, val mat: Materializer)\n    extends BaseByteArrayJournalDao {\n  val queries = new JournalQueries(profile, journalConfig.journalTableConfiguration)\n  val serializer: ByteArrayJournalSerializer =\n    new ByteArrayJournalSerializer(serialization, journalConfig.pluginConfig.tagSeparator)\n}\n\n/**\n * The DefaultJournalDao contains all the knowledge to persist and load serialized journal entries\n */\ntrait BaseByteArrayJournalDao\n    extends BaseDao[JournalRow]\n    with JournalDaoWithUpdates\n    with BaseJournalDaoWithReadMessages\n    with H2Compat {\n  val db: Database\n  val profile: JdbcProfile\n  val queries: JournalQueries\n  val journalConfig: JournalConfig\n  override def baseDaoConfig: BaseDaoConfig = journalConfig.daoConfig\n  @nowarn(\"msg=deprecated\")\n  val serializer: FlowPersistentReprSerializer[JournalRow]\n  
implicit val ec: ExecutionContext\n  implicit val mat: Materializer\n\n  import profile.api._\n\n  val logger = LoggerFactory.getLogger(this.getClass)\n\n  def writeJournalRows(xs: Seq[JournalRow]): Future[Unit] = { // Write atomically without auto-commit\n    db.run(queries.writeJournalRows(xs).transactionally).map(_ => ())\n  }\n\n  /**\n   * @see [[akka.persistence.journal.AsyncWriteJournal.asyncWriteMessages(messages)]]\n   */\n  def asyncWriteMessages(messages: Seq[AtomicWrite]): Future[Seq[Try[Unit]]] = {\n    val serializedTries: Seq[Try[Seq[JournalRow]]] = serializer.serialize(messages)\n\n    // If serialization fails for some AtomicWrites, the other AtomicWrites may still be written\n    val rowsToWrite: Seq[JournalRow] = for {\n      serializeTry <- serializedTries\n      row <- serializeTry.getOrElse(Seq.empty)\n    } yield row\n\n    def resultWhenWriteComplete =\n      if (serializedTries.forall(_.isSuccess)) Nil else serializedTries.map(_.map(_ => ()))\n\n    queueWriteJournalRows(rowsToWrite).map(_ => resultWhenWriteComplete)\n  }\n\n  override def deleteEventsTo(\n      persistenceId: String,\n      maxSequenceNr: Long,\n      resetSequenceNumber: Boolean): Future[Unit] = {\n    val actions: DBIOAction[Unit, NoStream, Effect.Write with Effect.Read] = if (resetSequenceNumber) {\n      queries.delete(persistenceId, maxSequenceNr).map(_ => ())\n    } else {\n      // We should keep journal record with highest sequence number in order to be compliant\n      // with @see [[akka.persistence.journal.JournalSpec]]\n      for {\n        _ <- queries.markJournalMessagesAsDeleted(persistenceId, maxSequenceNr)\n        highestMarkedSequenceNr <- highestMarkedSequenceNr(persistenceId)\n        _ <- queries.delete(persistenceId, highestMarkedSequenceNr.getOrElse(0L) - 1)\n      } yield ()\n    }\n\n    db.run(actions.transactionally)\n  }\n\n  def update(persistenceId: String, sequenceNr: Long, payload: AnyRef): Future[Done] = {\n    val write = 
PersistentRepr(payload, sequenceNr, persistenceId)\n    val serializedRow = serializer.serialize(write) match {\n      case Success(t) => t\n      case Failure(cause) =>\n        throw new IllegalArgumentException(\n          s\"Failed to serialize ${write.getClass} for update of [$persistenceId] @ [$sequenceNr]\",\n          cause)\n    }\n    db.run(queries.update(persistenceId, sequenceNr, serializedRow.message).map(_ => Done))\n  }\n\n  private def highestMarkedSequenceNr(persistenceId: String) =\n    queries.highestMarkedSequenceNrForPersistenceId(persistenceId).result\n\n  override def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] =\n    for {\n      maybeHighestSeqNo <- db.run(queries.highestSequenceNrForPersistenceId(persistenceId).result)\n    } yield maybeHighestSeqNo.getOrElse(0L)\n\n  override def messages(\n      persistenceId: String,\n      fromSequenceNr: Long,\n      toSequenceNr: Long,\n      max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] =\n    Source\n      .fromPublisher(\n        db.stream(\n          queries.messagesQuery((persistenceId, fromSequenceNr, toSequenceNr, correctMaxForH2Driver(max))).result))\n      .via(serializer.deserializeFlow)\n      .map {\n        case Success((repr, _, ordering)) => Success(repr -> ordering)\n        case Failure(e)                   => Failure(e)\n      }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/ByteArrayJournalSerializer.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc\npackage journal.dao.legacy\n\nimport akka.persistence.PersistentRepr\nimport akka.persistence.jdbc.serialization.FlowPersistentReprSerializer\nimport akka.serialization.Serialization\n\nimport scala.annotation.nowarn\nimport scala.collection.immutable._\nimport scala.util.Try\n\n@nowarn(\"msg=deprecated\")\nclass ByteArrayJournalSerializer(serialization: Serialization, separator: String)\n    extends FlowPersistentReprSerializer[JournalRow] {\n  override def serialize(persistentRepr: PersistentRepr, tags: Set[String]): Try[JournalRow] = {\n    serialization\n      .serialize(persistentRepr)\n      .map(\n        JournalRow(\n          Long.MinValue,\n          persistentRepr.deleted,\n          persistentRepr.persistenceId,\n          persistentRepr.sequenceNr,\n          _,\n          encodeTags(tags, separator)))\n  }\n\n  override def deserialize(journalRow: JournalRow): Try[(PersistentRepr, Set[String], Long)] = {\n    serialization\n      .deserialize(journalRow.message, classOf[PersistentRepr])\n      .map((_, decodeTags(journalRow.tags, separator), journalRow.ordering))\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/JournalQueries.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc\npackage journal.dao.legacy\n\nimport akka.persistence.jdbc.config.LegacyJournalTableConfiguration\nimport slick.jdbc.JdbcProfile\n\nclass JournalQueries(val profile: JdbcProfile, override val journalTableCfg: LegacyJournalTableConfiguration)\n    extends JournalTables {\n  import profile.api._\n\n  private val JournalTableC = Compiled(JournalTable)\n\n  def writeJournalRows(xs: Seq[JournalRow]) =\n    JournalTableC ++= xs.sortBy(_.sequenceNumber)\n\n  private def selectAllJournalForPersistenceIdDesc(persistenceId: Rep[String]) =\n    selectAllJournalForPersistenceId(persistenceId).sortBy(_.sequenceNumber.desc)\n\n  private def selectAllJournalForPersistenceId(persistenceId: Rep[String]) =\n    JournalTable.filter(_.persistenceId === persistenceId).sortBy(_.sequenceNumber.desc)\n\n  def delete(persistenceId: String, toSequenceNr: Long) = {\n    JournalTable.filter(_.persistenceId === persistenceId).filter(_.sequenceNumber <= toSequenceNr).delete\n  }\n\n  /**\n   * Updates (!) a payload stored in a specific events row.\n   * Intended to be used sparingly, e.g. 
moving all events to their encrypted counterparts.\n   */\n  def update(persistenceId: String, seqNr: Long, replacement: Array[Byte]) = {\n    val baseQuery = JournalTable.filter(_.persistenceId === persistenceId).filter(_.sequenceNumber === seqNr)\n\n    baseQuery.map(_.message).update(replacement)\n  }\n\n  def markJournalMessagesAsDeleted(persistenceId: String, maxSequenceNr: Long) =\n    JournalTable\n      .filter(_.persistenceId === persistenceId)\n      .filter(_.sequenceNumber <= maxSequenceNr)\n      .filter(_.deleted === false)\n      .map(_.deleted)\n      .update(true)\n\n  private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] =\n    selectAllJournalForPersistenceId(persistenceId).take(1).map(_.sequenceNumber).max\n\n  private def _highestMarkedSequenceNrForPersistenceId(persistenceId: Rep[String]): Rep[Option[Long]] =\n    selectAllJournalForPersistenceId(persistenceId).filter(_.deleted === true).take(1).map(_.sequenceNumber).max\n\n  val highestSequenceNrForPersistenceId = Compiled(_highestSequenceNrForPersistenceId _)\n\n  val highestMarkedSequenceNrForPersistenceId = Compiled(_highestMarkedSequenceNrForPersistenceId _)\n\n  private def _selectByPersistenceIdAndMaxSequenceNumber(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) =\n    selectAllJournalForPersistenceIdDesc(persistenceId).filter(_.sequenceNumber <= maxSequenceNr)\n\n  val selectByPersistenceIdAndMaxSequenceNumber = Compiled(_selectByPersistenceIdAndMaxSequenceNumber _)\n\n  private def _allPersistenceIdsDistinct: Query[Rep[String], String, Seq] =\n    JournalTable.map(_.persistenceId).distinct\n\n  val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct)\n\n  def journalRowByPersistenceIds(persistenceIds: Iterable[String]): Query[Rep[String], String, Seq] =\n    for {\n      query <- JournalTable.map(_.persistenceId)\n      if query.inSetBind(persistenceIds)\n    } yield query\n\n  private def _messagesQuery(\n      persistenceId: 
Rep[String],\n      fromSequenceNr: Rep[Long],\n      toSequenceNr: Rep[Long],\n      max: ConstColumn[Long]) =\n    JournalTable\n      .filter(_.persistenceId === persistenceId)\n      .filter(_.deleted === false)\n      .filter(_.sequenceNumber >= fromSequenceNr)\n      .filter(_.sequenceNumber <= toSequenceNr)\n      .sortBy(_.sequenceNumber.asc)\n      .take(max)\n\n  val messagesQuery = Compiled(_messagesQuery _)\n\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/JournalTables.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao.legacy\n\nimport akka.persistence.jdbc.config.LegacyJournalTableConfiguration\n\ntrait JournalTables {\n  val profile: slick.jdbc.JdbcProfile\n\n  import profile.api._\n\n  def journalTableCfg: LegacyJournalTableConfiguration\n\n  class Journal(_tableTag: Tag)\n      extends Table[JournalRow](\n        _tableTag,\n        _schemaName = journalTableCfg.schemaName,\n        _tableName = journalTableCfg.tableName) {\n    def * = (ordering, deleted, persistenceId, sequenceNumber, message, tags)\n      .<>((JournalRow.apply _).tupled, JournalRow.unapply)\n\n    val ordering: Rep[Long] = column[Long](journalTableCfg.columnNames.ordering, O.AutoInc)\n    val persistenceId: Rep[String] =\n      column[String](journalTableCfg.columnNames.persistenceId, O.Length(255, varying = true))\n    val sequenceNumber: Rep[Long] = column[Long](journalTableCfg.columnNames.sequenceNumber)\n    val deleted: Rep[Boolean] = column[Boolean](journalTableCfg.columnNames.deleted, O.Default(false))\n    val tags: Rep[Option[String]] =\n      column[Option[String]](journalTableCfg.columnNames.tags, O.Length(255, varying = true))\n    val message: Rep[Array[Byte]] = column[Array[Byte]](journalTableCfg.columnNames.message)\n    val pk = primaryKey(s\"${tableName}_pk\", (persistenceId, sequenceNumber))\n    val orderingIdx = index(s\"${tableName}_ordering_idx\", ordering, unique = true)\n  }\n\n  lazy val JournalTable = new TableQuery(tag => new Journal(tag))\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/package.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao\n\npackage object legacy {\n\n  final case class JournalRow(\n      ordering: Long,\n      deleted: Boolean,\n      persistenceId: String,\n      sequenceNumber: Long,\n      message: Array[Byte],\n      tags: Option[String] = None)\n\n  def encodeTags(tags: Set[String], separator: String): Option[String] =\n    if (tags.isEmpty) None else Option(tags.mkString(separator))\n\n  def decodeTags(tags: Option[String], separator: String): Set[String] =\n    tags.map(_.split(separator).toSet).getOrElse(Set.empty[String])\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/query/JdbcReadJournalProvider.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport akka.actor.ExtendedActorSystem\nimport akka.persistence.query.ReadJournalProvider\nimport com.typesafe.config.Config\n\nclass JdbcReadJournalProvider(system: ExtendedActorSystem, config: Config, configPath: String)\n    extends ReadJournalProvider {\n  override def scaladslReadJournal(): scaladsl.JdbcReadJournal =\n    new scaladsl.JdbcReadJournal(config, configPath)(system)\n\n  override def javadslReadJournal(): javadsl.JdbcReadJournal = new javadsl.JdbcReadJournal(scaladslReadJournal())\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/query/JournalSequenceActor.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc\npackage query\n\nimport akka.actor.{ Actor, ActorLogging, Props, Status, Timers }\nimport akka.pattern.pipe\nimport akka.persistence.jdbc.config.JournalSequenceRetrievalConfig\nimport akka.persistence.jdbc.query.dao.ReadJournalDao\nimport akka.stream.Materializer\nimport akka.stream.scaladsl.Sink\n\nimport scala.collection.immutable.NumericRange\nimport scala.concurrent.duration.FiniteDuration\n\nobject JournalSequenceActor {\n  def props(readJournalDao: ReadJournalDao, config: JournalSequenceRetrievalConfig)(\n      implicit materializer: Materializer): Props = Props(new JournalSequenceActor(readJournalDao, config))\n\n  private case object QueryOrderingIds\n  private case class NewOrderingIds(originalOffset: Long, elements: Seq[OrderingId])\n\n  private case class ScheduleAssumeMaxOrderingId(max: OrderingId)\n  private case class AssumeMaxOrderingId(max: OrderingId)\n\n  case object GetMaxOrderingId\n  case class MaxOrderingId(maxOrdering: OrderingId)\n\n  private case object QueryOrderingIdsTimerKey\n  private case object AssumeMaxOrderingIdTimerKey\n\n  private type OrderingId = Long\n\n  /**\n   * Efficient representation of missing elements using NumericRanges.\n   * It can be seen as a collection of OrderingIds\n   */\n  private case class MissingElements(elements: Seq[NumericRange[OrderingId]]) {\n    def addRange(from: OrderingId, until: OrderingId): MissingElements = {\n      val newRange = from.until(until)\n      MissingElements(elements :+ newRange)\n    }\n    def contains(id: OrderingId): Boolean = elements.exists(_.containsTyped(id))\n    def isEmpty: Boolean = elements.forall(_.isEmpty)\n  }\n  private object MissingElements {\n    def empty: MissingElements = MissingElements(Vector.empty)\n  }\n}\n\n/**\n * To support the EventsByTag query, this actor 
keeps track of which rows are visible in the database.\n * This is required to guarantee the EventByTag does not skip any rows in case rows with a higher (ordering) id are\n * visible in the database before rows with a lower (ordering) id.\n */\nclass JournalSequenceActor(readJournalDao: ReadJournalDao, config: JournalSequenceRetrievalConfig)(\n    implicit materializer: Materializer)\n    extends Actor\n    with ActorLogging\n    with Timers {\n  import JournalSequenceActor._\n  import context.dispatcher\n  import config.{ batchSize, maxBackoffQueryDelay, maxTries, queryDelay }\n\n  override def receive: Receive = receive(0L, Map.empty, 0)\n\n  override def preStart(): Unit = {\n    self ! QueryOrderingIds\n    readJournalDao.maxJournalSequence().mapTo[Long].onComplete {\n      case scala.util.Success(maxInDatabase) =>\n        self ! ScheduleAssumeMaxOrderingId(maxInDatabase)\n      case scala.util.Failure(t) =>\n        log.info(\"Failed to recover fast, using event-by-event recovery instead. Cause: {}\", t)\n    }\n  }\n\n  /**\n   * @param currentMaxOrdering The highest ordering value for which it is known that no missing elements exist\n   * @param missingByCounter A map with missing orderingIds. 
The key of the map is the count at which the missing elements\n   *                         can be assumed to be \"skipped ids\" (they are no longer assumed missing).\n   * @param moduloCounter A counter which is incremented every time a new query have been executed, modulo `maxTries`\n   * @param previousDelay The last used delay (may change in case failures occur)\n   */\n  private def receive(\n      currentMaxOrdering: OrderingId,\n      missingByCounter: Map[Int, MissingElements],\n      moduloCounter: Int,\n      previousDelay: FiniteDuration = queryDelay): Receive = {\n    case ScheduleAssumeMaxOrderingId(max) =>\n      // All elements smaller than max can be assumed missing after this delay\n      val delay = queryDelay * maxTries\n      timers.startSingleTimer(key = AssumeMaxOrderingIdTimerKey, AssumeMaxOrderingId(max), delay)\n\n    case AssumeMaxOrderingId(max) =>\n      if (currentMaxOrdering < max) {\n        context.become(receive(max, missingByCounter, moduloCounter, previousDelay))\n      }\n\n    case GetMaxOrderingId =>\n      sender() ! MaxOrderingId(currentMaxOrdering)\n\n    case QueryOrderingIds =>\n      readJournalDao\n        .journalSequence(currentMaxOrdering, batchSize)\n        .runWith(Sink.seq)\n        .map(result => NewOrderingIds(currentMaxOrdering, result))\n        .pipeTo(self)\n\n    case NewOrderingIds(originalOffset, _) if originalOffset < currentMaxOrdering =>\n      // search was done using an offset that became obsolete in the meantime\n      // therefore we start a new query\n      self ! 
QueryOrderingIds\n\n    case NewOrderingIds(_, elements) =>\n      findGaps(elements, currentMaxOrdering, missingByCounter, moduloCounter)\n\n    case Status.Failure(t) =>\n      val newDelay = maxBackoffQueryDelay.min(previousDelay * 2)\n      if (newDelay == maxBackoffQueryDelay) {\n        log.warning(\"Failed to query max ordering id because of {}, retrying in {}\", t, newDelay)\n      }\n      scheduleQuery(newDelay)\n      context.become(receive(currentMaxOrdering, missingByCounter, moduloCounter, newDelay))\n  }\n\n  /**\n   * This method that implements the \"find gaps\" algo. It's the meat and main purpose of this actor.\n   */\n  private def findGaps(\n      elements: Seq[OrderingId],\n      currentMaxOrdering: OrderingId,\n      missingByCounter: Map[Int, MissingElements],\n      moduloCounter: Int): Unit = {\n    // list of elements that will be considered as genuine gaps.\n    // `givenUp` is either empty or is was filled on a previous iteration\n    val givenUp = missingByCounter.getOrElse(moduloCounter, MissingElements.empty)\n\n    val (nextMax, _, missingElems) =\n      // using the ordering elements that were fetched, we verify if there are any gaps\n      elements.foldLeft[(OrderingId, OrderingId, MissingElements)](\n        (currentMaxOrdering, currentMaxOrdering, MissingElements.empty)) {\n        case ((currentMax, previousElement, missing), currentElement) =>\n          // we must decide if we move the cursor forward\n          val newMax = {\n            val maxCandidate = currentMax + 1\n            if ((currentElement - maxCandidate) < Int.MaxValue) {\n              if ((currentMax + 1).until(currentElement).forall(givenUp.contains)) {\n                // we move the cursor forward when:\n                // 1) they have been detected as missing on previous iteration, it's time now to give up\n                // 2) current + 1 == currentElement (meaning no gap). 
Note that `forall` on an empty range always returns true\n                currentElement\n              } else currentMax\n            } else {\n              // we can't iterate over this... assume that forall failed\n              // the AssumeMaxOrderingId will advance the currentMaxOrdering\n              currentMax\n            }\n          }\n\n          // we accumulate in newMissing the gaps we detect on each iteration\n          val newMissing =\n            if (previousElement + 1 == currentElement || newMax == currentElement) missing\n            else missing.addRange(previousElement + 1, currentElement)\n\n          (newMax, currentElement, newMissing)\n      }\n\n    val newMissingByCounter = missingByCounter + (moduloCounter -> missingElems)\n\n    // did we detect gaps in the current batch?\n    val noGapsFound = missingElems.isEmpty\n\n    // full batch means that we retrieved as much elements as the batchSize\n    // that happens when we are not yet at the end of the stream\n    val isFullBatch = elements.size == batchSize\n\n    if (noGapsFound && isFullBatch) {\n      // Many elements have been retrieved but none are missing\n      // We can query again immediately, as this allows the actor to rapidly retrieve the real max ordering\n      self ! QueryOrderingIds\n      context.become(receive(nextMax, newMissingByCounter, moduloCounter))\n    } else {\n      // either we detected gaps or we reached the end of stream (batch not full)\n      // in this case we want to keep querying but not immediately\n      scheduleQuery(queryDelay)\n      context.become(receive(nextMax, newMissingByCounter, (moduloCounter + 1) % maxTries))\n    }\n  }\n\n  def scheduleQuery(delay: FiniteDuration): Unit = {\n    timers.startSingleTimer(key = QueryOrderingIdsTimerKey, QueryOrderingIds, delay)\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/query/dao/DefaultReadJournalDao.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query.dao\nimport akka.NotUsed\nimport akka.persistence.PersistentRepr\nimport akka.persistence.jdbc.AkkaSerialization\nimport akka.persistence.jdbc.config.ReadJournalConfig\nimport akka.persistence.jdbc.journal.dao.{ BaseJournalDaoWithReadMessages, H2Compat }\nimport akka.serialization.Serialization\nimport akka.stream.Materializer\nimport akka.stream.scaladsl.Source\nimport slick.jdbc.JdbcBackend.Database\nimport slick.jdbc.JdbcProfile\n\nimport scala.concurrent.{ ExecutionContext, Future }\nimport scala.util.Try\n\nclass DefaultReadJournalDao(\n    val db: Database,\n    val profile: JdbcProfile,\n    val readJournalConfig: ReadJournalConfig,\n    serialization: Serialization)(implicit val ec: ExecutionContext, val mat: Materializer)\n    extends ReadJournalDao\n    with BaseJournalDaoWithReadMessages\n    with H2Compat {\n  import profile.api._\n\n  val queries = new ReadJournalQueries(profile, readJournalConfig)\n\n  override def allPersistenceIdsSource(max: Long): Source[String, NotUsed] =\n    Source.fromPublisher(db.stream(queries.allPersistenceIdsDistinct(correctMaxForH2Driver(max)).result))\n\n  override def eventsByTag(\n      tag: String,\n      offset: Long,\n      maxOffset: Long,\n      max: Long): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed] = {\n\n    // This doesn't populate the tags. 
AFAICT they aren't used\n    Source\n      .fromPublisher(db.stream(queries.eventsByTag((tag, offset, maxOffset, correctMaxForH2Driver(max))).result))\n      .map(row =>\n        AkkaSerialization.fromRow(serialization)(row).map { case (repr, ordering) => (repr, Set.empty, ordering) })\n  }\n\n  override def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] =\n    Source.fromPublisher(db.stream(queries.journalSequenceQuery((offset, limit)).result))\n\n  override def maxJournalSequence(): Future[Long] =\n    db.run(queries.maxJournalSequenceQuery.result)\n\n  override def messages(\n      persistenceId: String,\n      fromSequenceNr: Long,\n      toSequenceNr: Long,\n      max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] =\n    Source\n      .fromPublisher(\n        db.stream(\n          queries.messagesQuery((persistenceId, fromSequenceNr, toSequenceNr, correctMaxForH2Driver(max))).result))\n      .map(AkkaSerialization.fromRow(serialization)(_))\n\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/query/dao/ReadJournalDao.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query.dao\n\nimport akka.NotUsed\nimport akka.persistence.PersistentRepr\nimport akka.persistence.jdbc.journal.dao.JournalDaoWithReadMessages\nimport akka.stream.scaladsl.Source\n\nimport scala.collection.immutable.Set\nimport scala.concurrent.Future\nimport scala.util.Try\n\ntrait ReadJournalDao extends JournalDaoWithReadMessages {\n\n  /**\n   * Returns distinct stream of persistenceIds\n   */\n  def allPersistenceIdsSource(max: Long): Source[String, NotUsed]\n\n  /**\n   * Returns a Source of deserialized data for certain tag from an offset. The result is sorted by\n   * the global ordering of the events.\n   * Each element with be a try with a PersistentRepr, set of tags, and a Long representing the global ordering of events\n   */\n  def eventsByTag(\n      tag: String,\n      offset: Long,\n      maxOffset: Long,\n      max: Long): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed]\n\n  /**\n   * @param offset Minimum value to retrieve\n   * @param limit Maximum number of values to retrieve\n   * @return A Source of journal event sequence numbers (corresponding to the Ordering column)\n   */\n  def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed]\n\n  /**\n   * @return The value of the maximum (ordering) id in the journal\n   */\n  def maxJournalSequence(): Future[Long]\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/query/dao/ReadJournalQueries.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query.dao\n\nimport akka.persistence.jdbc.config.{ EventJournalTableConfiguration, EventTagTableConfiguration, ReadJournalConfig }\nimport akka.persistence.jdbc.journal.dao.JournalTables\nimport slick.jdbc.JdbcProfile\n\nclass ReadJournalQueries(val profile: JdbcProfile, val readJournalConfig: ReadJournalConfig) extends JournalTables {\n  override val journalTableCfg: EventJournalTableConfiguration = readJournalConfig.eventJournalTableConfiguration\n  override def tagTableCfg: EventTagTableConfiguration = readJournalConfig.eventTagTableConfiguration\n\n  import profile.api._\n\n  def journalRowByPersistenceIds(persistenceIds: Iterable[String]) =\n    for {\n      query <- JournalTable.map(_.persistenceId)\n      if query.inSetBind(persistenceIds)\n    } yield query\n\n  private def _allPersistenceIdsDistinct(max: ConstColumn[Long]): Query[Rep[String], String, Seq] =\n    baseTableQuery().map(_.persistenceId).distinct.take(max)\n\n  private def baseTableQuery() =\n    JournalTable.filter(_.deleted === false)\n\n  private def baseTableWithTagsQuery() = {\n    if (tagTableCfg.legacyTagKey) {\n      baseTableQuery().join(TagTable).on(_.ordering === _.eventId)\n    } else {\n      baseTableQuery()\n        .join(TagTable)\n        .on((e, t) => e.persistenceId === t.persistenceId && e.sequenceNumber === t.sequenceNumber)\n    }\n  }\n\n  val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct _)\n\n  private def _messagesQuery(\n      persistenceId: Rep[String],\n      fromSequenceNr: Rep[Long],\n      toSequenceNr: Rep[Long],\n      max: ConstColumn[Long]) =\n    baseTableQuery()\n      .filter(_.persistenceId === persistenceId)\n      .filter(_.sequenceNumber >= fromSequenceNr)\n      .filter(_.sequenceNumber <= toSequenceNr)\n      .filter(!_.deleted)\n      
.sortBy(_.sequenceNumber.asc)\n      .take(max)\n\n  val messagesQuery = Compiled(_messagesQuery _)\n\n  private def _eventsByTag(\n      tag: Rep[String],\n      offset: ConstColumn[Long],\n      maxOffset: ConstColumn[Long],\n      max: ConstColumn[Long]) = {\n    baseTableWithTagsQuery()\n      .filter(_._2.tag === tag)\n      .sortBy(_._1.ordering.asc)\n      .filter(row => row._1.ordering > offset && row._1.ordering <= maxOffset)\n      .take(max)\n      .map(_._1)\n  }\n\n  val eventsByTag = Compiled(_eventsByTag _)\n\n  private def _journalSequenceQuery(from: ConstColumn[Long], limit: ConstColumn[Long]) =\n    JournalTable.filter(_.ordering > from).map(_.ordering).sorted.take(limit)\n\n  val journalSequenceQuery = Compiled(_journalSequenceQuery _)\n\n  val maxJournalSequenceQuery = Compiled {\n    JournalTable.map(_.ordering).max.getOrElse(0L)\n  }\n\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/query/dao/legacy/ByteArrayReadJournalDao.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query.dao.legacy\n\nimport akka.NotUsed\nimport akka.persistence.PersistentRepr\nimport akka.persistence.jdbc.config.ReadJournalConfig\nimport akka.persistence.jdbc.journal.dao.{ BaseJournalDaoWithReadMessages, H2Compat }\nimport akka.persistence.jdbc.journal.dao.legacy.{ ByteArrayJournalSerializer, JournalRow }\nimport akka.persistence.jdbc.query.dao.ReadJournalDao\nimport akka.persistence.jdbc.query.dao.legacy.TagFilterFlow.perfectlyMatchTag\nimport akka.persistence.jdbc.serialization.FlowPersistentReprSerializer\nimport akka.serialization.Serialization\nimport akka.stream.Materializer\nimport akka.stream.scaladsl.{ Flow, Source }\nimport slick.jdbc.JdbcBackend._\nimport slick.jdbc.{ GetResult, JdbcProfile }\n\nimport scala.annotation.nowarn\nimport scala.collection.immutable._\nimport scala.concurrent.{ ExecutionContext, Future }\nimport scala.util.{ Failure, Success, Try }\n\ntrait BaseByteArrayReadJournalDao extends ReadJournalDao with BaseJournalDaoWithReadMessages with H2Compat {\n  def db: Database\n  val profile: JdbcProfile\n  def queries: ReadJournalQueries\n  @nowarn(\"msg=deprecated\")\n  def serializer: FlowPersistentReprSerializer[JournalRow]\n  def readJournalConfig: ReadJournalConfig\n\n  import profile.api._\n\n  override def allPersistenceIdsSource(max: Long): Source[String, NotUsed] =\n    Source.fromPublisher(db.stream(queries.allPersistenceIdsDistinct(correctMaxForH2Driver(max)).result))\n\n  override def eventsByTag(\n      tag: String,\n      offset: Long,\n      maxOffset: Long,\n      max: Long): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed] = {\n\n    val publisher = db.stream(queries.eventsByTag((s\"%$tag%\", offset, maxOffset, correctMaxForH2Driver(max))).result)\n    // applies workaround for 
https://github.com/akka/akka-persistence-jdbc/issues/168\n    Source\n      .fromPublisher(publisher)\n      .via(perfectlyMatchTag(tag, readJournalConfig.pluginConfig.tagSeparator))\n      .via(serializer.deserializeFlow)\n  }\n\n  override def messages(\n      persistenceId: String,\n      fromSequenceNr: Long,\n      toSequenceNr: Long,\n      max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = {\n    Source\n      .fromPublisher(\n        db.stream(\n          queries.messagesQuery((persistenceId, fromSequenceNr, toSequenceNr, correctMaxForH2Driver(max))).result))\n      .via(serializer.deserializeFlow)\n      .map {\n        case Success((repr, _, ordering)) => Success(repr -> ordering)\n        case Failure(e)                   => Failure(e)\n      }\n  }\n\n  override def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] =\n    Source.fromPublisher(db.stream(queries.journalSequenceQuery((offset, limit)).result))\n\n  override def maxJournalSequence(): Future[Long] = {\n    db.run(queries.maxJournalSequenceQuery.result)\n  }\n}\n\nobject TagFilterFlow {\n  /*\n   * Returns a Flow that retains every event with tags that perfectly match passed tag.\n   * This is a workaround for bug https://github.com/akka/akka-persistence-jdbc/issues/168\n   */\n  private[dao] def perfectlyMatchTag(tag: String, separator: String) =\n    Flow[JournalRow].filter(_.tags.exists(tags => tags.split(separator).contains(tag)))\n}\n\ntrait OracleReadJournalDao extends ReadJournalDao {\n  val db: Database\n  val profile: JdbcProfile\n  val readJournalConfig: ReadJournalConfig\n  val queries: ReadJournalQueries\n  @nowarn(\"msg=deprecated\")\n  val serializer: FlowPersistentReprSerializer[JournalRow]\n\n  import readJournalConfig.journalTableConfiguration._\n  import columnNames._\n\n  val theTableName = schemaName.map(_ + \".\").getOrElse(\"\") + s\"\"\"\"$tableName\"\"\"\"\n\n  import profile.api._\n\n  private def isOracleDriver(profile: JdbcProfile): Boolean 
=\n    profile match {\n      case slick.jdbc.OracleProfile => true\n      case _                        => false\n    }\n\n  abstract override def allPersistenceIdsSource(max: Long): Source[String, NotUsed] = {\n    if (isOracleDriver(profile)) {\n      val selectStatement =\n        sql\"\"\"SELECT DISTINCT \"#$persistenceId\" FROM #$theTableName WHERE rownum <= $max\"\"\".as[String]\n      Source.fromPublisher(db.stream(selectStatement))\n    } else {\n      super.allPersistenceIdsSource(max)\n    }\n  }\n\n  implicit val getJournalRow: GetResult[JournalRow] =\n    GetResult(r => JournalRow(r.<<, r.<<, r.<<, r.<<, r.nextBytes(), r.<<))\n\n  abstract override def eventsByTag(\n      tag: String,\n      offset: Long,\n      maxOffset: Long,\n      max: Long): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed] = {\n    if (isOracleDriver(profile)) {\n      val theOffset = Math.max(0, offset)\n      val theTag = s\"%$tag%\"\n      val selectStatement =\n        sql\"\"\"\n            SELECT \"#$ordering\", \"#$deleted\", \"#$persistenceId\", \"#$sequenceNumber\", \"#$message\", \"#$tags\"\n            FROM (\n              SELECT * FROM #$theTableName\n              WHERE \"#$tags\" LIKE $theTag\n              AND \"#$ordering\" > $theOffset\n              AND \"#$ordering\" <= $maxOffset\n              AND \"#$deleted\" = 0\n              ORDER BY \"#$ordering\"\n            )\n            WHERE rownum <= $max\"\"\".as[JournalRow]\n\n      // applies workaround for https://github.com/akka/akka-persistence-jdbc/issues/168\n      Source\n        .fromPublisher(db.stream(selectStatement))\n        .via(perfectlyMatchTag(tag, readJournalConfig.pluginConfig.tagSeparator))\n        .via(serializer.deserializeFlow)\n\n    } else {\n      super.eventsByTag(tag, offset, maxOffset, max)\n    }\n  }\n}\n\nclass ByteArrayReadJournalDao(\n    val db: Database,\n    val profile: JdbcProfile,\n    val readJournalConfig: ReadJournalConfig,\n    serialization: 
Serialization)(implicit val ec: ExecutionContext, val mat: Materializer)\n    extends BaseByteArrayReadJournalDao\n    with OracleReadJournalDao {\n  val queries = new ReadJournalQueries(profile, readJournalConfig)\n  val serializer: ByteArrayJournalSerializer =\n    new ByteArrayJournalSerializer(serialization, readJournalConfig.pluginConfig.tagSeparator)\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/query/dao/legacy/ReadJournalQueries.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query.dao.legacy\n\nimport akka.persistence.jdbc.config.{ LegacyJournalTableConfiguration, ReadJournalConfig }\nimport akka.persistence.jdbc.journal.dao.legacy.JournalTables\nimport slick.jdbc.JdbcProfile\n\nclass ReadJournalQueries(val profile: JdbcProfile, val readJournalConfig: ReadJournalConfig) extends JournalTables {\n  override val journalTableCfg: LegacyJournalTableConfiguration = readJournalConfig.journalTableConfiguration\n\n  import profile.api._\n\n  def journalRowByPersistenceIds(persistenceIds: Iterable[String]) =\n    for {\n      query <- JournalTable.map(_.persistenceId)\n      if query.inSetBind(persistenceIds)\n    } yield query\n\n  private def _allPersistenceIdsDistinct(max: ConstColumn[Long]): Query[Rep[String], String, Seq] =\n    baseTableQuery().map(_.persistenceId).distinct.take(max)\n\n  private def baseTableQuery() =\n    JournalTable.filter(_.deleted === false)\n\n  val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct _)\n\n  private def _messagesQuery(\n      persistenceId: Rep[String],\n      fromSequenceNr: Rep[Long],\n      toSequenceNr: Rep[Long],\n      max: ConstColumn[Long]) =\n    baseTableQuery()\n      .filter(_.persistenceId === persistenceId)\n      .filter(_.sequenceNumber >= fromSequenceNr)\n      .filter(_.sequenceNumber <= toSequenceNr)\n      .sortBy(_.sequenceNumber.asc)\n      .take(max)\n\n  val messagesQuery = Compiled(_messagesQuery _)\n\n  private def _eventsByTag(\n      tag: Rep[String],\n      offset: ConstColumn[Long],\n      maxOffset: ConstColumn[Long],\n      max: ConstColumn[Long]) = {\n    baseTableQuery()\n      .filter(_.tags.like(tag))\n      .sortBy(_.ordering.asc)\n      .filter(row => row.ordering > offset && row.ordering <= maxOffset)\n      .take(max)\n  }\n\n  val eventsByTag = 
Compiled(_eventsByTag _)\n\n  private def _journalSequenceQuery(from: ConstColumn[Long], limit: ConstColumn[Long]) =\n    JournalTable.filter(_.ordering > from).map(_.ordering).sorted.take(limit)\n\n  val journalSequenceQuery = Compiled(_journalSequenceQuery _)\n\n  val maxJournalSequenceQuery = Compiled {\n    JournalTable.map(_.ordering).max.getOrElse(0L)\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/query/javadsl/JdbcReadJournal.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query.javadsl\n\nimport akka.NotUsed\nimport akka.persistence.jdbc.query.scaladsl.{ JdbcReadJournal => ScalaJdbcReadJournal }\nimport akka.persistence.query.{ EventEnvelope, Offset }\nimport akka.persistence.query.javadsl._\nimport akka.stream.javadsl.Source\n\nobject JdbcReadJournal {\n  final val Identifier = ScalaJdbcReadJournal.Identifier\n}\n\nclass JdbcReadJournal(journal: ScalaJdbcReadJournal)\n    extends ReadJournal\n    with CurrentPersistenceIdsQuery\n    with PersistenceIdsQuery\n    with CurrentEventsByPersistenceIdQuery\n    with EventsByPersistenceIdQuery\n    with CurrentEventsByTagQuery\n    with EventsByTagQuery {\n\n  /**\n   * Same type of query as `persistenceIds` but the event stream\n   * is completed immediately when it reaches the end of the \"result set\". Events that are\n   * stored after the query is completed are not included in the event stream.\n   */\n  override def currentPersistenceIds(): Source[String, NotUsed] =\n    journal.currentPersistenceIds().asJava\n\n  /**\n   * `persistenceIds` is used to retrieve a stream of all `persistenceId`s as strings.\n   *\n   * The stream guarantees that a `persistenceId` is only emitted once and there are no duplicates.\n   * Order is not defined. 
Multiple executions of the same stream (even bounded) may emit different\n   * sequence of `persistenceId`s.\n   *\n   * The stream is not completed when it reaches the end of the currently known `persistenceId`s,\n   * but it continues to push new `persistenceId`s when new events are persisted.\n   * Corresponding query that is completed when it reaches the end of the currently\n   * known `persistenceId`s is provided by `currentPersistenceIds`.\n   */\n  override def persistenceIds(): Source[String, NotUsed] =\n    journal.persistenceIds().asJava\n\n  /**\n   * Same type of query as `eventsByPersistenceId` but the event stream\n   * is completed immediately when it reaches the end of the \"result set\". Events that are\n   * stored after the query is completed are not included in the event stream.\n   */\n  override def currentEventsByPersistenceId(\n      persistenceId: String,\n      fromSequenceNr: Long,\n      toSequenceNr: Long): Source[EventEnvelope, NotUsed] =\n    journal.currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava\n\n  /**\n   * `eventsByPersistenceId` is used to retrieve a stream of events for a particular persistenceId.\n   *\n   * The `EventEnvelope` contains the event and provides `persistenceId` and `sequenceNr`\n   * for each event. The `sequenceNr` is the sequence number for the persistent actor with the\n   * `persistenceId` that persisted the event. The `persistenceId` + `sequenceNr` is an unique\n   * identifier for the event.\n   *\n   * `fromSequenceNr` and `toSequenceNr` can be specified to limit the set of returned events.\n   * The `fromSequenceNr` and `toSequenceNr` are inclusive.\n   *\n   * The `EventEnvelope` also provides the `offset` that corresponds to the `ordering` column in\n   * the Journal table. The `ordering` is a sequential id number that uniquely identifies the\n   * position of each event, also across different `persistenceId`. 
The `Offset` type is\n   * `akka.persistence.query.Sequence` with the `ordering` as the offset value. This is the\n   * same `ordering` number as is used in the offset of the `eventsByTag` query.\n   *\n   * The returned event stream is ordered by `sequenceNr`.\n   *\n   * Causality is guaranteed (`sequenceNr`s of events for a particular `persistenceId` are always ordered\n   * in a sequence monotonically increasing by one). Multiple executions of the same bounded stream are\n   * guaranteed to emit exactly the same stream of events.\n   *\n   * The stream is not completed when it reaches the end of the currently stored events,\n   * but it continues to push new events when new events are persisted.\n   * Corresponding query that is completed when it reaches the end of the currently\n   * stored events is provided by `currentEventsByPersistenceId`.\n   */\n  override def eventsByPersistenceId(\n      persistenceId: String,\n      fromSequenceNr: Long,\n      toSequenceNr: Long): Source[EventEnvelope, NotUsed] =\n    journal.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava\n\n  /**\n   * Same type of query as `eventsByTag` but the event stream\n   * is completed immediately when it reaches the end of the \"result set\". Events that are\n   * stored after the query is completed are not included in the event stream.\n   */\n  override def currentEventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] =\n    journal.currentEventsByTag(tag, offset).asJava\n\n  /**\n   * Query events that have a specific tag.\n   *\n   * The consumer can keep track of its current position in the event stream by storing the\n   * `offset` and restart the query from a given `offset` after a crash/restart.\n   * The offset is exclusive, i.e. 
the event corresponding to the given `offset` parameter is not\n   * included in the stream.\n   *\n   * For akka-persistence-jdbc the `offset` corresponds to the `ordering` column in the Journal table.\n   * The `ordering` is a sequential id number that uniquely identifies the position of each event within\n   * the event stream. The `Offset` type is `akka.persistence.query.Sequence` with the `ordering` as the\n   * offset value.\n   *\n   * The returned event stream is ordered by `offset`.\n   *\n   * The stream is not completed when it reaches the end of the currently stored events,\n   * but it continues to push new events when new events are persisted.\n   * Corresponding query that is completed when it reaches the end of the currently\n   * stored events is provided by [[CurrentEventsByTagQuery#currentEventsByTag]].\n   */\n  override def eventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] =\n    journal.eventsByTag(tag, offset).asJava\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/query/package.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc\n\nimport akka.persistence.query._\n\npackage object query {\n  implicit class OffsetOps(val that: Offset) extends AnyVal {\n    def value =\n      that match {\n        case Sequence(offsetValue) => offsetValue\n        case NoOffset              => 0L\n        case _ =>\n          throw new IllegalArgumentException(\n            \"akka-persistence-jdbc does not support \" + that.getClass.getName + \" offsets\")\n      }\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/query/scaladsl/JdbcReadJournal.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\npackage scaladsl\n\nimport akka.NotUsed\nimport akka.actor.ExtendedActorSystem\nimport akka.persistence.jdbc.config.ReadJournalConfig\nimport akka.persistence.jdbc.query.JournalSequenceActor.{ GetMaxOrderingId, MaxOrderingId }\nimport akka.persistence.jdbc.db.SlickExtension\nimport akka.persistence.jdbc.journal.dao.FlowControl\nimport akka.persistence.query.scaladsl._\nimport akka.persistence.query.{ EventEnvelope, Offset, Sequence }\nimport akka.persistence.{ Persistence, PersistentRepr }\nimport akka.serialization.{ Serialization, SerializationExtension }\nimport akka.stream.scaladsl.{ Sink, Source }\nimport akka.stream.{ Materializer, SystemMaterializer }\nimport akka.util.Timeout\nimport com.typesafe.config.Config\nimport slick.jdbc.JdbcBackend._\nimport slick.jdbc.JdbcProfile\n\nimport scala.collection.immutable._\nimport scala.concurrent.duration._\nimport scala.concurrent.{ ExecutionContext, Future }\nimport scala.util.{ Failure, Success }\nimport akka.actor.Scheduler\nimport akka.persistence.jdbc.query.dao.ReadJournalDao\nimport akka.persistence.jdbc.util.PluginVersionChecker\n\nobject JdbcReadJournal {\n  final val Identifier = \"jdbc-read-journal\"\n}\n\nclass JdbcReadJournal(config: Config, configPath: String)(implicit val system: ExtendedActorSystem)\n    extends ReadJournal\n    with CurrentPersistenceIdsQuery\n    with PersistenceIdsQuery\n    with CurrentEventsByPersistenceIdQuery\n    with EventsByPersistenceIdQuery\n    with CurrentEventsByTagQuery\n    with EventsByTagQuery {\n\n  PluginVersionChecker.check()\n\n  implicit val ec: ExecutionContext = system.dispatcher\n  implicit val mat: Materializer = SystemMaterializer(system).materializer\n\n  val readJournalConfig = new ReadJournalConfig(config)\n\n  private val writePluginId = 
config.getString(\"write-plugin\")\n  // If 'config' is empty, or if the plugin reference is not found, then the write plugin will be resolved from the\n  // ActorSystem configuration. Otherwise, it will be resolved from the provided 'config'.\n  private val eventAdapters = Persistence(system).adaptersFor(writePluginId, config)\n\n  val readJournalDao: ReadJournalDao = {\n    val slickDb = SlickExtension(system).database(config)\n    val db = slickDb.database\n    if (readJournalConfig.addShutdownHook && slickDb.allowShutdown) {\n      system.registerOnTermination {\n        db.close()\n      }\n    }\n    val fqcn = readJournalConfig.pluginConfig.dao\n    val profile: JdbcProfile = slickDb.profile\n    val args = Seq(\n      (classOf[Database], db),\n      (classOf[JdbcProfile], profile),\n      (classOf[ReadJournalConfig], readJournalConfig),\n      (classOf[Serialization], SerializationExtension(system)),\n      (classOf[ExecutionContext], ec),\n      (classOf[Materializer], mat))\n    system.dynamicAccess.createInstanceFor[ReadJournalDao](fqcn, args) match {\n      case Success(dao)   => dao\n      case Failure(cause) => throw cause\n    }\n  }\n\n  // Started lazily to prevent the actor for querying the db if no eventsByTag queries are used\n  private[query] lazy val journalSequenceActor = system.systemActorOf(\n    JournalSequenceActor.props(readJournalDao, readJournalConfig.journalSequenceRetrievalConfiguration),\n    s\"$configPath.akka-persistence-jdbc-journal-sequence-actor\")\n\n  /**\n   * Same type of query as `persistenceIds` but the event stream\n   * is completed immediately when it reaches the end of the \"result set\". 
Events that are\n   * stored after the query is completed are not included in the event stream.\n   */\n  override def currentPersistenceIds(): Source[String, NotUsed] =\n    readJournalDao.allPersistenceIdsSource(Long.MaxValue)\n\n  /**\n   * `persistenceIds` is used to retrieve a stream of all `persistenceId`s as strings.\n   *\n   * The stream guarantees that a `persistenceId` is only emitted once and there are no duplicates.\n   * Order is not defined. Multiple executions of the same stream (even bounded) may emit different\n   * sequence of `persistenceId`s.\n   *\n   * The stream is not completed when it reaches the end of the currently known `persistenceId`s,\n   * but it continues to push new `persistenceId`s when new events are persisted.\n   * Corresponding query that is completed when it reaches the end of the currently\n   * known `persistenceId`s is provided by `currentPersistenceIds`.\n   */\n  override def persistenceIds(): Source[String, NotUsed] =\n    Source\n      .single(0)\n      .concat(Source.tick(readJournalConfig.refreshInterval, readJournalConfig.refreshInterval, 0))\n      .flatMapConcat(_ => currentPersistenceIds())\n      .statefulMapConcat[String] { () =>\n        var knownIds = Set.empty[String]\n        def next(id: String): Iterable[String] = {\n          val xs = Set(id).diff(knownIds)\n          knownIds += id\n          xs\n        }\n        id => next(id)\n      }\n\n  private def adaptEvents(repr: PersistentRepr): Seq[PersistentRepr] = {\n    val adapter = eventAdapters.get(repr.payload.getClass)\n    adapter.fromJournal(repr.payload, repr.manifest).events.map(repr.withPayload)\n  }\n\n  /**\n   * Same type of query as `eventsByPersistenceId` but the event stream\n   * is completed immediately when it reaches the end of the \"result set\". 
Events that are\n   * stored after the query is completed are not included in the event stream.\n   */\n  override def currentEventsByPersistenceId(\n      persistenceId: String,\n      fromSequenceNr: Long,\n      toSequenceNr: Long): Source[EventEnvelope, NotUsed] =\n    eventsByPersistenceIdSource(persistenceId, fromSequenceNr, toSequenceNr, None)\n\n  /**\n   * `eventsByPersistenceId` is used to retrieve a stream of events for a particular persistenceId.\n   *\n   * The `EventEnvelope` contains the event and provides `persistenceId` and `sequenceNr`\n   * for each event. The `sequenceNr` is the sequence number for the persistent actor with the\n   * `persistenceId` that persisted the event. The `persistenceId` + `sequenceNr` is an unique\n   * identifier for the event.\n   *\n   * `fromSequenceNr` and `toSequenceNr` can be specified to limit the set of returned events.\n   * The `fromSequenceNr` and `toSequenceNr` are inclusive.\n   *\n   * The `EventEnvelope` also provides the `offset` that corresponds to the `ordering` column in\n   * the Journal table. The `ordering` is a sequential id number that uniquely identifies the\n   * position of each event, also across different `persistenceId`. The `Offset` type is\n   * `akka.persistence.query.Sequence` with the `ordering` as the offset value. This is the\n   * same `ordering` number as is used in the offset of the `eventsByTag` query.\n   *\n   * The returned event stream is ordered by `sequenceNr`.\n   *\n   * Causality is guaranteed (`sequenceNr`s of events for a particular `persistenceId` are always ordered\n   * in a sequence monotonically increasing by one). 
Multiple executions of the same bounded stream are\n   * guaranteed to emit exactly the same stream of events.\n   *\n   * The stream is not completed when it reaches the end of the currently stored events,\n   * but it continues to push new events when new events are persisted.\n   * Corresponding query that is completed when it reaches the end of the currently\n   * stored events is provided by `currentEventsByPersistenceId`.\n   */\n  override def eventsByPersistenceId(\n      persistenceId: String,\n      fromSequenceNr: Long,\n      toSequenceNr: Long): Source[EventEnvelope, NotUsed] =\n    eventsByPersistenceIdSource(\n      persistenceId,\n      fromSequenceNr,\n      toSequenceNr,\n      Some(readJournalConfig.refreshInterval -> system.scheduler))\n\n  private def eventsByPersistenceIdSource(\n      persistenceId: String,\n      fromSequenceNr: Long,\n      toSequenceNr: Long,\n      refreshInterval: Option[(FiniteDuration, Scheduler)]): Source[EventEnvelope, NotUsed] = {\n    val batchSize = readJournalConfig.maxBufferSize\n    readJournalDao\n      .messagesWithBatch(persistenceId, fromSequenceNr, toSequenceNr, batchSize, refreshInterval)\n      .mapAsync(1)(reprAndOrdNr => Future.fromTry(reprAndOrdNr))\n      .mapConcat { case (repr, ordNr) =>\n        adaptEvents(repr).map(_ -> ordNr)\n      }\n      .map { case (repr, ordNr) =>\n        EventEnvelope(Sequence(ordNr), repr.persistenceId, repr.sequenceNr, repr.payload, repr.timestamp, repr.metadata)\n      }\n  }\n\n  /**\n   * Same type of query as `eventsByTag` but the event stream\n   * is completed immediately when it reaches the end of the \"result set\". 
Events that are\n   * stored after the query is completed are not included in the event stream.\n   */\n  override def currentEventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] =\n    currentEventsByTag(tag, offset.value)\n\n  private def currentJournalEventsByTag(\n      tag: String,\n      offset: Long,\n      max: Long,\n      latestOrdering: MaxOrderingId): Source[EventEnvelope, NotUsed] = {\n    if (latestOrdering.maxOrdering < offset) Source.empty\n    else {\n      readJournalDao.eventsByTag(tag, offset, latestOrdering.maxOrdering, max).mapAsync(1)(Future.fromTry).mapConcat {\n        case (repr, _, ordering) =>\n          adaptEvents(repr).map(r =>\n            EventEnvelope(Sequence(ordering), r.persistenceId, r.sequenceNr, r.payload, r.timestamp, r.metadata))\n      }\n    }\n  }\n\n  /**\n   * @param terminateAfterOffset If None, the stream never completes. If a Some, then the stream will complete once a\n   *                             query has been executed which might return an event with this offset (or a higher offset).\n   *                             The stream may include offsets higher than the value in terminateAfterOffset, since the last batch\n   *                             will be returned completely.\n   */\n  private def eventsByTag(\n      tag: String,\n      offset: Long,\n      terminateAfterOffset: Option[Long]): Source[EventEnvelope, NotUsed] = {\n    import akka.pattern.ask\n    import FlowControl._\n    implicit val askTimeout: Timeout = Timeout(readJournalConfig.journalSequenceRetrievalConfiguration.askTimeout)\n    val batchSize = readJournalConfig.maxBufferSize\n    val maxOrderingRange = readJournalConfig.eventsByTagBufferSizesPerQuery match {\n      case 0 => None\n      case x => Some(x * batchSize)\n    }\n\n    def getLoopMaxOrderingId(offset: Long, latestOrdering: MaxOrderingId): MaxOrderingId =\n      maxOrderingRange match {\n        case None => latestOrdering\n        case Some(numberOfEvents) 
=>\n          val limitedMaxOrderingId = offset + numberOfEvents\n          if (limitedMaxOrderingId < 0 || limitedMaxOrderingId >= latestOrdering.maxOrdering) latestOrdering\n          else MaxOrderingId(limitedMaxOrderingId)\n      }\n\n    Source\n      .unfoldAsync[(Long, FlowControl), Seq[EventEnvelope]]((offset, Continue)) { case (from, control) =>\n        def retrieveNextBatch() = {\n          for {\n            queryUntil <- journalSequenceActor.ask(GetMaxOrderingId).mapTo[MaxOrderingId]\n            loopMaxOrderingId = getLoopMaxOrderingId(from, queryUntil)\n            xs <- currentJournalEventsByTag(tag, from, batchSize, loopMaxOrderingId).runWith(Sink.seq)\n          } yield {\n            // continue if query over entire journal was fewer than full batch or if we are limiting\n            // the query through eventsByTagBufferSizesPerQuery and didn't reach the last 'ordering' yet\n            val hasMoreEvents = (xs.size == batchSize) || (loopMaxOrderingId.maxOrdering < queryUntil.maxOrdering)\n            val nextControl: FlowControl =\n              terminateAfterOffset match {\n                // we may stop if target is behind queryUntil and we don't have more events to fetch\n                case Some(target) if !hasMoreEvents && target <= queryUntil.maxOrdering => Stop\n                // We may also stop if we have found an event with an offset >= target\n                case Some(target) if xs.exists(_.offset.value >= target) => Stop\n\n                // otherwise, disregarding if Some or None, we must decide how to continue\n                case _ =>\n                  if (hasMoreEvents) Continue else ContinueDelayed\n              }\n\n            val nextStartingOffset = if (xs.isEmpty) {\n              /* If no events matched the tag between `from` and `maxOrdering` then there is no need to execute the exact\n               * same query again. 
We can continue querying from `maxOrdering`, which will save some load on the db.\n               * (Note: we may never return a value smaller than `from`, otherwise we might return duplicate events) */\n              math.max(from, loopMaxOrderingId.maxOrdering)\n            } else {\n              // Continue querying from the largest offset\n              xs.map(_.offset.value).max\n            }\n            Some(((nextStartingOffset, nextControl), xs))\n          }\n        }\n\n        control match {\n          case Stop     => Future.successful(None)\n          case Continue => retrieveNextBatch()\n          case ContinueDelayed =>\n            akka.pattern.after(readJournalConfig.refreshInterval, system.scheduler)(retrieveNextBatch())\n        }\n      }\n      .mapConcat(identity)\n  }\n\n  def currentEventsByTag(tag: String, offset: Long): Source[EventEnvelope, NotUsed] = {\n    Source\n      .futureSource(readJournalDao.maxJournalSequence().map { maxOrderingInDb =>\n        eventsByTag(tag, offset, terminateAfterOffset = Some(maxOrderingInDb))\n      })\n      .mapMaterializedValue(_ => NotUsed)\n  }\n\n  /**\n   * Query events that have a specific tag.\n   *\n   * The consumer can keep track of its current position in the event stream by storing the\n   * `offset` and restart the query from a given `offset` after a crash/restart.\n   * The offset is exclusive, i.e. the event corresponding to the given `offset` parameter is not\n   * included in the stream.\n   *\n   * For akka-persistence-jdbc the `offset` corresponds to the `ordering` column in the Journal table.\n   * The `ordering` is a sequential id number that uniquely identifies the position of each event within\n   * the event stream. 
The `Offset` type is `akka.persistence.query.Sequence` with the `ordering` as the\n   * offset value.\n   *\n   * The returned event stream is ordered by `offset`.\n   *\n   * In addition to the `offset` the `EventEnvelope` also provides `persistenceId` and `sequenceNr`\n   * for each event. The `sequenceNr` is the sequence number for the persistent actor with the\n   * `persistenceId` that persisted the event. The `persistenceId` + `sequenceNr` is an unique\n   * identifier for the event.\n   *\n   * The stream is not completed when it reaches the end of the currently stored events,\n   * but it continues to push new events when new events are persisted.\n   * Corresponding query that is completed when it reaches the end of the currently\n   * stored events is provided by [[CurrentEventsByTagQuery#currentEventsByTag]].\n   */\n  override def eventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] =\n    eventsByTag(tag, offset.value)\n\n  def eventsByTag(tag: String, offset: Long): Source[EventEnvelope, NotUsed] =\n    eventsByTag(tag, offset, terminateAfterOffset = None)\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/serialization/PersistentReprSerializer.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.serialization\n\nimport akka.NotUsed\nimport akka.persistence.jdbc.util.TrySeq\nimport akka.persistence.journal.Tagged\nimport akka.persistence.{ AtomicWrite, PersistentRepr }\nimport akka.stream.scaladsl.Flow\nimport scala.collection.immutable._\n\nimport scala.util.Try\n\n@deprecated(\"use Akka Serialization for the payloads instead\", since = \"5.0.0\")\ntrait PersistentReprSerializer[T] {\n\n  /**\n   * An akka.persistence.AtomicWrite contains a Sequence of events (with metadata, the PersistentRepr)\n   * that must all be persisted or all fail, what makes the operation atomic. The function converts\n   * each AtomicWrite to a Try[Seq[T]].\n   * The Try denotes whether there was a problem with the AtomicWrite or not.\n   */\n  def serialize(messages: Seq[AtomicWrite]): Seq[Try[Seq[T]]] = {\n    messages.map { atomicWrite =>\n      val serialized = atomicWrite.payload.map(serialize)\n      TrySeq.sequence(serialized)\n    }\n  }\n\n  def serialize(persistentRepr: PersistentRepr): Try[T] =\n    persistentRepr.payload match {\n      case Tagged(payload, tags) =>\n        serialize(persistentRepr.withPayload(payload), tags)\n      case _ => serialize(persistentRepr, Set.empty[String])\n    }\n\n  def serialize(persistentRepr: PersistentRepr, tags: Set[String]): Try[T]\n\n  /**\n   * deserialize into a PersistentRepr, a set of tags and a Long representing the global ordering of events\n   */\n  def deserialize(t: T): Try[(PersistentRepr, Set[String], Long)]\n}\n\n@deprecated(\"use Akka Serialization for the payloads instead\", since = \"5.0.0\")\ntrait FlowPersistentReprSerializer[T] extends PersistentReprSerializer[T] {\n\n  /**\n   * A flow which deserializes each element into a PersistentRepr,\n   * a set of tags and a Long representing the global ordering of events\n   
*/\n  def deserializeFlow: Flow[T, Try[(PersistentRepr, Set[String], Long)], NotUsed] = {\n    Flow[T].map(deserialize)\n  }\n\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/serialization/SnapshotSerializer.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.serialization\n\nimport akka.persistence.SnapshotMetadata\n\nimport scala.util.Try\n\ntrait SnapshotSerializer[T] {\n  def serialize(metadata: SnapshotMetadata, snapshot: Any): Try[T]\n\n  def deserialize(t: T): Try[(SnapshotMetadata, Any)]\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/snapshot/JdbcSnapshotStore.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.snapshot\n\nimport akka.actor.ActorSystem\nimport akka.persistence.jdbc.config.SnapshotConfig\nimport akka.persistence.jdbc.snapshot.dao.{ SnapshotDao, SnapshotDaoInstantiation }\nimport akka.persistence.jdbc.db.{ SlickDatabase, SlickExtension }\nimport akka.persistence.snapshot.SnapshotStore\nimport akka.persistence.{ SelectedSnapshot, SnapshotMetadata, SnapshotSelectionCriteria }\nimport akka.stream.{ Materializer, SystemMaterializer }\nimport com.typesafe.config.Config\nimport slick.jdbc.JdbcBackend._\n\nimport scala.concurrent.{ ExecutionContext, Future }\n\nobject JdbcSnapshotStore {\n  def toSelectedSnapshot(tupled: (SnapshotMetadata, Any)): SelectedSnapshot =\n    tupled match {\n      case (meta: SnapshotMetadata, snapshot: Any) => SelectedSnapshot(meta, snapshot)\n    }\n}\n\nclass JdbcSnapshotStore(config: Config) extends SnapshotStore {\n  import JdbcSnapshotStore._\n\n  implicit val ec: ExecutionContext = context.dispatcher\n  implicit val system: ActorSystem = context.system\n  implicit val mat: Materializer = SystemMaterializer(system).materializer\n  val snapshotConfig = new SnapshotConfig(config)\n\n  val slickDb: SlickDatabase = SlickExtension(system).database(config)\n  def db: Database = slickDb.database\n\n  val snapshotDao: SnapshotDao = SnapshotDaoInstantiation.snapshotDao(snapshotConfig, slickDb)\n\n  override def loadAsync(\n      persistenceId: String,\n      criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = {\n    val result = criteria match {\n      case SnapshotSelectionCriteria(Long.MaxValue, Long.MaxValue, _, _) =>\n        snapshotDao.latestSnapshot(persistenceId)\n      case SnapshotSelectionCriteria(Long.MaxValue, maxTimestamp, _, _) =>\n        snapshotDao.snapshotForMaxTimestamp(persistenceId, maxTimestamp)\n      
case SnapshotSelectionCriteria(maxSequenceNr, Long.MaxValue, _, _) =>\n        snapshotDao.snapshotForMaxSequenceNr(persistenceId, maxSequenceNr)\n      case SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, _, _) =>\n        snapshotDao.snapshotForMaxSequenceNrAndMaxTimestamp(persistenceId, maxSequenceNr, maxTimestamp)\n      case null => Future.successful(None)\n    }\n\n    result.map(_.map(toSelectedSnapshot))\n  }\n\n  override def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] =\n    snapshotDao.save(metadata, snapshot)\n\n  override def deleteAsync(metadata: SnapshotMetadata): Future[Unit] =\n    for {\n      _ <- snapshotDao.delete(metadata.persistenceId, metadata.sequenceNr)\n    } yield ()\n\n  override def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] =\n    criteria match {\n      case SnapshotSelectionCriteria(Long.MaxValue, Long.MaxValue, _, _) =>\n        snapshotDao.deleteAllSnapshots(persistenceId)\n      case SnapshotSelectionCriteria(Long.MaxValue, maxTimestamp, _, _) =>\n        snapshotDao.deleteUpToMaxTimestamp(persistenceId, maxTimestamp)\n      case SnapshotSelectionCriteria(maxSequenceNr, Long.MaxValue, _, _) =>\n        snapshotDao.deleteUpToMaxSequenceNr(persistenceId, maxSequenceNr)\n      case SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, _, _) =>\n        snapshotDao.deleteUpToMaxSequenceNrAndMaxTimestamp(persistenceId, maxSequenceNr, maxTimestamp)\n      case null => Future.successful(())\n    }\n\n  override def postStop(): Unit = {\n    if (slickDb.allowShutdown) {\n      // Since a (new) db is created when this actor (re)starts, we must close it when the actor stops\n      db.close()\n    }\n    super.postStop()\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/DefaultSnapshotDao.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.snapshot.dao\n\nimport slick.jdbc.{ JdbcBackend, JdbcProfile }\nimport akka.persistence.SnapshotMetadata\nimport akka.persistence.jdbc.config.SnapshotConfig\nimport akka.serialization.Serialization\nimport akka.stream.Materializer\nimport SnapshotTables._\nimport akka.persistence.jdbc.AkkaSerialization\n\nimport scala.concurrent.{ ExecutionContext, Future }\nimport scala.util.{ Success, Try }\n\nclass DefaultSnapshotDao(\n    db: JdbcBackend#Database,\n    profile: JdbcProfile,\n    snapshotConfig: SnapshotConfig,\n    serialization: Serialization)(implicit ec: ExecutionContext, val mat: Materializer)\n    extends SnapshotDao {\n  import profile.api._\n  val queries = new SnapshotQueries(profile, snapshotConfig.snapshotTableConfiguration)\n\n  private def toSnapshotData(row: SnapshotRow): Try[(SnapshotMetadata, Any)] = {\n    val snapshot = serialization.deserialize(row.snapshotPayload, row.snapshotSerId, row.snapshotSerManifest)\n\n    snapshot.flatMap { snapshot =>\n      val metadata = for {\n        mPayload <- row.metaPayload\n        mSerId <- row.metaSerId\n      } yield (mPayload, mSerId)\n\n      metadata match {\n        case None =>\n          Success((SnapshotMetadata(row.persistenceId, row.sequenceNumber, row.created), snapshot))\n        case Some((payload, id)) =>\n          serialization.deserialize(payload, id, row.metaSerManifest.getOrElse(\"\")).map { meta =>\n            (SnapshotMetadata(row.persistenceId, row.sequenceNumber, row.created, Some(meta)), snapshot)\n          }\n      }\n    }\n  }\n\n  private def serializeSnapshot(meta: SnapshotMetadata, snapshot: Any): Try[SnapshotRow] = {\n    val serializedMetadata = meta.metadata.flatMap(m => AkkaSerialization.serialize(serialization, m).toOption)\n    AkkaSerialization\n      .serialize(serialization, 
payload = snapshot)\n      .map(serializedSnapshot =>\n        SnapshotRow(\n          meta.persistenceId,\n          meta.sequenceNr,\n          meta.timestamp,\n          serializedSnapshot.serId,\n          serializedSnapshot.serManifest,\n          serializedSnapshot.payload,\n          serializedMetadata.map(_.serId),\n          serializedMetadata.map(_.serManifest),\n          serializedMetadata.map(_.payload)))\n  }\n\n  private def zeroOrOneSnapshot(rows: Seq[SnapshotRow]): Option[(SnapshotMetadata, Any)] =\n    rows.headOption.map(row => toSnapshotData(row).get) // throw is from a future map\n\n  override def latestSnapshot(persistenceId: String): Future[Option[(SnapshotMetadata, Any)]] =\n    db.run(queries.selectLatestByPersistenceId(persistenceId).result).flatMap { rows =>\n      rows.headOption match {\n        case Some(row) => Future.fromTry(toSnapshotData(row)).map(Option(_))\n        case None      => Future.successful(None)\n      }\n    }\n\n  override def snapshotForMaxTimestamp(\n      persistenceId: String,\n      maxTimestamp: Long): Future[Option[(SnapshotMetadata, Any)]] =\n    db.run(queries.selectOneByPersistenceIdAndMaxTimestamp((persistenceId, maxTimestamp)).result).map(zeroOrOneSnapshot)\n\n  override def snapshotForMaxSequenceNr(\n      persistenceId: String,\n      maxSequenceNr: Long): Future[Option[(SnapshotMetadata, Any)]] =\n    db.run(queries.selectOneByPersistenceIdAndMaxSequenceNr((persistenceId, maxSequenceNr)).result)\n      .map(zeroOrOneSnapshot)\n\n  override def snapshotForMaxSequenceNrAndMaxTimestamp(\n      persistenceId: String,\n      maxSequenceNr: Long,\n      maxTimestamp: Long): Future[Option[(SnapshotMetadata, Any)]] =\n    db.run(\n      queries\n        .selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp((persistenceId, maxSequenceNr, maxTimestamp))\n        .result)\n      .map(zeroOrOneSnapshot(_))\n\n  override def save(snapshotMetadata: SnapshotMetadata, snapshot: Any): Future[Unit] = {\n    val 
eventualSnapshotRow = Future.fromTry(serializeSnapshot(snapshotMetadata, snapshot))\n    eventualSnapshotRow.map(queries.insertOrUpdate).flatMap(db.run).map(_ => ())(ExecutionContext.parasitic)\n  }\n\n  override def delete(persistenceId: String, sequenceNr: Long): Future[Unit] =\n    db.run(queries.selectByPersistenceIdAndSequenceNr((persistenceId, sequenceNr)).delete)\n      .map(_ => ())(ExecutionContext.parasitic)\n\n  override def deleteAllSnapshots(persistenceId: String): Future[Unit] =\n    db.run(queries.selectAll(persistenceId).delete).map(_ => ())((ExecutionContext.parasitic))\n\n  override def deleteUpToMaxSequenceNr(persistenceId: String, maxSequenceNr: Long): Future[Unit] =\n    db.run(queries.selectByPersistenceIdUpToMaxSequenceNr((persistenceId, maxSequenceNr)).delete)\n      .map(_ => ())((ExecutionContext.parasitic))\n\n  override def deleteUpToMaxTimestamp(persistenceId: String, maxTimestamp: Long): Future[Unit] =\n    db.run(queries.selectByPersistenceIdUpToMaxTimestamp((persistenceId, maxTimestamp)).delete)\n      .map(_ => ())((ExecutionContext.parasitic))\n\n  override def deleteUpToMaxSequenceNrAndMaxTimestamp(\n      persistenceId: String,\n      maxSequenceNr: Long,\n      maxTimestamp: Long): Future[Unit] =\n    db.run(\n      queries\n        .selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp((persistenceId, maxSequenceNr, maxTimestamp))\n        .delete)\n      .map(_ => ())((ExecutionContext.parasitic))\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/SnapshotDao.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.snapshot.dao\n\nimport akka.persistence.SnapshotMetadata\n\nimport scala.concurrent.Future\n\ntrait SnapshotDao {\n  def deleteAllSnapshots(persistenceId: String): Future[Unit]\n\n  def deleteUpToMaxSequenceNr(persistenceId: String, maxSequenceNr: Long): Future[Unit]\n\n  def deleteUpToMaxTimestamp(persistenceId: String, maxTimestamp: Long): Future[Unit]\n\n  def deleteUpToMaxSequenceNrAndMaxTimestamp(\n      persistenceId: String,\n      maxSequenceNr: Long,\n      maxTimestamp: Long): Future[Unit]\n\n  def latestSnapshot(persistenceId: String): Future[Option[(SnapshotMetadata, Any)]]\n\n  def snapshotForMaxTimestamp(persistenceId: String, timestamp: Long): Future[Option[(SnapshotMetadata, Any)]]\n\n  def snapshotForMaxSequenceNr(persistenceId: String, sequenceNr: Long): Future[Option[(SnapshotMetadata, Any)]]\n\n  def snapshotForMaxSequenceNrAndMaxTimestamp(\n      persistenceId: String,\n      sequenceNr: Long,\n      timestamp: Long): Future[Option[(SnapshotMetadata, Any)]]\n\n  def delete(persistenceId: String, sequenceNr: Long): Future[Unit]\n\n  def save(snapshotMetadata: SnapshotMetadata, snapshot: Any): Future[Unit]\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/SnapshotDaoInstantiation.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.snapshot.dao\n\nimport akka.actor.{ ActorSystem, ExtendedActorSystem }\nimport akka.annotation.InternalApi\nimport akka.persistence.jdbc.config.SnapshotConfig\nimport akka.persistence.jdbc.db.SlickDatabase\nimport akka.serialization.{ Serialization, SerializationExtension }\nimport akka.stream.Materializer\nimport slick.jdbc.JdbcBackend.Database\nimport slick.jdbc.JdbcProfile\n\nimport scala.concurrent.ExecutionContext\nimport scala.util.{ Failure, Success }\n\n@InternalApi\nprivate[jdbc] object SnapshotDaoInstantiation {\n\n  def snapshotDao(\n      snapshotConfig: SnapshotConfig,\n      slickDb: SlickDatabase)(implicit system: ActorSystem, ec: ExecutionContext, mat: Materializer): SnapshotDao = {\n    val fqcn = snapshotConfig.pluginConfig.dao\n    val profile: JdbcProfile = slickDb.profile\n    val args = Seq(\n      (classOf[Database], slickDb.database),\n      (classOf[JdbcProfile], profile),\n      (classOf[SnapshotConfig], snapshotConfig),\n      (classOf[Serialization], SerializationExtension(system)),\n      (classOf[ExecutionContext], ec),\n      (classOf[Materializer], mat))\n    system.asInstanceOf[ExtendedActorSystem].dynamicAccess.createInstanceFor[SnapshotDao](fqcn, args) match {\n      case Success(dao)   => dao\n      case Failure(cause) => throw cause\n    }\n  }\n\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/SnapshotQueries.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.snapshot.dao\n\nimport akka.persistence.jdbc.config.SnapshotTableConfiguration\nimport akka.persistence.jdbc.snapshot.dao.SnapshotTables.SnapshotRow\nimport slick.jdbc.JdbcProfile\n\nclass SnapshotQueries(val profile: JdbcProfile, override val snapshotTableCfg: SnapshotTableConfiguration)\n    extends SnapshotTables {\n  import profile.api._\n\n  private val SnapshotTableC = Compiled(SnapshotTable)\n\n  def insertOrUpdate(snapshotRow: SnapshotRow) =\n    SnapshotTableC.insertOrUpdate(snapshotRow)\n\n  private def _selectAll(persistenceId: Rep[String]) =\n    SnapshotTable.filter(_.persistenceId === persistenceId).sortBy(_.sequenceNumber.desc)\n  val selectAll = Compiled(_selectAll _)\n\n  private def _selectLatestByPersistenceId(persistenceId: Rep[String]) =\n    _selectAll(persistenceId).take(1)\n  val selectLatestByPersistenceId = Compiled(_selectLatestByPersistenceId _)\n\n  private def _selectByPersistenceIdAndSequenceNr(persistenceId: Rep[String], sequenceNr: Rep[Long]) =\n    _selectAll(persistenceId).filter(_.sequenceNumber === sequenceNr)\n  val selectByPersistenceIdAndSequenceNr = Compiled(_selectByPersistenceIdAndSequenceNr _)\n\n  private def _selectByPersistenceIdUpToMaxTimestamp(persistenceId: Rep[String], maxTimestamp: Rep[Long]) =\n    _selectAll(persistenceId).filter(_.created <= maxTimestamp)\n  val selectByPersistenceIdUpToMaxTimestamp = Compiled(_selectByPersistenceIdUpToMaxTimestamp _)\n\n  private def _selectByPersistenceIdUpToMaxSequenceNr(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) =\n    _selectAll(persistenceId).filter(_.sequenceNumber <= maxSequenceNr)\n  val selectByPersistenceIdUpToMaxSequenceNr = Compiled(_selectByPersistenceIdUpToMaxSequenceNr _)\n\n  private def _selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp(\n      
persistenceId: Rep[String],\n      maxSequenceNr: Rep[Long],\n      maxTimestamp: Rep[Long]) =\n    _selectByPersistenceIdUpToMaxSequenceNr(persistenceId, maxSequenceNr).filter(_.created <= maxTimestamp)\n  val selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp = Compiled(\n    _selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp _)\n\n  private def _selectOneByPersistenceIdAndMaxTimestamp(persistenceId: Rep[String], maxTimestamp: Rep[Long]) =\n    _selectAll(persistenceId).filter(_.created <= maxTimestamp).take(1)\n  val selectOneByPersistenceIdAndMaxTimestamp = Compiled(_selectOneByPersistenceIdAndMaxTimestamp _)\n\n  private def _selectOneByPersistenceIdAndMaxSequenceNr(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) =\n    _selectAll(persistenceId).filter(_.sequenceNumber <= maxSequenceNr).take(1)\n  val selectOneByPersistenceIdAndMaxSequenceNr = Compiled(_selectOneByPersistenceIdAndMaxSequenceNr _)\n\n  private def _selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp(\n      persistenceId: Rep[String],\n      maxSequenceNr: Rep[Long],\n      maxTimestamp: Rep[Long]) =\n    _selectByPersistenceIdUpToMaxSequenceNr(persistenceId, maxSequenceNr).filter(_.created <= maxTimestamp).take(1)\n  val selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp = Compiled(\n    _selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp _)\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/SnapshotTables.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.snapshot.dao\n\nimport akka.persistence.jdbc.config.SnapshotTableConfiguration\nimport akka.persistence.jdbc.snapshot.dao.SnapshotTables.SnapshotRow\nimport akka.persistence.jdbc.snapshot.dao.legacy.SnapshotTables.isOracleDriver\nimport akka.persistence.jdbc.util.InputStreamOps.InputStreamImplicits\n\nobject SnapshotTables {\n  case class SnapshotRow(\n      persistenceId: String,\n      sequenceNumber: Long,\n      created: Long,\n      snapshotSerId: Int,\n      snapshotSerManifest: String,\n      snapshotPayload: Array[Byte],\n      metaSerId: Option[Int],\n      metaSerManifest: Option[String],\n      metaPayload: Option[Array[Byte]])\n\n}\n\ntrait SnapshotTables {\n  val profile: slick.jdbc.JdbcProfile\n  import profile.api._\n  def snapshotTableCfg: SnapshotTableConfiguration\n\n  class Snapshot(_tableTag: Tag)\n      extends Table[SnapshotRow](\n        _tableTag,\n        _schemaName = snapshotTableCfg.schemaName,\n        _tableName = snapshotTableCfg.tableName) {\n    def * =\n      (\n        persistenceId,\n        sequenceNumber,\n        created,\n        snapshotSerId,\n        snapshotSerManifest,\n        snapshotPayload,\n        metaSerId,\n        metaSerManifest,\n        metaPayload).<>((SnapshotRow.apply _).tupled, SnapshotRow.unapply)\n\n    val persistenceId: Rep[String] =\n      column[String](snapshotTableCfg.columnNames.persistenceId, O.Length(255, varying = true))\n    val sequenceNumber: Rep[Long] = column[Long](snapshotTableCfg.columnNames.sequenceNumber)\n    val created: Rep[Long] = column[Long](snapshotTableCfg.columnNames.created)\n\n    val snapshotPayload: Rep[Array[Byte]] = column[Array[Byte]](snapshotTableCfg.columnNames.snapshotPayload)\n    val snapshotSerId: Rep[Int] = column[Int](snapshotTableCfg.columnNames.snapshotSerId)\n    val 
snapshotSerManifest: Rep[String] = column[String](snapshotTableCfg.columnNames.snapshotSerManifest)\n\n    val metaPayload: Rep[Option[Array[Byte]]] = column[Option[Array[Byte]]](snapshotTableCfg.columnNames.metaPayload)\n    val metaSerId: Rep[Option[Int]] = column[Option[Int]](snapshotTableCfg.columnNames.metaSerId)\n    val metaSerManifest: Rep[Option[String]] = column[Option[String]](snapshotTableCfg.columnNames.metaSerManifest)\n\n    val pk = primaryKey(s\"${tableName}_pk\", (persistenceId, sequenceNumber))\n  }\n\n  case class OracleSnapshot(_tableTag: Tag) extends Snapshot(_tableTag) {\n    import java.sql.Blob\n\n    import javax.sql.rowset.serial.SerialBlob\n\n    private val columnType =\n      MappedColumnType.base[Array[Byte], Blob](bytes => new SerialBlob(bytes), blob => blob.getBinaryStream.toArray)\n\n    override val snapshotPayload: Rep[Array[Byte]] =\n      column[Array[Byte]](snapshotTableCfg.columnNames.snapshotPayload)(columnType)\n  }\n\n  lazy val SnapshotTable = new TableQuery(tag =>\n    if (isOracleDriver(profile)) OracleSnapshot(tag) else new Snapshot(tag))\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/legacy/ByteArraySnapshotDao.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.snapshot.dao.legacy\n\nimport akka.persistence.SnapshotMetadata\nimport akka.persistence.jdbc.config.SnapshotConfig\nimport akka.persistence.jdbc.snapshot.dao.legacy.SnapshotTables.SnapshotRow\nimport akka.persistence.jdbc.snapshot.dao.SnapshotDao\nimport akka.serialization.Serialization\nimport akka.stream.Materializer\nimport slick.jdbc.{ JdbcBackend, JdbcProfile }\n\nimport scala.concurrent.{ ExecutionContext, Future }\nimport scala.util.{ Failure, Success }\n\nclass ByteArraySnapshotDao(\n    db: JdbcBackend#Database,\n    profile: JdbcProfile,\n    snapshotConfig: SnapshotConfig,\n    serialization: Serialization)(implicit ec: ExecutionContext, val mat: Materializer)\n    extends SnapshotDao {\n  import profile.api._\n\n  val queries = new SnapshotQueries(profile, snapshotConfig.legacySnapshotTableConfiguration)\n\n  val serializer = new ByteArraySnapshotSerializer(serialization)\n\n  def toSnapshotData(row: SnapshotRow): (SnapshotMetadata, Any) =\n    serializer.deserialize(row) match {\n      case Success(deserialized) => deserialized\n      case Failure(cause)        => throw cause\n    }\n\n  override def latestSnapshot(persistenceId: String): Future[Option[(SnapshotMetadata, Any)]] =\n    for {\n      rows <- db.run(queries.selectLatestByPersistenceId(persistenceId).result)\n    } yield rows.headOption.map(toSnapshotData)\n\n  override def snapshotForMaxTimestamp(\n      persistenceId: String,\n      maxTimestamp: Long): Future[Option[(SnapshotMetadata, Any)]] =\n    for {\n      rows <- db.run(queries.selectOneByPersistenceIdAndMaxTimestamp((persistenceId, maxTimestamp)).result)\n    } yield rows.headOption.map(toSnapshotData)\n\n  override def snapshotForMaxSequenceNr(\n      persistenceId: String,\n      maxSequenceNr: Long): Future[Option[(SnapshotMetadata, 
Any)]] =\n    for {\n      rows <- db.run(queries.selectOneByPersistenceIdAndMaxSequenceNr((persistenceId, maxSequenceNr)).result)\n    } yield rows.headOption.map(toSnapshotData)\n\n  override def snapshotForMaxSequenceNrAndMaxTimestamp(\n      persistenceId: String,\n      maxSequenceNr: Long,\n      maxTimestamp: Long): Future[Option[(SnapshotMetadata, Any)]] =\n    for {\n      rows <- db.run(\n        queries\n          .selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp((persistenceId, maxSequenceNr, maxTimestamp))\n          .result)\n    } yield rows.headOption.map(toSnapshotData)\n\n  override def save(snapshotMetadata: SnapshotMetadata, snapshot: Any): Future[Unit] = {\n    val eventualSnapshotRow = Future.fromTry(serializer.serialize(snapshotMetadata, snapshot))\n    eventualSnapshotRow.map(queries.insertOrUpdate).flatMap(db.run).map(_ => ())\n  }\n\n  override def delete(persistenceId: String, sequenceNr: Long): Future[Unit] =\n    for {\n      _ <- db.run(queries.selectByPersistenceIdAndSequenceNr((persistenceId, sequenceNr)).delete)\n    } yield ()\n\n  override def deleteAllSnapshots(persistenceId: String): Future[Unit] =\n    for {\n      _ <- db.run(queries.selectAll(persistenceId).delete)\n    } yield ()\n\n  override def deleteUpToMaxSequenceNr(persistenceId: String, maxSequenceNr: Long): Future[Unit] =\n    for {\n      _ <- db.run(queries.selectByPersistenceIdUpToMaxSequenceNr((persistenceId, maxSequenceNr)).delete)\n    } yield ()\n\n  override def deleteUpToMaxTimestamp(persistenceId: String, maxTimestamp: Long): Future[Unit] =\n    for {\n      _ <- db.run(queries.selectByPersistenceIdUpToMaxTimestamp((persistenceId, maxTimestamp)).delete)\n    } yield ()\n\n  override def deleteUpToMaxSequenceNrAndMaxTimestamp(\n      persistenceId: String,\n      maxSequenceNr: Long,\n      maxTimestamp: Long): Future[Unit] =\n    for {\n      _ <- db.run(\n        queries\n          
.selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp((persistenceId, maxSequenceNr, maxTimestamp))\n          .delete)\n    } yield ()\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/legacy/ByteArraySnapshotSerializer.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.snapshot.dao.legacy\n\nimport akka.persistence.SnapshotMetadata\nimport akka.persistence.jdbc.serialization.SnapshotSerializer\nimport akka.persistence.jdbc.snapshot.dao.legacy.SnapshotTables.SnapshotRow\nimport akka.persistence.serialization.Snapshot\nimport akka.serialization.Serialization\n\nimport scala.util.Try\n\nclass ByteArraySnapshotSerializer(serialization: Serialization) extends SnapshotSerializer[SnapshotRow] {\n  def serialize(metadata: SnapshotMetadata, snapshot: Any): Try[SnapshotRow] = {\n    serialization\n      .serialize(Snapshot(snapshot))\n      .map(SnapshotRow(metadata.persistenceId, metadata.sequenceNr, metadata.timestamp, _))\n  }\n\n  def deserialize(snapshotRow: SnapshotRow): Try[(SnapshotMetadata, Any)] = {\n    serialization\n      .deserialize(snapshotRow.snapshot, classOf[Snapshot])\n      .map(snapshot => {\n        val snapshotMetadata =\n          SnapshotMetadata(snapshotRow.persistenceId, snapshotRow.sequenceNumber, snapshotRow.created)\n        (snapshotMetadata, snapshot.data)\n      })\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/legacy/SnapshotQueries.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.snapshot.dao.legacy\n\nimport akka.persistence.jdbc.config.LegacySnapshotTableConfiguration\nimport akka.persistence.jdbc.snapshot.dao.legacy.SnapshotTables.SnapshotRow\nimport slick.jdbc.JdbcProfile\n\nclass SnapshotQueries(val profile: JdbcProfile, override val snapshotTableCfg: LegacySnapshotTableConfiguration)\n    extends SnapshotTables {\n  import profile.api._\n\n  private val SnapshotTableC = Compiled(SnapshotTable)\n\n  def insertOrUpdate(snapshotRow: SnapshotRow) =\n    SnapshotTableC.insertOrUpdate(snapshotRow)\n\n  private def _selectAll(persistenceId: Rep[String]) =\n    SnapshotTable.filter(_.persistenceId === persistenceId).sortBy(_.sequenceNumber.desc)\n  val selectAll = Compiled(_selectAll _)\n\n  private def _selectLatestByPersistenceId(persistenceId: Rep[String]) =\n    _selectAll(persistenceId).take(1)\n  val selectLatestByPersistenceId = Compiled(_selectLatestByPersistenceId _)\n\n  private def _selectByPersistenceIdAndSequenceNr(persistenceId: Rep[String], sequenceNr: Rep[Long]) =\n    _selectAll(persistenceId).filter(_.sequenceNumber === sequenceNr)\n  val selectByPersistenceIdAndSequenceNr = Compiled(_selectByPersistenceIdAndSequenceNr _)\n\n  private def _selectByPersistenceIdUpToMaxTimestamp(persistenceId: Rep[String], maxTimestamp: Rep[Long]) =\n    _selectAll(persistenceId).filter(_.created <= maxTimestamp)\n  val selectByPersistenceIdUpToMaxTimestamp = Compiled(_selectByPersistenceIdUpToMaxTimestamp _)\n\n  private def _selectByPersistenceIdUpToMaxSequenceNr(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) =\n    _selectAll(persistenceId).filter(_.sequenceNumber <= maxSequenceNr)\n  val selectByPersistenceIdUpToMaxSequenceNr = Compiled(_selectByPersistenceIdUpToMaxSequenceNr _)\n\n  private def 
_selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp(\n      persistenceId: Rep[String],\n      maxSequenceNr: Rep[Long],\n      maxTimestamp: Rep[Long]) =\n    _selectByPersistenceIdUpToMaxSequenceNr(persistenceId, maxSequenceNr).filter(_.created <= maxTimestamp)\n  val selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp = Compiled(\n    _selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp _)\n\n  private def _selectOneByPersistenceIdAndMaxTimestamp(persistenceId: Rep[String], maxTimestamp: Rep[Long]) =\n    _selectAll(persistenceId).filter(_.created <= maxTimestamp).take(1)\n  val selectOneByPersistenceIdAndMaxTimestamp = Compiled(_selectOneByPersistenceIdAndMaxTimestamp _)\n\n  private def _selectOneByPersistenceIdAndMaxSequenceNr(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) =\n    _selectAll(persistenceId).filter(_.sequenceNumber <= maxSequenceNr).take(1)\n  val selectOneByPersistenceIdAndMaxSequenceNr = Compiled(_selectOneByPersistenceIdAndMaxSequenceNr _)\n\n  private def _selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp(\n      persistenceId: Rep[String],\n      maxSequenceNr: Rep[Long],\n      maxTimestamp: Rep[Long]) =\n    _selectByPersistenceIdUpToMaxSequenceNr(persistenceId, maxSequenceNr).filter(_.created <= maxTimestamp).take(1)\n  val selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp = Compiled(\n    _selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp _)\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/snapshot/dao/legacy/SnapshotTables.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.snapshot.dao.legacy\n\nimport akka.persistence.jdbc.config.LegacySnapshotTableConfiguration\nimport akka.persistence.jdbc.snapshot.dao.legacy.SnapshotTables.{ isOracleDriver, SnapshotRow }\nimport akka.persistence.jdbc.util.InputStreamOps._\nimport slick.jdbc.JdbcProfile\n\nobject SnapshotTables {\n  case class SnapshotRow(persistenceId: String, sequenceNumber: Long, created: Long, snapshot: Array[Byte])\n  def isOracleDriver(profile: JdbcProfile): Boolean =\n    profile match {\n      case _: slick.jdbc.OracleProfile => true\n      case _                           => false\n    }\n}\n\ntrait SnapshotTables {\n  val profile: slick.jdbc.JdbcProfile\n\n  import profile.api._\n\n  def snapshotTableCfg: LegacySnapshotTableConfiguration\n\n  class Snapshot(_tableTag: Tag)\n      extends Table[SnapshotRow](\n        _tableTag,\n        _schemaName = snapshotTableCfg.schemaName,\n        _tableName = snapshotTableCfg.tableName) {\n    def * = (persistenceId, sequenceNumber, created, snapshot).<>((SnapshotRow.apply _).tupled, SnapshotRow.unapply)\n\n    val persistenceId: Rep[String] =\n      column[String](snapshotTableCfg.columnNames.persistenceId, O.Length(255, varying = true))\n    val sequenceNumber: Rep[Long] = column[Long](snapshotTableCfg.columnNames.sequenceNumber)\n    val created: Rep[Long] = column[Long](snapshotTableCfg.columnNames.created)\n    val snapshot: Rep[Array[Byte]] = column[Array[Byte]](snapshotTableCfg.columnNames.snapshot)\n    val pk = primaryKey(s\"${tableName}_pk\", (persistenceId, sequenceNumber))\n  }\n\n  case class OracleSnapshot(_tableTag: Tag) extends Snapshot(_tableTag) {\n    import java.sql.Blob\n\n    import javax.sql.rowset.serial.SerialBlob\n\n    private val columnType =\n      MappedColumnType.base[Array[Byte], Blob](bytes => new 
SerialBlob(bytes), blob => blob.getBinaryStream.toArray)\n    override val snapshot: Rep[Array[Byte]] = column[Array[Byte]](snapshotTableCfg.columnNames.snapshot)(columnType)\n  }\n\n  lazy val SnapshotTable = new TableQuery(tag =>\n    if (isOracleDriver(profile)) OracleSnapshot(tag) else new Snapshot(tag))\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/state/DurableStateQueries.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state\n\nimport akka.annotation.InternalApi\nimport slick.jdbc.{ JdbcProfile, SetParameter }\nimport slick.jdbc.H2Profile\nimport slick.jdbc.MySQLProfile\nimport slick.jdbc.OracleProfile\nimport slick.jdbc.PostgresProfile\nimport slick.jdbc.SQLServerProfile\nimport akka.persistence.jdbc.config.DurableStateTableConfiguration\n\n/**\n * INTERNAL API\n */\n@InternalApi private[akka] class DurableStateQueries(\n    val profile: JdbcProfile,\n    override val durableStateTableCfg: DurableStateTableConfiguration)\n    extends DurableStateTables {\n  import profile.api._\n\n  // Identifiers must be quoted via the profile so the raw-SQL INSERT/UPDATE paths use the\n  // same quoting as Slick's typed-query SELECT path. Without this, e.g. H2 in default mode\n  // uppercases unquoted identifiers, which doesn't match the lowercase quoted identifiers\n  // used by the schema and Slick's typed queries.\n  val tableAndSchema =\n    durableStateTableCfg.schemaName.fold(profile.quoteIdentifier(durableStateTableCfg.tableName))(schema =>\n      s\"${profile.quoteIdentifier(schema)}.${profile.quoteIdentifier(durableStateTableCfg.tableName)}\")\n\n  private val persistenceIdColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.persistenceId)\n  private val globalOffsetColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.globalOffset)\n  private val revisionColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.revision)\n  private val statePayloadColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.statePayload)\n  private val stateSerIdColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.stateSerId)\n  private val stateSerManifestColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.stateSerManifest)\n  private val 
tagColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.tag)\n  private val stateTimestampColumn = profile.quoteIdentifier(durableStateTableCfg.columnNames.stateTimestamp)\n\n  private def slickProfileToSchemaType(profile: JdbcProfile): String =\n    profile match {\n      case PostgresProfile  => \"Postgres\"\n      case MySQLProfile     => \"MySQL\"\n      case OracleProfile    => \"Oracle\"\n      case SQLServerProfile => \"SqlServer\"\n      case H2Profile        => \"H2\"\n      case _                => throw new IllegalArgumentException(s\"Unknown JdbcProfile $profile encountered\")\n    }\n\n  lazy val sequenceNextValUpdater = slickProfileToSchemaType(profile) match {\n    case \"H2\"       => new H2SequenceNextValUpdater(profile, durableStateTableCfg)\n    case \"Postgres\" => new PostgresSequenceNextValUpdater(profile, durableStateTableCfg)\n    case _          => ???\n  }\n\n  implicit val uuidSetter: SetParameter[Array[Byte]] = SetParameter[Array[Byte]] { case (bytes, params) =>\n    params.setBytes(bytes)\n  }\n\n  private[jdbc] def selectFromDbByPersistenceId(persistenceId: Rep[String]) =\n    durableStateTable.filter(_.persistenceId === persistenceId)\n\n  private[jdbc] def insertDbWithDurableState(row: DurableStateTables.DurableStateRow, seqNextValue: String) = {\n    sqlu\"\"\"INSERT INTO #$tableAndSchema\n            (\n             #$persistenceIdColumn,\n             #$globalOffsetColumn,\n             #$revisionColumn,\n             #$statePayloadColumn,\n             #$stateSerIdColumn,\n             #$stateSerManifestColumn,\n             #$tagColumn,\n             #$stateTimestampColumn\n            )\n            VALUES\n            (\n              ${row.persistenceId},\n              #${seqNextValue},\n              ${row.revision},\n              ${row.statePayload},\n              ${row.stateSerId},\n              ${row.stateSerManifest},\n              ${row.tag},\n              #${System.currentTimeMillis()}\n            
)\n      \"\"\"\n  }\n\n  private[jdbc] def updateDbWithDurableState(row: DurableStateTables.DurableStateRow, seqNextValue: String) = {\n    sqlu\"\"\"UPDATE #$tableAndSchema\n           SET #$globalOffsetColumn = #${seqNextValue},\n               #$revisionColumn = ${row.revision},\n               #$statePayloadColumn = ${row.statePayload},\n               #$stateSerIdColumn = ${row.stateSerId},\n               #$stateSerManifestColumn = ${row.stateSerManifest},\n               #$tagColumn = ${row.tag},\n               #$stateTimestampColumn = ${System.currentTimeMillis}\n           WHERE #$persistenceIdColumn = ${row.persistenceId}\n             AND #$revisionColumn = ${row.revision} - 1\n        \"\"\"\n  }\n\n  private[jdbc] def getSequenceNextValueExpr() = sequenceNextValUpdater.getSequenceNextValueExpr()\n\n  def deleteFromDb(persistenceId: String) = {\n    durableStateTable.filter(_.persistenceId === persistenceId).delete\n  }\n\n  def deleteAllFromDb() = {\n    durableStateTable.delete\n  }\n\n  private[jdbc] val maxOffsetQuery = Compiled {\n    durableStateTable.map(_.globalOffset).max.getOrElse(0L)\n  }\n\n  private def _changesByTag(\n      tag: Rep[String],\n      offset: ConstColumn[Long],\n      maxOffset: ConstColumn[Long],\n      max: ConstColumn[Long]) = {\n    durableStateTable\n      .filter(_.tag === tag)\n      .sortBy(_.globalOffset.asc)\n      .filter(row => row.globalOffset > offset && row.globalOffset <= maxOffset)\n      .take(max)\n  }\n\n  private[jdbc] val changesByTag = Compiled(_changesByTag _)\n\n  private def _stateStoreStateQuery(from: ConstColumn[Long], limit: ConstColumn[Long]) =\n    durableStateTable // FIXME change this to a specialized query to only retrieve the 3 columns of interest\n      .filter(_.globalOffset > from)\n      .sortBy(_.globalOffset.asc)\n      .take(limit)\n      .map(s => (s.persistenceId, s.globalOffset, s.revision))\n\n  val stateStoreStateQuery = Compiled(_stateStoreStateQuery _)\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/state/DurableStateTables.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state\n\nimport akka.annotation.InternalApi\nimport akka.persistence.jdbc.config.DurableStateTableConfiguration\n\n/**\n * INTERNAL API\n */\n@InternalApi private[akka] object DurableStateTables {\n  case class DurableStateRow(\n      globalOffset: Long,\n      persistenceId: String,\n      revision: Long,\n      statePayload: Array[Byte],\n      tag: Option[String],\n      stateSerId: Int,\n      stateSerManifest: Option[String],\n      stateTimestamp: Long)\n}\n\n/**\n * INTERNAL API\n */\n@InternalApi private[akka] trait DurableStateTables {\n  val profile: slick.jdbc.JdbcProfile\n  import profile.api._\n  def durableStateTableCfg: DurableStateTableConfiguration\n\n  import DurableStateTables._\n\n  class DurableState(_tableTag: Tag)\n      extends Table[DurableStateRow](\n        _tableTag,\n        _schemaName = durableStateTableCfg.schemaName,\n        _tableName = durableStateTableCfg.tableName) {\n\n    def * =\n      (globalOffset, persistenceId, revision, statePayload, tag, stateSerId, stateSerManifest, stateTimestamp)\n        .<>((DurableStateRow.apply _).tupled, DurableStateRow.unapply)\n\n    val globalOffset: Rep[Long] = column[Long](durableStateTableCfg.columnNames.globalOffset, O.AutoInc)\n    val persistenceId: Rep[String] =\n      column[String](durableStateTableCfg.columnNames.persistenceId, O.PrimaryKey, O.Length(255, varying = true))\n    val revision: Rep[Long] = column[Long](durableStateTableCfg.columnNames.revision)\n    val statePayload: Rep[Array[Byte]] = column[Array[Byte]](durableStateTableCfg.columnNames.statePayload)\n    val tag: Rep[Option[String]] = column[Option[String]](durableStateTableCfg.columnNames.tag)\n    val stateSerId: Rep[Int] = column[Int](durableStateTableCfg.columnNames.stateSerId)\n    val stateSerManifest: 
Rep[Option[String]] =\n      column[Option[String]](durableStateTableCfg.columnNames.stateSerManifest)\n    val stateTimestamp: Rep[Long] = column[Long](durableStateTableCfg.columnNames.stateTimestamp)\n\n    val globalOffsetIdx = index(s\"${tableName}_globalOffset_idx\", globalOffset, unique = true)\n  }\n  lazy val durableStateTable = new TableQuery(new DurableState(_))\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/state/JdbcDurableStateStoreProvider.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state\n\nimport scala.concurrent.ExecutionContext\nimport slick.jdbc.JdbcProfile\nimport slick.jdbc.JdbcBackend._\nimport akka.actor.ExtendedActorSystem\nimport akka.persistence.jdbc.config.DurableStateTableConfiguration\nimport akka.persistence.state.scaladsl.DurableStateStore\nimport akka.persistence.state.javadsl.{ DurableStateStore => JDurableStateStore }\nimport akka.persistence.state.DurableStateStoreProvider\nimport akka.persistence.jdbc.db.{ SlickDatabase, SlickExtension }\nimport akka.serialization.SerializationExtension\nimport akka.stream.{ Materializer, SystemMaterializer }\nimport com.typesafe.config.Config\n\nclass JdbcDurableStateStoreProvider[A](system: ExtendedActorSystem, cfg: Config, cfgPath: String)\n    extends DurableStateStoreProvider {\n\n  implicit val ec: ExecutionContext = system.dispatcher\n  implicit val mat: Materializer = SystemMaterializer(system).materializer\n\n  val config = system.settings.config\n\n  val slickDb: SlickDatabase =\n    SlickExtension(system).database(config.getConfig(cfgPath))\n  def db: Database = slickDb.database\n\n  lazy val durableStateConfig = new DurableStateTableConfiguration(config.getConfig(cfgPath))\n  lazy val serialization = SerializationExtension(system)\n  val profile: JdbcProfile = slickDb.profile\n\n  private lazy val _scaladslDurableStateStore: DurableStateStore[Any] =\n    new scaladsl.JdbcDurableStateStore[Any](cfgPath, db, profile, durableStateConfig, serialization)(system)\n\n  override def scaladslDurableStateStore(): DurableStateStore[Any] =\n    _scaladslDurableStateStore\n\n  override def javadslDurableStateStore(): JDurableStateStore[AnyRef] =\n    new javadsl.JdbcDurableStateStore[AnyRef](\n      profile,\n      durableStateConfig,\n      
_scaladslDurableStateStore.asInstanceOf[scaladsl.JdbcDurableStateStore[AnyRef]])\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/state/OffsetOps.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state\n\nimport akka.persistence.query._\nobject OffsetSyntax {\n  implicit class OffsetOps(val that: Offset) extends AnyVal {\n    def value =\n      that match {\n        case Sequence(offsetValue) => offsetValue\n        case NoOffset              => 0L\n        case _ =>\n          throw new IllegalArgumentException(\n            \"akka-persistence-jdbc does not support \" + that.getClass.getName + \" offsets\")\n      }\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/state/SequenceNextValUpdater.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state\n\nimport akka.annotation.InternalApi\nimport akka.persistence.jdbc.config.DurableStateTableConfiguration\nimport slick.jdbc.JdbcProfile\nimport slick.dbio.Effect\nimport slick.sql.SqlStreamingAction\n\n/**\n * INTERNAL API\n */\n@InternalApi private[jdbc] trait SequenceNextValUpdater {\n  def getSequenceNextValueExpr(): SqlStreamingAction[Vector[String], String, Effect]\n}\n\n/**\n * INTERNAL API\n */\n@InternalApi private[jdbc] class H2SequenceNextValUpdater(\n    profile: JdbcProfile,\n    val durableStateTableCfg: DurableStateTableConfiguration)\n    extends SequenceNextValUpdater {\n  import profile.api._\n\n  // H2 dependent (https://stackoverflow.com/questions/36244641/h2-equivalent-of-postgres-serial-or-bigserial-column)\n  def getSequenceNextValueExpr() = {\n    sql\"\"\"SELECT COLUMN_DEFAULT\n          FROM INFORMATION_SCHEMA.COLUMNS\n          WHERE TABLE_NAME = '#${durableStateTableCfg.tableName}'\n            AND COLUMN_NAME = '#${durableStateTableCfg.columnNames.globalOffset}'\n            AND TABLE_SCHEMA = '#${durableStateTableCfg.schemaName.getOrElse(\"PUBLIC\")}'\"\"\".as[String]\n  }\n}\n\n/**\n * INTERNAL API\n */\n@InternalApi private[jdbc] class PostgresSequenceNextValUpdater(\n    profile: JdbcProfile,\n    val durableStateTableCfg: DurableStateTableConfiguration)\n    extends SequenceNextValUpdater {\n  import profile.api._\n  private val schemaPrefix = durableStateTableCfg.schemaName.map(n => s\"$n.\").getOrElse(\"\")\n  final val nextValFetcher =\n    s\"\"\"(SELECT nextval(pg_get_serial_sequence('$schemaPrefix${durableStateTableCfg.tableName}', '${durableStateTableCfg.columnNames.globalOffset}')))\"\"\"\n\n  def getSequenceNextValueExpr() = sql\"\"\"#$nextValFetcher\"\"\".as[String]\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/state/javadsl/JdbcDurableStateStore.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state.javadsl\n\nimport java.util.Optional\nimport java.util.concurrent.CompletionStage\nimport scala.jdk.FutureConverters._\nimport scala.concurrent.ExecutionContext\nimport akka.annotation.ApiMayChange\nimport slick.jdbc.JdbcProfile\nimport akka.{ Done, NotUsed }\nimport akka.persistence.state.javadsl.{ DurableStateUpdateStore, GetObjectResult }\nimport akka.persistence.jdbc.state.DurableStateQueries\nimport akka.persistence.jdbc.config.DurableStateTableConfiguration\nimport akka.persistence.jdbc.state.scaladsl.{ JdbcDurableStateStore => ScalaJdbcDurableStateStore }\nimport akka.persistence.query.{ DurableStateChange, Offset }\nimport akka.persistence.query.javadsl.DurableStateStoreQuery\nimport akka.stream.javadsl.Source\n\nimport scala.annotation.nowarn\n\nobject JdbcDurableStateStore {\n  val Identifier = ScalaJdbcDurableStateStore.Identifier\n}\n\n/**\n * API may change\n */\n@ApiMayChange\nclass JdbcDurableStateStore[A](\n    profile: JdbcProfile,\n    durableStateConfig: DurableStateTableConfiguration,\n    scalaStore: akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore[A])(implicit ec: ExecutionContext)\n    extends DurableStateUpdateStore[A]\n    with DurableStateStoreQuery[A] {\n\n  val queries = new DurableStateQueries(profile, durableStateConfig)\n\n  def getObject(persistenceId: String): CompletionStage[GetObjectResult[A]] =\n    scalaStore\n      .getObject(persistenceId)\n      .map(x => GetObjectResult(Optional.ofNullable(x.value.getOrElse(null.asInstanceOf[A])), x.revision))\n      .asJava\n\n  def upsertObject(persistenceId: String, revision: Long, value: A, tag: String): CompletionStage[Done] =\n    scalaStore.upsertObject(persistenceId, revision, value, tag).asJava\n\n  @deprecated(message = \"Use the deleteObject overload with revision 
instead.\", since = \"1.0.0\")\n  override def deleteObject(persistenceId: String): CompletionStage[Done] =\n    deleteObject(persistenceId, revision = 0)\n\n  @nowarn(\"msg=deprecated\")\n  override def deleteObject(persistenceId: String, revision: Long): CompletionStage[Done] =\n    scalaStore.deleteObject(persistenceId).asJava\n\n  def currentChanges(tag: String, offset: Offset): Source[DurableStateChange[A], NotUsed] =\n    scalaStore.currentChanges(tag, offset).asJava\n\n  def changes(tag: String, offset: Offset): Source[DurableStateChange[A], NotUsed] =\n    scalaStore.changes(tag, offset).asJava\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/state/scaladsl/DurableStateSequenceActor.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state.scaladsl\n\nimport scala.collection.immutable.NumericRange\n\nimport akka.actor.{ Actor, ActorLogging, Props, Status, Timers }\nimport akka.pattern.pipe\nimport akka.persistence.jdbc.config.DurableStateSequenceRetrievalConfig\nimport akka.stream.Materializer\nimport akka.stream.scaladsl.Sink\nimport scala.concurrent.duration.FiniteDuration\n\nimport akka.annotation.InternalApi\n\n/**\n * INTERNAL API\n */\n@InternalApi private[akka] object DurableStateSequenceActor {\n  def props[A](stateStore: JdbcDurableStateStore[A], config: DurableStateSequenceRetrievalConfig)(\n      implicit materializer: Materializer): Props = Props(new DurableStateSequenceActor(stateStore, config))\n\n  case class VisitedElement(pid: PersistenceId, offset: GlobalOffset, revision: Revision) {\n    override def toString = s\"($pid, $offset, $revision)\"\n  }\n  private case object QueryState\n  private case class NewStateInfo(originalOffset: Long, elements: List[VisitedElement])\n\n  private case class ScheduleAssumeMaxGlobalOffset(max: GlobalOffset)\n  private case class AssumeMaxGlobalOffset(max: GlobalOffset)\n\n  case object GetMaxGlobalOffset\n  case class MaxGlobalOffset(maxOffset: GlobalOffset)\n\n  private case object QueryGlobalOffsetsTimerKey\n  private case object AssumeMaxGlobalOffsetTimerKey\n\n  private type GlobalOffset = Long\n  private type PersistenceId = String\n  private type Revision = Long\n\n  /**\n   * Efficient representation of missing elements using NumericRanges.\n   * It can be seen as a collection of GlobalOffset\n   */\n  case class MissingElements(elements: Seq[NumericRange[GlobalOffset]]) {\n    def addRange(from: GlobalOffset, until: GlobalOffset): MissingElements = {\n      val newRange = from.until(until)\n      MissingElements(elements :+ newRange)\n    }\n    
def contains(id: GlobalOffset): Boolean = elements.exists(_.containsTyped(id))\n    def isEmpty: Boolean = elements.forall(_.isEmpty)\n    def size: Int = elements.map(_.size).sum\n    override def toString: String = {\n      elements\n        .collect {\n          case range if range.nonEmpty =>\n            if (range.size == 1) range.start.toString\n            else s\"${range.start}-${range.end}\"\n        }\n        .mkString(\", \")\n    }\n  }\n  private object MissingElements {\n    def empty: MissingElements = MissingElements(Vector.empty)\n  }\n}\n\n/**\n * This actor supports `changesByTag` query to ensure that we don't miss any offsets in the result.\n * In case some offsets are missing we need to re-query (with a delay) and try to fetch the\n * missing offsets. It may be so that the offsets are really missing, in which case we identify them\n * as genuine gaps and continue after `config.maxTries`.\n *\n * There can be three reasons for gaps:\n *\n * 1. The transaction was rolled back. The global offset sequence incremental is not part of the transaction.\n * 2. Global offset is assigned from incrementing a database sequence. The sequence is not part of the\n *    transactions and may result in different order than the commit order. Meaning that in the queries we\n *    may see a later offset before seeing earlier offset. Those missing offsets will be seen when we\n *    re-query. See further explanation in for example\n *    https://espadrine.github.io/blog/posts/two-postgresql-sequence-misconceptions.html\n * 3. There are multiple updates (revisions) to the same persistence id and the queries may only see the\n *    latest revision. Meaning that the additional earlier revisions will be seen as offset gaps.\n *\n * If offset gaps have been detected we try to confirm the gaps by looking at revision changes of\n * individual persistence ids. 
We keep a cache of previously known revision per persistence ids.\n * If the total number of revision changes corresponds to the number of missing offsets they are\n * considered confirmed to be from case 3 and we can continue without re-query delays.\n *\n * Note that if we have seen revision 10 of p6 and we retrieve revision 13 of p6, we also know that there have been\n * revision 11 and 12 of p6. We are using READ COMMITTED transaction isolation level and we have a check of\n * sequentiality of revisions in `upsert` implementation.\n *\n * We have to delay and re-query for new persistence ids with revision > 1 that we don't know the previous revision,\n * because that could be gaps from case 1 or 2.\n *\n * If gaps cannot be confirmed it will re-query up to `config.maxTries` times before giving up and continue with\n * the highest offset. For example case 1.\n *\n * INTERNAL API\n */\n@InternalApi\nprivate[akka] class DurableStateSequenceActor[A](\n    stateStore: JdbcDurableStateStore[A],\n    config: DurableStateSequenceRetrievalConfig)(implicit materializer: Materializer)\n    extends Actor\n    with ActorLogging\n    with Timers {\n  import DurableStateSequenceActor._\n  import context.dispatcher\n  import config.{ batchSize, maxBackoffQueryDelay, maxTries, queryDelay, revisionCacheCapacity }\n\n  private val revisionCache = collection.mutable.Map.empty[PersistenceId, VisitedElement]\n\n  override def receive: Receive = receive(0L, Map.empty, 0)\n\n  override def preStart(): Unit = {\n    self ! QueryState\n    stateStore.maxStateStoreOffset().mapTo[Long].onComplete {\n      case scala.util.Success(maxInDatabase) =>\n        self ! ScheduleAssumeMaxGlobalOffset(maxInDatabase)\n      case scala.util.Failure(t) =>\n        log.info(\"Failed to recover fast, using state-by-state recovery instead. 
Cause: {}\", t)\n    }\n  }\n\n  /**\n   * @param currentMaxGlobalOffset The highest offset value for which it is known that no missing elements exist\n   * @param missingByCounter A map with missing offsets. The key of the map is the count at which the missing elements\n   *                         can be assumed to be \"skipped ids\" (they are no longer assumed missing). Used together\n   *                         with the `moduloCounter` to implement a \"sliding window\" where missing offsets are\n   *                         re-tried up to `maxTries` before assumed ok.\n   * @param moduloCounter A counter which is incremented every time a new query have been executed, modulo `maxTries`\n   * @param previousDelay The last used delay (may change in case failures occur)\n   */\n  final def receive(\n      currentMaxGlobalOffset: GlobalOffset,\n      missingByCounter: Map[Int, MissingElements],\n      moduloCounter: Int,\n      previousDelay: FiniteDuration = queryDelay): Receive = {\n    case ScheduleAssumeMaxGlobalOffset(max) =>\n      // All elements smaller than max can be assumed missing after this delay\n      val delay = queryDelay * maxTries\n      timers.startSingleTimer(key = AssumeMaxGlobalOffsetTimerKey, AssumeMaxGlobalOffset(max), delay)\n\n    case AssumeMaxGlobalOffset(max) =>\n      if (currentMaxGlobalOffset < max) {\n        context.become(receive(max, missingByCounter, moduloCounter, previousDelay))\n      }\n\n    case GetMaxGlobalOffset =>\n      sender() ! 
MaxGlobalOffset(currentMaxGlobalOffset)\n\n    case QueryState =>\n      stateStore\n        .stateStoreStateInfo(currentMaxGlobalOffset, batchSize)\n        .runWith(Sink.seq)\n        .map(result =>\n          NewStateInfo(\n            currentMaxGlobalOffset,\n            result.map { case (pid, offset, rev) =>\n              VisitedElement(pid, offset, rev)\n            }.toList))\n        .pipeTo(self)\n\n    case NewStateInfo(originalOffset, _) if originalOffset < currentMaxGlobalOffset =>\n      // search was done using an offset that became obsolete in the meantime\n      // therefore we start a new query\n      self ! QueryState\n\n    case NewStateInfo(_, elements) =>\n      findGaps(elements, currentMaxGlobalOffset, missingByCounter, moduloCounter)\n\n    case Status.Failure(t) =>\n      val newDelay = maxBackoffQueryDelay.min(previousDelay * 2)\n      if (newDelay == maxBackoffQueryDelay) {\n        log.warning(\"Failed to query max global offset because of {}, retrying in [{}]\", t, newDelay.toCoarsest)\n      }\n      scheduleQuery(newDelay)\n      context.become(receive(currentMaxGlobalOffset, missingByCounter, moduloCounter, newDelay))\n  }\n\n  /**\n   * This method that implements the \"find gaps\" algo. 
It's the meat and main purpose of this actor.\n   */\n  final def findGaps(\n      elements: List[VisitedElement],\n      currentMaxOffset: GlobalOffset,\n      missingByCounter: Map[Int, MissingElements],\n      moduloCounter: Int): Unit = {\n    // list of elements that will be considered as genuine gaps.\n    // `givenUp` is either empty or was filled on a previous iteration\n    val givenUp = missingByCounter.getOrElse(moduloCounter, MissingElements.empty)\n\n    val (nextMax, _, missingElems) =\n      // using the global offset of the elements that were fetched, we verify if there are any gaps\n      elements.foldLeft[(GlobalOffset, GlobalOffset, MissingElements)](\n        (currentMaxOffset, currentMaxOffset, MissingElements.empty)) {\n        case ((currentMax, previousOffset, missing), currentElement) =>\n          // we must decide if we move the cursor forward\n          val newMax =\n            if ((currentMax + 1).until(currentElement.offset).forall(givenUp.contains)) {\n              // we move the cursor forward when:\n              // 1) they have been detected as missing on previous iteration, it's time now to give up\n              // 2) current + 1 == currentElement (meaning no gap). 
Note that `forall` on an empty range always returns true\n              currentElement.offset\n            } else currentMax\n\n          // we accumulate in newMissing the gaps we detect on each iteration\n          val newMissing =\n            if (previousOffset + 1 == currentElement.offset || newMax == currentElement.offset) missing\n            else missing.addRange(previousOffset + 1, currentElement.offset)\n\n          (newMax, currentElement.offset, newMissing)\n      }\n\n    // these offsets will be used as givenUp after one round when back to the same  moduloCounter\n    val newMissingByCounter = missingByCounter + (moduloCounter -> missingElems)\n\n    // did we detect gaps in the current batch?\n    val noGapsFound = missingElems.isEmpty\n\n    // full batch means that we retrieved as much elements as the batchSize\n    // that happens when we are not yet at the end of the stream\n    val isFullBatch = elements.size == batchSize\n\n    if (noGapsFound) {\n      addToRevisionCache(elements, nextMax)\n      if (isFullBatch) {\n        // We can query again immediately, as this allows the actor to rapidly retrieve the real max offset.\n        // Using same moduloCounter.\n        self ! QueryState\n        context.become(receive(nextMax, newMissingByCounter, moduloCounter))\n      } else {\n        // keep querying but not immediately\n        scheduleQuery(queryDelay)\n        context.become(receive(nextMax, newMissingByCounter, (moduloCounter + 1) % maxTries))\n      }\n    } else {\n      // We detected gaps. When there are updates to the same persistence id we might not see all subsequent\n      // changes but only the latest. Those changes will be seen as gaps. 
By looking at the difference in revisions\n      // for persistence ids that we have seen before (included in the revisionCache) we try to confirm if\n      // the offset gaps can be filled by the revision changes.\n      val missingOffsetCount = missingElems.size\n\n      val (inBetweenRevisionChanges, newMaxOffset, cacheMissed) =\n        // in this fold we find the possibly new max offset and the total revision difference for all persistence ids\n        elements.foldLeft((0L, nextMax, false)) { case ((revChg, currMaxOffset, cacheMiss), elem) =>\n          revisionCache.get(elem.pid) match {\n            case Some(e) =>\n              // cache hit: find the revision difference\n              val maxOffset = math.max(currMaxOffset, elem.offset)\n              val revDiff = elem.revision - e.revision\n              if (revDiff <= 1) {\n                (revChg, maxOffset, cacheMiss)\n              } else {\n                val pidOffsets =\n                  (e.offset until elem.offset).tail // e.offset and elem.offset are known to not be missing\n                val missingCount = math.min(pidOffsets.count(missingElems.contains), revDiff - 1)\n                (revChg + missingCount, maxOffset, cacheMiss)\n              }\n            case None =>\n              // this persistence id was not present in the cache\n              (revChg, math.max(elem.offset, currMaxOffset), cacheMiss || elem.revision != 1L)\n          }\n        }\n\n      // in this case we want to keep querying but not immediately\n      scheduleQuery(queryDelay)\n\n      if (cacheMissed || missingOffsetCount != inBetweenRevisionChanges) {\n        // gaps could not be confirmed\n\n        if (log.isDebugEnabled) {\n          log.debug(\n            \"Offset gaps detected [{}]. Current max offset [{}]. 
[{}] gaps could not be confirmed by revision changes.{}\",\n            missingElems,\n            nextMax,\n            missingOffsetCount - inBetweenRevisionChanges,\n            if (cacheMissed) \" Some new persistence ids without previously known revision.\" else \"\")\n        }\n\n        addToRevisionCache(elements, nextMax)\n        context.become(receive(nextMax, newMissingByCounter, (moduloCounter + 1) % maxTries))\n      } else {\n        addToRevisionCache(elements, newMaxOffset)\n        context.become(receive(newMaxOffset, newMissingByCounter, (moduloCounter + 1) % maxTries))\n      }\n    }\n  }\n\n  private def addToRevisionCache(elements: List[VisitedElement], upToOffset: GlobalOffset): Unit = {\n    revisionCache ++= elements.iterator.collect { case e if e.offset <= upToOffset => e.pid -> e }\n    evictRevisionCacheIfNeeded()\n  }\n\n  private def evictRevisionCacheIfNeeded(): Unit = {\n    def divRoundUp(num: Int, divisor: Int): Int = (num + divisor - 1) / divisor\n\n    if (revisionCache.size > revisionCacheCapacity) {\n      val sortedEntries = revisionCache.toVector.sortBy { case (_, elem) => elem.offset }\n      // keep 90% of capacity\n      val numberOfEntriesToRemove = (sortedEntries.size - revisionCacheCapacity) + divRoundUp(revisionCacheCapacity, 10)\n      revisionCache --= sortedEntries.iterator.take(numberOfEntriesToRemove).map(_._1)\n    }\n  }\n\n  def scheduleQuery(delay: FiniteDuration): Unit = {\n    timers.startSingleTimer(key = QueryGlobalOffsetsTimerKey, QueryState, delay)\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/state/scaladsl/JdbcDurableStateStore.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state.scaladsl\n\nimport scala.concurrent.{ ExecutionContext, Future }\nimport scala.concurrent.duration._\nimport scala.util.Try\n\nimport slick.jdbc.{ JdbcBackend, JdbcProfile }\nimport akka.{ Done, NotUsed }\nimport akka.actor.ExtendedActorSystem\nimport akka.pattern.ask\nimport akka.persistence.state.scaladsl.{ DurableStateUpdateStore, GetObjectResult }\nimport akka.persistence.jdbc.AkkaSerialization\nimport akka.persistence.jdbc.state.DurableStateQueries\nimport akka.persistence.jdbc.config.DurableStateTableConfiguration\nimport akka.persistence.jdbc.state.{ DurableStateTables, OffsetSyntax }\nimport akka.persistence.query.{ DurableStateChange, Offset }\nimport akka.persistence.query.scaladsl.DurableStateStoreQuery\nimport akka.persistence.jdbc.journal.dao.FlowControl\nimport akka.serialization.Serialization\nimport akka.stream.scaladsl.{ Sink, Source }\nimport akka.stream.{ Materializer, SystemMaterializer }\nimport akka.util.Timeout\nimport OffsetSyntax._\nimport akka.annotation.ApiMayChange\nimport akka.persistence.query.UpdatedDurableState\n\nobject JdbcDurableStateStore {\n  val Identifier = \"jdbc-durable-state-store\"\n}\n\n/**\n * API may change\n */\n@ApiMayChange\nclass JdbcDurableStateStore[A](\n    val configPath: String,\n    db: JdbcBackend#Database,\n    val profile: JdbcProfile,\n    durableStateConfig: DurableStateTableConfiguration,\n    serialization: Serialization)(implicit val system: ExtendedActorSystem)\n    extends DurableStateUpdateStore[A]\n    with DurableStateStoreQuery[A] {\n  import DurableStateSequenceActor._\n  import FlowControl._\n  import profile.api._\n\n  implicit val ec: ExecutionContext = system.dispatcher\n  implicit val mat: Materializer = SystemMaterializer(system).materializer\n\n  lazy val queries = new 
DurableStateQueries(profile, durableStateConfig)\n\n  // Started lazily to prevent the actor for querying the db if no changesByTag queries are used\n  private[jdbc] lazy val stateSequenceActor = system.systemActorOf(\n    DurableStateSequenceActor.props(this, durableStateConfig.stateSequenceConfig),\n    s\"$configPath.akka-persistence-jdbc-durable-state-sequence-actor\")\n\n  def getObject(persistenceId: String): Future[GetObjectResult[A]] = {\n    db.run(queries.selectFromDbByPersistenceId(persistenceId).result).flatMap { rows =>\n      rows.headOption match {\n        case None => Future.successful(GetObjectResult(None, 0))\n        case Some(row) =>\n          Future.fromTry(AkkaSerialization.fromDurableStateRow(serialization)(row).map { anyRef =>\n            GetObjectResult(Some(anyRef.asInstanceOf[A]), row.revision)\n          })\n      }\n    }\n  }\n\n  def upsertObject(persistenceId: String, revision: Long, value: A, tag: String): Future[Done] = {\n    require(revision > 0)\n    val row =\n      AkkaSerialization.serialize(serialization, value).map { serialized =>\n        DurableStateTables.DurableStateRow(\n          0, // insert 0 for autoinc columns\n          persistenceId,\n          revision,\n          serialized.payload,\n          Option(tag).filter(_.trim.nonEmpty),\n          serialized.serId,\n          Option(serialized.serManifest).filter(_.trim.nonEmpty),\n          System.currentTimeMillis)\n      }\n\n    Future\n      .fromTry(row)\n      .flatMap { r =>\n        val action = if (revision == 1) insertDurableState(r) else updateDurableState(r)\n        db.run(action)\n      }\n      .map { rowsAffected =>\n        if (rowsAffected == 0)\n          throw new IllegalStateException(\n            s\"Incorrect revision number [$revision] provided: It has to be 1 more than the value existing in the database for persistenceId [$persistenceId]\")\n        else Done\n      }\n  }\n\n  @deprecated(message = \"Use the deleteObject overload with 
revision instead.\", since = \"5.2.0\")\n  override def deleteObject(persistenceId: String): Future[Done] =\n    deleteObject(persistenceId, revision = 0)\n\n  override def deleteObject(persistenceId: String, revision: Long): Future[Done] = {\n    // FIXME #686 use revision\n    db.run(queries.deleteFromDb(persistenceId).map(_ => Done))\n  }\n\n  def currentChanges(tag: String, offset: Offset): Source[DurableStateChange[A], NotUsed] = {\n    Source\n      .futureSource(maxStateStoreOffset().map { maxOrderingInDb =>\n        changesByTag(tag, offset.value, terminateAfterOffset = Some(maxOrderingInDb))\n      })\n      .mapMaterializedValue(_ => NotUsed)\n  }\n\n  def changes(tag: String, offset: Offset): Source[DurableStateChange[A], NotUsed] =\n    changesByTag(tag, offset.value, terminateAfterOffset = None)\n\n  private def currentChangesByTag(\n      tag: String,\n      from: Long,\n      batchSize: Long,\n      queryUntil: MaxGlobalOffset): Source[DurableStateChange[A], NotUsed] = {\n    if (queryUntil.maxOffset < from) Source.empty\n    else changesByTagFromDb(tag, from, queryUntil.maxOffset, batchSize).mapAsync(1)(Future.fromTry)\n  }\n\n  private def changesByTagFromDb(\n      tag: String,\n      offset: Long,\n      maxOffset: Long,\n      batchSize: Long): Source[Try[DurableStateChange[A]], NotUsed] = {\n    Source\n      .fromPublisher(db.stream(queries.changesByTag((tag, offset, maxOffset, batchSize)).result))\n      .map(toDurableStateChange)\n  }\n\n  private[jdbc] def changesByTag(\n      tag: String,\n      offset: Long,\n      terminateAfterOffset: Option[Long]): Source[DurableStateChange[A], NotUsed] = {\n\n    val batchSize = durableStateConfig.batchSize\n    implicit val askTimeout: Timeout = Timeout(durableStateConfig.stateSequenceConfig.askTimeout)\n\n    Source\n      .unfoldAsync[(Long, FlowControl), Seq[DurableStateChange[A]]]((offset, Continue)) { case (from, control) =>\n        def retrieveNextBatch() = {\n          for {\n            
queryUntil <- stateSequenceActor.ask(GetMaxGlobalOffset).mapTo[MaxGlobalOffset]\n            xs <- currentChangesByTag(tag, from, batchSize, queryUntil).runWith(Sink.seq)\n          } yield {\n\n            val hasMoreEvents = xs.size == batchSize\n            val nextControl: FlowControl =\n              terminateAfterOffset match {\n                // we may stop if target is behind queryUntil and we don't have more events to fetch\n                case Some(target) if !hasMoreEvents && target <= queryUntil.maxOffset => Stop\n\n                // We may also stop if we have found an event with an offset >= target\n                case Some(target) if xs.exists(_.offset.value >= target) => Stop\n\n                // otherwise, disregarding if Some or None, we must decide how to continue\n                case _ =>\n                  if (hasMoreEvents) Continue\n                  else ContinueDelayed\n              }\n            val nextStartingOffset = if (xs.isEmpty) {\n              math.max(from.value, queryUntil.maxOffset)\n            } else {\n              // Continue querying from the largest offset\n              xs.map(_.offset.value).max\n            }\n            Some(((nextStartingOffset, nextControl), xs))\n          }\n        }\n\n        control match {\n          case Stop     => Future.successful(None)\n          case Continue => retrieveNextBatch()\n          case ContinueDelayed =>\n            akka.pattern.after(durableStateConfig.refreshInterval, system.scheduler)(retrieveNextBatch())\n        }\n      }\n      .mapConcat(identity)\n  }\n\n  private[jdbc] def maxStateStoreOffset(): Future[Long] =\n    db.run(queries.maxOffsetQuery.result)\n\n  private[jdbc] def stateStoreStateInfo(offset: Long, limit: Long): Source[(String, Long, Long), NotUsed] =\n    Source.fromPublisher(db.stream(queries.stateStoreStateQuery((offset, limit)).result))\n\n  private def toDurableStateChange(row: DurableStateTables.DurableStateRow): 
Try[DurableStateChange[A]] = {\n    AkkaSerialization\n      .fromDurableStateRow(serialization)(row)\n      .map(payload =>\n        new UpdatedDurableState(\n          row.persistenceId,\n          row.revision,\n          payload.asInstanceOf[A],\n          Offset.sequence(row.globalOffset),\n          row.stateTimestamp))\n  }\n\n  private def updateDurableState(row: DurableStateTables.DurableStateRow) = {\n    for {\n      s <- queries.getSequenceNextValueExpr()\n      u <- queries.updateDbWithDurableState(row, s.head)\n    } yield u\n  }\n\n  private def insertDurableState(row: DurableStateTables.DurableStateRow) = {\n    for {\n      s <- queries.getSequenceNextValueExpr()\n      u <- queries.insertDbWithDurableState(row, s.head)\n    } yield u\n  }\n\n  def deleteAllFromDb() = db.run(queries.deleteAllFromDb())\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/testkit/internal/SchemaType.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.testkit.internal\n\nimport akka.annotation.InternalApi\n\n/**\n * INTERNAL API\n */\n@InternalApi private[jdbc] sealed trait SchemaType\n\n/**\n * INTERNAL API\n */\n@InternalApi private[jdbc] case object Postgres extends SchemaType\n\n/**\n * INTERNAL API\n */\n@InternalApi private[jdbc] case object H2 extends SchemaType\n\n/**\n * INTERNAL API\n */\n@InternalApi private[jdbc] case object MySQL extends SchemaType\n\n/**\n * INTERNAL API\n */\n@InternalApi private[jdbc] case object Oracle extends SchemaType\n\n/**\n * INTERNAL API\n */\n@InternalApi private[jdbc] case object SqlServer extends SchemaType\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/testkit/internal/SchemaUtilsImpl.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.testkit.internal\n\nimport java.sql.Statement\n\nimport scala.concurrent.Future\nimport akka.Done\nimport akka.actor.ClassicActorSystemProvider\nimport akka.annotation.InternalApi\nimport akka.dispatch.Dispatchers\nimport akka.persistence.jdbc.db.SlickDatabase\nimport akka.persistence.jdbc.db.SlickExtension\nimport com.typesafe.config.Config\nimport org.slf4j.Logger\nimport slick.jdbc.H2Profile\nimport slick.jdbc.JdbcBackend.Database\nimport slick.jdbc.JdbcProfile\nimport slick.jdbc.MySQLProfile\nimport slick.jdbc.OracleProfile\nimport slick.jdbc.PostgresProfile\nimport slick.jdbc.SQLServerProfile\n\n/**\n * INTERNAL API\n */\n@InternalApi\nprivate[jdbc] object SchemaUtilsImpl {\n\n  def legacy(configKey: String, config: Config): Boolean =\n    config.getConfig(configKey).getString(\"dao\") != \"akka.persistence.jdbc.journal.dao.DefaultJournalDao\"\n\n  /**\n   * INTERNAL API\n   */\n  @InternalApi\n  private[jdbc] def dropIfExists(configKey: String, logger: Logger)(\n      implicit actorSystem: ClassicActorSystemProvider): Future[Done] = {\n    val slickDb: SlickDatabase = loadSlickDatabase(configKey)\n    val (fileToLoad, separator) =\n      dropScriptFor(\n        slickProfileToSchemaType(slickDb.profile),\n        legacy(configKey, actorSystem.classicSystem.settings.config))\n\n    val blockingEC = actorSystem.classicSystem.dispatchers.lookup(Dispatchers.DefaultBlockingDispatcherId)\n    Future(applyScriptWithSlick(fromClasspathAsString(fileToLoad), separator, logger, slickDb.database))(blockingEC)\n  }\n\n  /**\n   * INTERNAL API\n   */\n  @InternalApi\n  private[jdbc] def createIfNotExists(configKey: String, logger: Logger)(\n      implicit actorSystem: ClassicActorSystemProvider): Future[Done] = {\n\n    val slickDb: SlickDatabase = loadSlickDatabase(configKey)\n    
val (fileToLoad, separator) =\n      createScriptFor(\n        slickProfileToSchemaType(slickDb.profile),\n        legacy(configKey, actorSystem.classicSystem.settings.config))\n\n    val blockingEC = actorSystem.classicSystem.dispatchers.lookup(Dispatchers.DefaultBlockingDispatcherId)\n    Future(applyScriptWithSlick(fromClasspathAsString(fileToLoad), separator, logger, slickDb.database))(blockingEC)\n  }\n\n  /**\n   * INTERNAL API\n   */\n  @InternalApi\n  private[jdbc] def applyScript(script: String, separator: String, configKey: String, logger: Logger)(\n      implicit actorSystem: ClassicActorSystemProvider): Future[Done] = {\n\n    val blockingEC = actorSystem.classicSystem.dispatchers.lookup(Dispatchers.DefaultBlockingDispatcherId)\n    Future(applyScriptWithSlick(script, separator, logger, loadSlickDatabase(configKey).database))(blockingEC)\n  }\n\n  /**\n   * INTERNAL API\n   */\n  @InternalApi\n  private[jdbc] def dropWithSlick(schemaType: SchemaType, logger: Logger, db: Database, legacy: Boolean): Done = {\n    val (fileToLoad, separator) = dropScriptFor(schemaType, legacy)\n    SchemaUtilsImpl.applyScriptWithSlick(SchemaUtilsImpl.fromClasspathAsString(fileToLoad), separator, logger, db)\n  }\n\n  /**\n   * INTERNAL API\n   */\n  @InternalApi\n  private[jdbc] def createWithSlick(schemaType: SchemaType, logger: Logger, db: Database, legacy: Boolean): Done = {\n    val (fileToLoad, separator) = createScriptFor(schemaType, legacy)\n    SchemaUtilsImpl.applyScriptWithSlick(SchemaUtilsImpl.fromClasspathAsString(fileToLoad), separator, logger, db)\n  }\n\n  private def applyScriptWithSlick(script: String, separator: String, logger: Logger, database: Database): Done = {\n\n    def withStatement(f: Statement => Unit): Done = {\n      val session = database.createSession()\n      try session.withStatement()(f)\n      finally session.close()\n      Done\n    }\n\n    withStatement { stmt =>\n      val lines = script.split(separator).map(_.trim)\n      for {\n     
   line <- lines if line.nonEmpty\n      } yield {\n        logger.debug(s\"applying DDL: $line\")\n\n        try stmt.executeUpdate(line)\n        catch {\n          case t: java.sql.SQLException =>\n            logger.debug(s\"Exception while applying SQL script\", t)\n        }\n      }\n    }\n  }\n\n  private def dropScriptFor(schemaType: SchemaType, legacy: Boolean): (String, String) = {\n    val suffix = if (legacy) \"-legacy\" else \"\"\n    schemaType match {\n      case Postgres  => (s\"schema/postgres/postgres-drop-schema$suffix.sql\", \";\")\n      case MySQL     => (s\"schema/mysql/mysql-drop-schema$suffix.sql\", \";\")\n      case Oracle    => (s\"schema/oracle/oracle-drop-schema$suffix.sql\", \"/\")\n      case SqlServer => (s\"schema/sqlserver/sqlserver-drop-schema$suffix.sql\", \";\")\n      case H2        => (s\"schema/h2/h2-drop-schema$suffix.sql\", \";\")\n    }\n  }\n\n  private def createScriptFor(schemaType: SchemaType, legacy: Boolean): (String, String) = {\n    val suffix = if (legacy) \"-legacy\" else \"\"\n    schemaType match {\n      case Postgres  => (s\"schema/postgres/postgres-create-schema$suffix.sql\", \";\")\n      case MySQL     => (s\"schema/mysql/mysql-create-schema$suffix.sql\", \";\")\n      case Oracle    => (s\"schema/oracle/oracle-create-schema$suffix.sql\", \"/\")\n      case SqlServer => (s\"schema/sqlserver/sqlserver-create-schema$suffix.sql\", \";\")\n      case H2        => (s\"schema/h2/h2-create-schema$suffix.sql\", \";\")\n    }\n  }\n\n  private def slickProfileToSchemaType(profile: JdbcProfile): SchemaType =\n    profile match {\n      case PostgresProfile  => Postgres\n      case MySQLProfile     => MySQL\n      case OracleProfile    => Oracle\n      case SQLServerProfile => SqlServer\n      case H2Profile        => H2\n      case _                => throw new IllegalArgumentException(s\"Invalid profile $profile encountered\")\n    }\n\n  /**\n   * INTERNAL API\n   */\n  @InternalApi\n  private[jdbc] def 
fromClasspathAsString(fileName: String): String = {\n    val is = getClass.getClassLoader.getResourceAsStream(fileName)\n    io.Source.fromInputStream(is).mkString\n  }\n\n  private def loadSlickDatabase(configKey: String)(implicit actorSystem: ClassicActorSystemProvider) = {\n    val journalConfig = actorSystem.classicSystem.settings.config.getConfig(configKey)\n    SlickExtension(actorSystem).database(journalConfig)\n  }\n\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/testkit/javadsl/SchemaUtils.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.testkit.javadsl\n\nimport java.util.concurrent.CompletionStage\n\nimport scala.jdk.FutureConverters._\n\nimport akka.Done\nimport akka.actor.ClassicActorSystemProvider\nimport akka.annotation.ApiMayChange\nimport akka.persistence.jdbc.testkit.internal.SchemaUtilsImpl\nimport org.slf4j.LoggerFactory\n\nobject SchemaUtils {\n\n  private val logger = LoggerFactory.getLogger(\"akka.persistence.jdbc.testkit.javadsl.SchemaUtils\")\n\n  /**\n   * Drops the schema for both the journal and the snapshot table using the default schema definition.\n   *\n   * For information about the different schemas and supported databases consult\n   * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema\n   *\n   * This utility method is intended to be used for testing only.\n   * For production, it's recommended to run any DDL statements before the system is started.\n   *\n   * This method will automatically detects the configured database using the settings from `jdbc-journal` config.\n   * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead.\n   * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details.\n   */\n  @ApiMayChange\n  def dropIfExists(actorSystem: ClassicActorSystemProvider): CompletionStage[Done] =\n    dropIfExists(configKey = \"jdbc-journal\", actorSystem)\n\n  /**\n   * Drops the schema for both the journal and the snapshot table using the default schema definition.\n   *\n   * For information about the different schemas and supported databases consult\n   * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema\n   *\n   * This utility method is intended to be used 
for testing only.\n   * For production, it's recommended to run any DDL statements before the system is started.\n   *\n   * This method will automatically detects the configured database using the settings from `configKey` config.\n   * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead.\n   * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details.\n   *\n   * @param configKey the database journal configuration key to use.\n   */\n  @ApiMayChange\n  def dropIfExists(configKey: String, actorSystem: ClassicActorSystemProvider): CompletionStage[Done] =\n    SchemaUtilsImpl.dropIfExists(configKey, logger)(actorSystem).asJava\n\n  /**\n   * Creates the schema for both the journal and the snapshot table using the default schema definition.\n   *\n   * For information about the different schemas and supported databases consult\n   * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema\n   *\n   * This utility method is intended to be used for testing only.\n   * For production, it's recommended to create run DDL statements before the system is started.\n   *\n   * This method will automatically detects the configured database using the settings from `jdbc-journal` config.\n   * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead.\n   * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details.\n   */\n  @ApiMayChange\n  def createIfNotExists(actorSystem: ClassicActorSystemProvider): CompletionStage[Done] =\n    createIfNotExists(\"jdbc-journal\", actorSystem)\n\n  /**\n   * Creates the schema for both the journal and the snapshot table using the default schema definition.\n   *\n   * For information about the different schemas and supported 
databases consult\n   * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema\n   *\n   * This utility method is intended to be used for testing only.\n   * For production, it's recommended to run any DDL statements before the system is started.\n   *\n   * This method will automatically detect the configured database using the settings from `configKey` config.\n   * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead.\n   * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details.\n   *\n   * @param configKey the database journal configuration key to use.\n   */\n  @ApiMayChange\n  def createIfNotExists(configKey: String, actorSystem: ClassicActorSystemProvider): CompletionStage[Done] =\n    SchemaUtilsImpl.createIfNotExists(configKey, logger)(actorSystem).asJava\n\n  /**\n   * This method can be used to load alternative DDL scripts.\n   *\n   * This utility method is intended to be used for testing only.\n   * For production, it's recommended to run any DDL statements before the system is started.\n   *\n   * It will use the database settings found under `jdbc-journal`, or `akka-persistence-jdbc.shared-databases` if configured so.\n   * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details.\n   *\n   * @param script the DDL script. 
The passed script can contain more than one SQL statement separated by a ; (semi-colon).\n   */\n  @ApiMayChange\n  def applyScript(script: String, actorSystem: ClassicActorSystemProvider): CompletionStage[Done] =\n    applyScript(script, separator = \";\", configKey = \"jdbc-journal\", actorSystem)\n\n  /**\n   * This method can be used to load alternative DDL scripts.\n   *\n   * This utility method is intended to be used for testing only.\n   * For production, it's recommended to run any DDL statements before the system is started.\n   *\n   * It will use the database settings found under `configKey`, or `akka-persistence-jdbc.shared-databases` if configured so.\n   * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details.\n   *\n   * @param script the DDL script. The passed `script` can contain more than one SQL statement.\n   * @param separator used to separate the different DDL statements.\n   * @param configKey the database configuration key to use. Can be `jdbc-journal` or `jdbc-snapshot-store`.\n   */\n  @ApiMayChange\n  def applyScript(\n      script: String,\n      separator: String,\n      configKey: String,\n      actorSystem: ClassicActorSystemProvider): CompletionStage[Done] =\n    SchemaUtilsImpl.applyScript(script, separator, configKey, logger)(actorSystem).asJava\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/testkit/scaladsl/SchemaUtils.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.testkit.scaladsl\n\nimport scala.concurrent.Future\n\nimport akka.Done\nimport akka.actor.ClassicActorSystemProvider\nimport akka.annotation.ApiMayChange\nimport akka.persistence.jdbc.testkit.internal.SchemaUtilsImpl\nimport org.slf4j.LoggerFactory\n\nobject SchemaUtils {\n\n  private val logger = LoggerFactory.getLogger(\"akka.persistence.jdbc.testkit.scaladsl.SchemaUtils\")\n\n  /**\n   * Drops the schema for both the journal and the snapshot table using the default schema definition.\n   *\n   * For information about the different schemas and supported databases consult\n   * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema\n   *\n   * This utility method is intended to be used for testing only.\n   * For production, it's recommended to run any DDL statements before the system is started.\n   *\n   * This method will automatically detects the configured database using the settings from `jdbc-journal` config.\n   * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead.\n   * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details.\n   */\n  @ApiMayChange\n  def dropIfExists()(implicit actorSystem: ClassicActorSystemProvider): Future[Done] =\n    dropIfExists(configKey = \"jdbc-journal\")\n\n  /**\n   * Drops the schema for both the journal and the snapshot table using the default schema definition.\n   *\n   * For information about the different schemas and supported databases consult\n   * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema\n   *\n   * This utility method is intended to be used for testing only.\n   * For production, it's recommended to 
run any DDL statements before the system is started.\n   *\n   * This method will automatically detect the configured database using the settings from `configKey` config.\n   * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead.\n   * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details.\n   *\n   * @param configKey the database journal configuration key to use.\n   */\n  @ApiMayChange\n  def dropIfExists(configKey: String)(implicit actorSystem: ClassicActorSystemProvider): Future[Done] =\n    SchemaUtilsImpl.dropIfExists(configKey, logger)\n\n  /**\n   * Creates the schema for both the journal and the snapshot table using the default schema definition.\n   *\n   * For information about the different schemas and supported databases consult\n   * https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema\n   *\n   * This utility method is intended to be used for testing only.\n   * For production, it's recommended to run any DDL statements before the system is started.\n   *\n   * This method will automatically detect the configured database using the settings from `jdbc-journal` config.\n   * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead.\n   * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details.\n   */\n  @ApiMayChange\n  def createIfNotExists()(implicit actorSystem: ClassicActorSystemProvider): Future[Done] =\n    createIfNotExists(configKey = \"jdbc-journal\")\n\n  /**\n   * Creates the schema for both the journal and the snapshot table using the default schema definition.\n   *\n   * For information about the different schemas and supported databases consult\n   * 
https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#database-schema\n   *\n   * This utility method is intended to be used for testing only.\n   * For production, it's recommended to run any DDL statements before the system is started.\n   *\n   * This method will automatically detect the configured database using the settings from `configKey` config.\n   * If configured with `use-shared-db`, it will use the `akka-persistence-jdbc.shared-databases` definition instead.\n   * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details.\n   *\n   * @param configKey the database journal configuration key to use.\n   */\n  @ApiMayChange\n  def createIfNotExists(configKey: String)(implicit actorSystem: ClassicActorSystemProvider): Future[Done] =\n    SchemaUtilsImpl.createIfNotExists(configKey, logger)\n\n  /**\n   * This method can be used to load alternative DDL scripts.\n   *\n   * This utility method is intended to be used for testing only.\n   * For production, it's recommended to run any DDL statements before the system is started.\n   *\n   * It will use the database settings found under `jdbc-journal`, or `akka-persistence-jdbc.shared-databases` if configured so.\n   * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details.\n   *\n   * @param script the DDL script. 
The passed script can contain more than one SQL statement separated by a ; (semi-colon).\n   */\n  @ApiMayChange\n  def applyScript(script: String)(implicit actorSystem: ClassicActorSystemProvider): Future[Done] =\n    applyScript(script, separator = \";\", configKey = \"jdbc-journal\")\n\n  /**\n   * This method can be used to load alternative DDL scripts.\n   *\n   * This utility method is intended to be used for testing only.\n   * For production, it's recommended to create the table with DDL statements before the system is started.\n   *\n   * It will use the database settings found under `configKey`, or `akka-persistence-jdbc.shared-databases` if configured so.\n   * See https://doc.akka.io/libraries/akka-persistence-jdbc/current/index.html#sharing-the-database-connection-pool-between-the-journals for details.\n   *\n   * @param script the DDL script. The passed `script` can contain more than one SQL statement.\n   * @param separator used to separate the different DDL statements.\n   * @param configKey the database configuration key to use. Can be `jdbc-journal` or `jdbc-snapshot-store`.\n   */\n  @ApiMayChange\n  def applyScript(script: String, separator: String, configKey: String)(\n      implicit actorSystem: ClassicActorSystemProvider): Future[Done] =\n    SchemaUtilsImpl.applyScript(script, separator, configKey, logger)\n\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/util/BlockingOps.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.util\n\nimport scala.concurrent.duration.{ FiniteDuration, _ }\nimport scala.concurrent.{ Await, Future }\n\nobject BlockingOps {\n  implicit class BlockingFutureImplicits[T](val that: Future[T]) extends AnyVal {\n    def futureValue(implicit awaitDuration: FiniteDuration = 24.hour): T =\n      Await.result(that, awaitDuration)\n    def printFutureValue(implicit awaitDuration: FiniteDuration = 24.hour): Unit =\n      println(that.futureValue)\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/util/ByteArrayOps.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.util\n\nimport java.io.{ ByteArrayInputStream, InputStream }\nimport java.util.Base64\n\nobject ByteArrayOps {\n  implicit class ByteArrayImplicits(val that: Array[Byte]) extends AnyVal {\n    def encodeBase64: String = Base64.getEncoder.encodeToString(that)\n    def toInputStream: InputStream = new ByteArrayInputStream(that)\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/util/ConfigOps.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.util\n\nimport com.typesafe.config.Config\n\nimport java.util.concurrent.TimeUnit\nimport scala.concurrent.duration.FiniteDuration\n\nobject ConfigOps {\n\n  implicit class ConfigOperations(val config: Config) extends AnyVal {\n    def asStringOption(key: String): Option[String] =\n      if (config.hasPath(key)) {\n        val value = config.getString(key).trim\n        if (value.isEmpty) None\n        else Some(value)\n      } else None\n\n    def asFiniteDuration(key: String): FiniteDuration =\n      FiniteDuration(config.getDuration(key).toMillis, TimeUnit.MILLISECONDS)\n\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/util/InputStreamOps.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.util\n\nimport java.io.{ ByteArrayOutputStream, InputStream }\n\nimport scala.concurrent.blocking\n\nobject InputStreamOps {\n  implicit class InputStreamImplicits(val is: InputStream) extends AnyVal {\n    def toArray: Array[Byte] =\n      blocking {\n        /* based on https://stackoverflow.com/a/17861016/865265\n         * Thanks to\n         * - https://stackoverflow.com/users/1435969/ivan-gammel\n         * - https://stackoverflow.com/users/2619133/oliverkn\n         */\n        val bos: ByteArrayOutputStream = new ByteArrayOutputStream\n        val buffer: Array[Byte] = new Array[Byte](0xffff)\n        var len: Int = is.read(buffer)\n        while (len != -1) {\n          bos.write(buffer, 0, len)\n          len = is.read(buffer)\n        }\n        bos.toByteArray\n      }\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/util/PluginVersionChecker.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.util\n\nimport akka.annotation.InternalApi\n\n@InternalApi\nprivate[jdbc] object PluginVersionChecker {\n  def check(): Unit =\n    try {\n      Class.forName(\"akka.persistence.jdbc.util.DefaultSlickDatabaseProvider\")\n      throw new RuntimeException(\n        \"Old version of Akka Persistence JDBC found on the classpath. Remove `com.github.dnvriend:akka-persistence-jdbc` from the classpath..\")\n    } catch {\n      case _: ClassNotFoundException =>\n      // All good! That's intentional.\n      // It's good if we don't have akka.persistence.jdbc.util.DefaultSlickDatabaseProvider around\n    }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/util/StringOps.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.util\n\nimport java.util.Base64\n\nobject StringOps {\n  implicit class StringImplicits(val that: String) extends AnyVal {\n    def toByteArray: Array[Byte] = Base64.getDecoder.decode(that)\n  }\n}\n"
  },
  {
    "path": "core/src/main/scala/akka/persistence/jdbc/util/TrySeq.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.util\n\nimport scala.annotation.nowarn\nimport scala.collection.immutable._\nimport scala.util.{ Failure, Success, Try }\n\nobject TrySeq {\n  def sequence[A](seq: Seq[Try[A]]): Try[Seq[A]] = {\n    @nowarn(\"msg=exhaustive\")\n    def recurse(remaining: Seq[Try[A]], processed: Seq[A]): Try[Seq[A]] = {\n      remaining match {\n        case Seq()                 => Success(processed)\n        case Success(head) +: tail => recurse(remaining = tail, processed :+ head)\n        case Failure(t) +: _       => Failure(t)\n      }\n    }\n\n    recurse(seq, Vector.empty)\n  }\n}\n"
  },
  {
    "path": "core/src/test/LICENSE",
    "content": "﻿LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT\r\n\r\nTHIS LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT (THIS \"AGREEMENT\") IS A LEGAL AGREEMENT BETWEEN YOU (\"USER\") AND LIGHTBEND, INC. (\"LICENSOR\"). \r\nBY CLICKING THE \"I ACCEPT\" BUTTON, OR INSTALLING, COPYING OR OTHERWISE USING LIGHTBEND’S SOFTWARE (THE “SOFTWARE”) AND ITS ASSOCIATED USER DOCUMENTATION, USER ACKNOWLEDGES THAT USER HAS REVIEWED AND ACCEPTS THIS AGREEMENT AND AGREES TO BE BOUND BY ALL OF ITS TERMS. \r\nIF YOU ARE AGREEING TO THIS AGREEMENT AS AN INDIVIDUAL, “USER” REFERS TO YOU INDIVIDUALLY.  IF YOU ARE AGREEING TO THIS AGREEMENT AS A REPRESENTATIVE OF AN ENTITY, YOU REPRESENT THAT YOU HAVE THE AUTHORITY TO BIND THAT ENTITY AND “USER” REFERS TO THAT ENTITY AND ALL THE USERS ACCESSING THE SOFTWARE BY, THROUGH OR ON BEHALF OF THAT ENTITY.  \r\nIF USER DOES NOT AGREE WITH ALL OF THE TERMS OF THIS AGREEMENT, DO NOT INSTALL, COPY OR OTHERWISE USE THE SOFTWARE OR ITS DOCUMENTATION. \r\n\r\n1. DEFINITIONS. \r\n   1. “User System” means User’s website(s), computers, servers and other equipment and software upon and with which the Software is run.\r\n   2. “Documentation” means the user instructions and help files made available by Licensor for use with the Software, as may be updated from time to time by Licensor.\r\n   3. “Intellectual Property Rights” means all intellectual property rights or similar proprietary rights, including \r\n\t(a) patent rights and utility models, \r\n\t(b) copyrights and database rights, \r\n\t(c) trademarks, trade names, domain names and trade dress and the goodwill associated therewith, \r\n\t(d) trade secrets, \r\n\t(e) mask works, and \r\n\t(f) industrial design rights; in each case, including any registrations of, applications to register, and renewals and extensions of, any of the foregoing in any jurisdiction in the world.\r\n   4. 
“Open Source Software” means all software that is available under the GNU Affero General Public License (AGPL), GNU General Public License (GPL), GNU Lesser General Public License (LGPL), Mozilla Public License (MPL), Apache License, BSD licenses, or any other license that is approved by or similar to those approved by the Open Source Initiative (www.opensource.org).\r\n\r\n2. LICENSES AND RESTRICTIONS.  \r\n   1. License.  Subject to User’s compliance with the terms and conditions of this Agreement, Licensor hereby grants to User, during the term of this Agreement, a limited, non-exclusive, non-transferable and non-sublicensable right to \r\n\t(i) install and execute one (1) copy of the Software in accordance with the Documentation, solely in binary form, and not for the benefit of any other person or entity, and \r\n\t(ii) access and use the Documentation, solely for User’s own internally purposes in support of End User’s permitted use of the Software.  \r\n   2. Restrictions.  User shall not, directly or indirectly, or permit any User or third party to: \r\n\t(a) reverse engineer, decompile, disassemble or otherwise attempt to discover the source code or underlying ideas or algorithms of the Software;  \r\n\t(b) modify, translate, or create derivative works based on any element of the Software or any related Documentation (except to the extent applicable laws specifically prohibit such restriction for interoperability purposes, in which case you agree to first contact Licensor and provide Licensor an opportunity to create such changes as are needed for interoperability purposes); \r\n\t(c) use, rent, lease, distribute, sell, resell, assign, or otherwise transfer the Software or any copy thereof; \r\n\t(d) use the Software for timesharing purposes or otherwise for the benefit of any person or entity other than for the benefit of User and Users; \r\n\t(e) remove any proprietary notices from the Software or the Documentation or attempt to defeat any copy protection 
device included with the Software; or \r\n\t(f) use the Software for any purpose other than its intended purpose.\r\n   3. Reservation of Rights.  Nothing in this Agreement shall be construed to give User a right to use or otherwise obtain access to any source code from which the Software is compiled or interpreted.  Except as expressly granted in this Agreement, there are no other licenses granted to User, express, implied or by way of estoppel.  All rights not granted in this Agreement are reserved by Licensor.\r\n   4. Open Source Software.  Notwithstanding the foregoing, certain items of software included with the Software are Open Source Software and remains subject Open Source Software licenses.  Such Open Source Software is not subject to the terms and conditions of this Agreement.  \r\nInstead, each such item of Open Source Software is licensed under the terms of the end user license that accompanies such Open Source Software and nothing in this Agreement limits your rights under, or grants you rights that supersede, the terms and conditions of any applicable end user license for such Open Source Software.  If required by any license for particular Open Source Software, Licensor makes such Open Source Software, and any Licensor modifications to that Open Source Software, available as further described in the Documentation.\r\nUSE OF THE SOFTWARE IN ANY MANNER OTHER THAN AS PROVIDED IN THIS AGREEMENT IS STRICTLY PROHIBITED AND MAY INFRINGE ON THE INTELLECTUAL PROPERTY RIGHTS OF LICENSOR AND/OR ITS LICENSOR(S), SUBJECTING USER TO CIVIL AND CRIMINAL PENALTIES, INCLUDING WITHOUT LIMITATION MONETARY DAMAGES AND IMPRISONMENT FOR COPYRIGHT INFRINGEMENT.\r\n\r\n3. USER OBLIGATIONS.\r\n   1. User System.  
User is responsible for \r\n\t(a) obtaining, deploying and maintaining the User System, and all computer hardware, software, modems, routers and other communications equipment necessary for User and its Users to install and use the Software; and \r\n\t(b) paying all third party fees and access charges incurred in connection with the foregoing.  Licensor shall not be responsible for supplying any hardware, software or other equipment to User under this Agreement.\r\n   2. Compliance with Laws.  User agrees to use the Software in compliance with all applicable laws, including local laws of the country or region in which User resides, and in compliance with all United States export laws and regulations.  User shall not use the Software for any purpose prohibited by applicable law.  \r\n   3. Trademarks and Tradenames.  With regard to all copies of the Software permitted herein, User shall reproduce on such copies all Licensor copyright notices, and other proprietary notices appearing on and in the original copy of the software received from Licensor. Except as set forth in the foregoing sentence, User will not, during the term of this Agreement or thereafter, use any trademark of Licensor, or any word and/or symbol likely to be confused with any Licensor trademark, either alone or in any combination with other words and/or symbols.\r\n\r\n4. SUPPORT AND MAINTENANCE.\r\n   1. Support.  Licensor is not responsible for maintenance or support of the Software, or the equipment on which the Software resides or is used, under this Agreement. By accepting the license granted under this Agreement, User agrees that Licensor will be under no obligation to provide any support, maintenance or service in connection with the Software or such equipment.  \r\n   2. Upgrades and Updates.  
Licensor may from time to time in its sole discretion develop and provide updates for the Software, which may include upgrades, bug fixes, patches, other error corrections, and/or new features (collectively, including related documentation, “Updates”). Updates may also modify or delete in their entirety certain features and functionality. You agree that Licensor has no obligation to provide any Updates or to continue to provide or enable any particular features or functionality. \r\n\r\n5. REPRESENTATIONS AND WARRANTIES; DISCLAIMER.\r\n   1. Mutual Representations and Warranties.  Each party represents, warrants and covenants that: \r\n\t(a) it has the full power and authority to enter into this Agreement and to perform its obligations hereunder, without the need for any consents, approvals or immunities not yet obtained; and \r\n\t(b) its acceptance of and performance under this Agreement shall not breach any oral or written agreement with any third party or any obligation owed by it to any third party to keep any information or materials in confidence or in trust. \r\n   2. Disclaimer.  EXCEPT FOR THE WARRANTIES SET FORTH IN THIS SECTION 5, THE SOFTWARE AND THE DOCUMENTATION ARE PROVIDED ON AN AS-IS BASIS.  USER’S USE OF THE SOFTWARE AND THE DOCUMENTATION IS AT USER’S OWN RISK.  LICENSOR DOES NOT MAKE, AND HEREBY DISCLAIMS, ANY AND ALL OTHER EXPRESS, STATUTORY AND IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT AND TITLE, QUALITY, SUITABILITY, OPERABILITY, CONDITION, SYSTEM INTEGRATION, NON-INTERFERENCE, WORKMANSHIP, TRUTH, ACCURACY (OF DATA OR ANY OTHER INFORMATION OR CONTENT), ABSENCE OF DEFECTS, WHETHER LATENT OR PATENT, AND ANY WARRANTIES ARISING FROM A COURSE OF DEALING, USAGE, OR TRADE PRACTICE.  
LICENSOR ALSO DOES NOT WARRANT THAT THE FUNCTIONS CONTAINED IN, PERFORMED AND/OR PROVIDED BY THE SOFTWARE WILL MEET USER’S REQUIREMENTS, THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR ERROR-FREE, THAT THE SOFTWARE WILL BE COMPATIBLE OR WORK WITH ANY THIRD-PARTY SOFTWARE, APPLICATIONS OR DEVICES, OR THAT DEFECTS IN THE SOFTWARE WILL BE CORRECTED.  USER EXPRESSLY ACKNOWLEDGES AND AGREES THAT, TO THE EXTENT PERMITTED BY APPLICABLE LAW, ITS USE OF THE SOFTWARE IS AT ITS SOLE RISK AND THAT THE ENTIRE RISK AS TO SATISFACTORY QUALITY, PERFORMANCE, ACCURACY AND EFFORT IS WITH USER. USER FURTHER ACKNOWLEDGES AND AGREES THAT THE SOFTWARE IS NOT INTENDED OR SUITABLE FOR USE IN SITUATIONS OR ENVIRONMENTS WHERE THE FAILURE OR TIME DELAYS OF, OR ERRORS OR INACCURACIES IN THE CONTENT, DATA OR INFORMATION PROVIDED BY THE SOFTWARE COULD LEAD TO DEATH, PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE. ANY WARRANTIES MADE BY LICENSOR ARE FOR THE BENEFIT OF USER ONLY AND NOT FOR THE BENEFIT OF ANY THIRD PARTY.  THE SOFTWARE AND THE DOCUMENTATION ARE LICENSED AND NOT SOLD. NO AGENT OF LICENSOR IS AUTHORIZED TO ALTER OR EXPAND THE WARRANTIES OF LICENSOR AS SET FORTH HEREIN.  \r\n\r\n6. INDEMNIFICATION. User shall defend Licensor and its licensors and their respective officers, directors and employees (“Licensor Indemnified Parties”) from and against any and all Third-Party Claims which arise out of or relate to: \r\n\t(a) User’s use or alleged use of the Software other than as permitted under this Agreement; or \r\n\t(b) arising out of or relating to any violation of Section 2.2, or any violation of applicable laws.  User shall pay all damages, costs and expenses, including attorneys’ fees and costs (whether by settlement or award of by a final judicial judgment) incurred by the Licensor Indemnified Parties from any such Third-Party Claim.  In no event shall Licensor settle any claim without User’s prior written approval.  
Licensor may, at its own expense, engage separate counsel to advise Licensor regarding a Third-Party Claim and to participate in the defense of the claim, subject to User’s right to control the defense and settlement. If you are a New Jersey resident, this indemnification clause is to be only as broad and inclusive as is permitted by the law of the state of New Jersey.\r\n\r\n7. CONFIDENTIALITY. \r\n   1. Confidential Information. User acknowledges that the Software contains valuable proprietary information and trade secrets and that unauthorized or improper use of the Software will result in irreparable harm to Licensor for which monetary damages would be inadequate and for which Licensor may be entitled to immediate injunctive relief. Accordingly, you will maintain the confidentiality of the proprietary information and not sell, license, publish, display, distribute, disclose or otherwise make available such proprietary information to any third party, nor use such information except as authorized by this Agreement.\r\n   2. Injunctive Relief.  User agrees that any unauthorized disclosure of confidential information may cause immediate and irreparable injury to Licensor and that, in the event of such breach, Licensor will be entitled, in addition to any other available remedies, to seek immediate injunctive and other equitable relief, without bond and without the necessity of showing actual monetary damages.\r\n\r\n8. PROPRIETARY RIGHTS. \r\n   1. Licensor.  As between Licensor and User, all right, title and interest in the Software, the Documentation, and any other Licensor materials furnished or made available hereunder, and all modifications and enhancements thereof, and all suggestions, ideas and feedback proposed by User regarding the Software, including all copyright rights, patent rights and other Intellectual Property Rights in each of the foregoing, belong to and are retained solely by Licensor or Licensor’s licensors and providers, as applicable.  
User hereby does and will irrevocably assign to Licensor all evaluations, ideas, feedback and suggestions made by User to Licensor regarding the Software or the Documentation (collectively, “Feedback”) and all Intellectual Property Rights in the Feedback.  \r\n\r\n9. LIMITATION OF LIABILITY.\r\n   1. No Consequential Damages.  NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR ANY INDIRECT, INCIDENTAL, SPECIAL, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR ANY DAMAGES FOR LOST DATA, BUSINESS INTERRUPTION, LOST PROFITS, LOST REVENUE OR LOST BUSINESS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT, EVEN IF LICENSOR OR ITS LICENSORS OR USER HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES, INCLUDING WITHOUT LIMITATION, ANY SUCH DAMAGES ARISING OUT OF THE LICENSING, PROVISION OR USE OF THE SOFTWARE OR THE RESULTS OF THE USE OF THE SOFTWARE.  LICENSOR WILL NOT BE LIABLE FOR THE COST OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES.\r\n   2. LIMITS ON LIABILITY.  NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR CUMULATIVE, AGGREGATE DAMAGES GREATER THAN FIVE HUNDRED DOLLARS (US $500).  \r\n   3. ESSENTIAL PURPOSE.  USER ACKNOWLEDGES THAT THE TERMS IN THIS SECTION 9 (LIMITATION OF LIABILITY) SHALL APPLY TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW AND SHALL APPLY EVEN IF AN EXCLUSIVE OR LIMITED REMEDY STATED HEREIN FAILS OF ITS ESSENTIAL PURPOSE. SOME JURISDICTIONS DO NOT ALLOW CERTAIN LIMITATIONS OF LIABILITY, SO SOME OR ALL OF THE ABOVE LIMITATIONS OF LIABILITY MAY NOT APPLY TO YOU.  IF YOU ARE A NEW JERSEY RESIDENT, THIS LIMITATION OF LIABILITY SECTION IS TO BE ONLY AS BROAD AND INCLUSIVE AS IS PERMITTED BY THE LAW OF THE STATE OF NEW JERSEY.\r\n\r\n10. TERM AND TERMINATION.  \r\n   1. Term.  This Agreement and User’s right to use the Software commences on earlier of the date that User: \r\n\t(a) installs the Software, \r\n\t(b) begins using the Software or \r\n\t(c) otherwise demonstrates assent to this Agreement.  
\r\n\tUser’s right to use the Software shall continue until such time as this Agreement is terminated (the “Term”).  \r\n   2. Termination for Cause.  A party may terminate this Agreement, upon written notice to the other party in the event the other party files a petition for bankruptcy or has a petition for bankruptcy filed against it that is not dismissed within sixty (60) calendar days after filing  or admits its inability to pay its debts as they mature, makes an assignment for the benefit of its creditors or ceases to function as a going concern or to conduct its operations in the normal course of business and such termination shall occur immediately upon notice.  Licensor may terminate this Agreement at any time without notice if it ceases to support the Software, which Licensor may do in its sole discretion. In addition, this Agreement will terminate immediately and automatically without any notice if User breaches any of its terms and conditions.\r\n   3. Termination for Convenience.  Either party may terminate this Agreement for convenience on at least thirty (30) calendar days prior written notice to the other party.  User may also terminate this Agreement by ceasing all use of the Software.\r\n   4. Effects of Termination.  Upon expiration or termination of this Agreement, User’s shall cease all use of the Software and the Documentation and shall destroy all copies of the Software in User’s possession or control.\r\n   5. Survival.  This Section and Sections 1, 2.2 (Restrictions), 2.3 (Reservation of Rights), 2.4 (Open Source Software), 5.2 (Disclaimer), 6 (Indemnification), 7 (Confidentiality), 8 (Proprietary Rights), 9 (Limitation of Liability), 10.4 (Effects of Termination) and 11 (Miscellaneous) shall survive any termination or expiration of this Agreement. \r\n\r\n11. MISCELLANEOUS.\r\n   1. Notices.  
Licensor may give notice to User by means of a general notice through electronic mail to User’s e-mail address, or by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service to User’s address on record with Licensor. User may give notice to Licensor by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service addressed to Licensor, Lightbend Inc., 580 California, #1231, San Francisco, CA 94104, Attention: User Support.  Notice shall be deemed to have been given upon receipt or, if earlier, two (2) business days after mailing, as applicable. All communications and notices to be made or given pursuant to this Agreement shall be in the English language.  \r\n   2. Governing Law.  This Agreement and the rights and obligations of the parties to and under this agreement shall be governed by and construed under the laws of the United States and the State of California as applied to agreements entered into and to be performed in such State without giving effect to conflicts of laws rules or principles.  The parties agree that the United Nations Convention on Contracts for the International Sale of Goods is specifically excluded from application to this Agreement and that the application of the Uniform Computer Information Transactions Act (UCITA) is specifically disclaimed.  Any dispute arising out of or in connection with this Agreement, including but not limited to any question regarding its existence, interpretation, validity, performance, or termination, or any dispute between the parties arising from the parties' relationship created by this Agreement, shall be referred to and finally resolved by arbitration administered by the American Arbitration Association under its rules.  The number of arbitrators shall be one (1).  The parties shall endeavor to agree upon the sole arbitrator and jointly nominate the arbitrator.  
If the parties cannot agree upon the sole arbitrator within a time prescribed by AAA, the parties shall request the AAA to propose five (5) arbitrators and each party shall rank the proposed arbitrators.  The AAA shall appoint an arbitrator from the list of five (5), based upon the parties' rankings.  \r\n   3. U.S. Government Users.  If User is a Federal Government entity, Licensor provides the Software and the Documentation, including related software and technology, for ultimate Federal Government end use solely in accordance with the following:  Government technical data rights include only those rights customarily provided to the public with a commercial item or process and Government software rights related to the Software and the Documentation include only those rights customarily provided to the public, as defined in this Agreement.  The technical data rights and customary commercial software license is provided in accordance with FAR 12.211 (Technical Data) and FAR 12.212 (Software) and, for Department of Defense transactions, DFAR 252.227-7015 (Technical Data – Commercial Items) and DFAR 227.7202-3 (Rights in Commercial Computer Software or Computer Software Documentation).  If greater rights are needed, a mutually acceptable written addendum specifically conveying such rights must be included in this Agreement. \r\n   4. Export.  The Software utilizes software and technology that may be subject to United States and foreign export controls. User acknowledges and agrees that the Software shall not be used, and none of the underlying information, software, or technology may be transferred or otherwise exported or re-exported to countries as to which the United States maintains an embargo (collectively, “Embargoed Countries”), or to or by a national or resident thereof, or any person or entity on the U.S. Department of Treasury’s List of Specially Designated Nationals or the U.S. 
Department of Commerce’s Table of Denial Orders (collectively, “Designated Nationals”). The lists of Embargoed Countries and Designated Nationals are subject to change without notice. By using the Software, User represents and warrants that it is not located in, under the control of, or a national or resident of an Embargoed Country or Designated National. The Software may use encryption technology that is subject to licensing requirements under the U.S. Export Administration Regulations, 15 C.F.R. Parts 730-774 and Council Regulation (EC) No. 1334/2000. User agrees to comply strictly with all applicable export laws and assume sole responsibility for obtaining licenses to export or re-export as may be required. Licensor and its licensors make no representation that the Software is appropriate or available for use in other locations. By using the Software, User represents and warrants that it is not located in any such country or on any such list.\r\n   5. General.  User shall not assign its rights hereunder, or delegate the performance of any of its duties or obligations hereunder, whether by merger, acquisition, sale of assets, operation of law, or otherwise, without the prior written consent of Licensor.  Any purported assignment in violation of the preceding sentence is null and void.  Subject to the foregoing, this Agreement shall be binding upon, and inure to the benefit of, the successors and assigns of the parties thereto.  Except as otherwise specified in this Agreement, this Agreement may be amended or supplemented only by a writing that refers explicitly to this Agreement and that is signed on behalf of both parties.  No waiver will be implied from conduct or failure to enforce rights.  No waiver will be effective unless in a writing signed on behalf of the party against whom the waiver is asserted.  
If any of this Agreement is found invalid or unenforceable that term will be enforced to the maximum extent permitted by law and the remainder of this Agreement will remain in full force.  \r\nNothing contained herein shall be construed as creating an agency, partnership, or other form of joint enterprise between the parties.  \r\nThis Agreement constitutes the entire agreement between the parties relating to this subject matter and supersedes all prior or simultaneous understandings, representations, discussions, negotiations, and agreements, whether written or oral.  \r\nNeither party shall be liable to the other party or any third party for failure or delay in performing its obligations under this Agreement when such failure or delay is due to any cause beyond the control of the party concerned, including, without limitation, force majeure, governmental orders or restrictions, fire, or flood, provided that upon cessation of such events such party shall thereupon promptly perform or complete the performance of its obligations hereunder."
  },
  {
    "path": "core/src/test/java/akka/persistence/jdbc/JavadslSnippets.java",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc;\n\nimport akka.Done;\nimport akka.NotUsed;\nimport akka.actor.ActorSystem;\n// #create\nimport akka.persistence.jdbc.testkit.javadsl.SchemaUtils;\n// #create\n// #read-journal\nimport akka.persistence.query.*;\nimport akka.persistence.jdbc.query.javadsl.JdbcReadJournal;\n// #read-journal\n// #persistence-ids\nimport akka.stream.javadsl.Source;\nimport akka.persistence.query.PersistenceQuery;\nimport akka.persistence.jdbc.query.javadsl.JdbcReadJournal;\n// #persistence-ids\n// #events-by-persistence-id\nimport akka.stream.javadsl.Source;\nimport akka.persistence.query.PersistenceQuery;\nimport akka.persistence.query.EventEnvelope;\nimport akka.persistence.jdbc.query.javadsl.JdbcReadJournal;\n// #events-by-persistence-id\n// #events-by-tag\nimport akka.stream.javadsl.Source;\nimport akka.persistence.query.PersistenceQuery;\nimport akka.persistence.query.EventEnvelope;\nimport akka.persistence.jdbc.query.javadsl.JdbcReadJournal;\n// #events-by-tag\n\nimport java.util.concurrent.CompletionStage;\n\nfinal class JavadslSnippets {\n  void create() {\n    // #create\n\n    ActorSystem actorSystem = ActorSystem.create(\"example\");\n    CompletionStage<Done> done = SchemaUtils.createIfNotExists(actorSystem);\n    // #create\n  }\n\n  void readJournal() {\n    ActorSystem system = ActorSystem.create(\"example\");\n    // #read-journal\n\n    final JdbcReadJournal readJournal =\n        PersistenceQuery.get(system)\n            .getReadJournalFor(JdbcReadJournal.class, JdbcReadJournal.Identifier());\n    // #read-journal\n\n  }\n\n  void persistenceIds() {\n    ActorSystem system = ActorSystem.create();\n    // #persistence-ids\n\n    JdbcReadJournal readJournal =\n        PersistenceQuery.get(system)\n            .getReadJournalFor(JdbcReadJournal.class, 
JdbcReadJournal.Identifier());\n\n    Source<String, NotUsed> willNotCompleteTheStream = readJournal.persistenceIds();\n\n    Source<String, NotUsed> willCompleteTheStream = readJournal.currentPersistenceIds();\n    // #persistence-ids\n  }\n\n  void eventsByPersistenceIds() {\n    ActorSystem system = ActorSystem.create();\n\n    // #events-by-persistence-id\n\n    JdbcReadJournal readJournal =\n        PersistenceQuery.get(system)\n            .getReadJournalFor(JdbcReadJournal.class, JdbcReadJournal.Identifier());\n\n    Source<EventEnvelope, NotUsed> willNotCompleteTheStream =\n        readJournal.eventsByPersistenceId(\"some-persistence-id\", 0L, Long.MAX_VALUE);\n\n    Source<EventEnvelope, NotUsed> willCompleteTheStream =\n        readJournal.currentEventsByPersistenceId(\"some-persistence-id\", 0L, Long.MAX_VALUE);\n    // #events-by-persistence-id\n  }\n\n  void eventsByTag() {\n    ActorSystem system = ActorSystem.create();\n    // #events-by-tag\n\n    JdbcReadJournal readJournal =\n        PersistenceQuery.get(system)\n            .getReadJournalFor(JdbcReadJournal.class, JdbcReadJournal.Identifier());\n\n    Source<EventEnvelope, NotUsed> willNotCompleteTheStream =\n        readJournal.eventsByTag(\"apple\", Offset.sequence(0L));\n\n    Source<EventEnvelope, NotUsed> willCompleteTheStream =\n        readJournal.currentEventsByTag(\"apple\", Offset.sequence(0L));\n    // #events-by-tag\n  }\n}\n"
  },
  {
    "path": "core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state;\n\nimport java.util.concurrent.CompletionStage;\nimport akka.actor.ActorSystem;\nimport akka.Done;\nimport akka.NotUsed;\n// #create\nimport akka.persistence.jdbc.testkit.javadsl.SchemaUtils;\n// #create\n// #jdbc-durable-state-store\nimport akka.persistence.state.DurableStateStoreRegistry;\nimport akka.persistence.jdbc.state.javadsl.JdbcDurableStateStore;\n// #jdbc-durable-state-store\n// #get-object\nimport akka.persistence.state.DurableStateStoreRegistry;\nimport akka.persistence.jdbc.state.javadsl.JdbcDurableStateStore;\nimport akka.persistence.state.javadsl.GetObjectResult;\n// #get-object\n// #upsert-get-object\nimport akka.persistence.state.DurableStateStoreRegistry;\nimport akka.persistence.jdbc.state.javadsl.JdbcDurableStateStore;\nimport akka.persistence.state.javadsl.GetObjectResult;\n// #upsert-get-object\n// #delete-object\nimport akka.persistence.state.DurableStateStoreRegistry;\nimport akka.persistence.jdbc.state.javadsl.JdbcDurableStateStore;\n// #delete-object\n// #current-changes\nimport akka.NotUsed;\nimport akka.stream.javadsl.Source;\nimport akka.persistence.state.DurableStateStoreRegistry;\nimport akka.persistence.jdbc.state.javadsl.JdbcDurableStateStore;\nimport akka.persistence.query.DurableStateChange;\nimport akka.persistence.query.NoOffset;\n// #current-changes\n// #changes\nimport akka.NotUsed;\nimport akka.stream.javadsl.Source;\nimport akka.persistence.state.DurableStateStoreRegistry;\nimport akka.persistence.jdbc.state.javadsl.JdbcDurableStateStore;\nimport akka.persistence.query.DurableStateChange;\nimport akka.persistence.query.NoOffset;\n// #changes\n\nfinal class JavadslSnippets {\n  void create() {\n    // #create\n\n    ActorSystem system = ActorSystem.create(\"example\");\n    CompletionStage<Done> done = 
SchemaUtils.createIfNotExists(system);\n    // #create\n  }\n\n  void durableStatePlugin() {\n    ActorSystem system = ActorSystem.create(\"example\");\n\n    // #jdbc-durable-state-store\n\n    @SuppressWarnings(\"unchecked\")\n    JdbcDurableStateStore<String> store =\n        DurableStateStoreRegistry.get(system)\n            .getDurableStateStoreFor(\n                JdbcDurableStateStore.class, JdbcDurableStateStore.Identifier());\n    // #jdbc-durable-state-store\n  }\n\n  void getObject() {\n    ActorSystem system = ActorSystem.create(\"example\");\n\n    // #get-object\n\n    @SuppressWarnings(\"unchecked\")\n    JdbcDurableStateStore<String> store =\n        DurableStateStoreRegistry.get(system)\n            .getDurableStateStoreFor(\n                JdbcDurableStateStore.class, JdbcDurableStateStore.Identifier());\n\n    CompletionStage<GetObjectResult<String>> futureResult = store.getObject(\"InvalidPersistenceId\");\n    try {\n      GetObjectResult<String> result = futureResult.toCompletableFuture().get();\n      assert !result.value().isPresent();\n    } catch (Exception e) {\n      // handle exceptions\n    }\n    // #get-object\n  }\n\n  void upsertAndGetObject() {\n    ActorSystem system = ActorSystem.create(\"example\");\n\n    // #upsert-get-object\n\n    @SuppressWarnings(\"unchecked\")\n    JdbcDurableStateStore<String> store =\n        DurableStateStoreRegistry.get(system)\n            .getDurableStateStoreFor(\n                JdbcDurableStateStore.class, JdbcDurableStateStore.Identifier());\n\n    CompletionStage<GetObjectResult<String>> r =\n        store\n            .upsertObject(\"p234\", 1, \"a valid string\", \"t123\")\n            .thenCompose(d -> store.getObject(\"p234\"))\n            .thenCompose(o -> store.upsertObject(\"p234\", 2, \"updated valid string\", \"t123\"))\n            .thenCompose(d -> store.getObject(\"p234\"));\n\n    try {\n      assert r.toCompletableFuture().get().value().get().equals(\"updated valid 
string\");\n    } catch (Exception e) {\n      // handle exceptions\n    }\n    // #upsert-get-object\n  }\n\n  void deleteObject() {\n    ActorSystem system = ActorSystem.create(\"example\");\n\n    // #delete-object\n\n    @SuppressWarnings(\"unchecked\")\n    JdbcDurableStateStore<String> store =\n        DurableStateStoreRegistry.get(system)\n            .getDurableStateStoreFor(\n                JdbcDurableStateStore.class, JdbcDurableStateStore.Identifier());\n\n    CompletionStage<Done> futureResult = store.deleteObject(\"p123\");\n    try {\n      assert futureResult.toCompletableFuture().get().equals(Done.getInstance());\n    } catch (Exception e) {\n      // handle exceptions\n    }\n    // #delete-object\n  }\n\n  void currentChanges() {\n    ActorSystem system = ActorSystem.create(\"example\");\n\n    // #current-changes\n\n    @SuppressWarnings(\"unchecked\")\n    JdbcDurableStateStore<String> store =\n        DurableStateStoreRegistry.get(system)\n            .getDurableStateStoreFor(\n                JdbcDurableStateStore.class, JdbcDurableStateStore.Identifier());\n\n    Source<DurableStateChange<String>, NotUsed> willCompleteTheStream =\n        store.currentChanges(\"tag-1\", NoOffset.getInstance());\n    // #current-changes\n  }\n\n  void changes() {\n    ActorSystem system = ActorSystem.create(\"example\");\n\n    // #changes\n\n    @SuppressWarnings(\"unchecked\")\n    JdbcDurableStateStore<String> store =\n        DurableStateStoreRegistry.get(system)\n            .getDurableStateStoreFor(\n                JdbcDurableStateStore.class, JdbcDurableStateStore.Identifier());\n\n    Source<DurableStateChange<String>, NotUsed> willNotCompleteTheStream =\n        store.changes(\"tag-1\", NoOffset.getInstance());\n    // #changes\n  }\n}\n"
  },
  {
    "path": "core/src/test/resources/general.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// This file contains the general settings which are shared in all akka-persistence-jdbc tests\n\nakka {\n  stdout-loglevel = off // defaults to WARNING can be disabled with off. The stdout-loglevel is only in effect during system startup and shutdown\n  log-dead-letters-during-shutdown = on\n  loglevel = debug\n  log-dead-letters = on\n  log-config-on-start = off // Log the complete configuration at INFO level when the actor system is started\n\n  loggers = [\"akka.event.slf4j.Slf4jLogger\"]\n  logging-filter = \"akka.event.slf4j.Slf4jLoggingFilter\"\n\n  actor {\n    // Required until https://github.com/akka/akka/pull/28333 is available\n    allow-java-serialization = on\n    debug {\n      receive = on // log all messages sent to an actor if that actors receive method is a LoggingReceive\n      autoreceive = off // log all special messages like Kill, PoisoffPill etc sent to all actors\n      lifecycle = off // log all actor lifecycle events of all actors\n      fsm = off // enable logging of all events, transitioffs and timers of FSM Actors that extend LoggingFSM\n      event-stream = off // enable logging of subscriptions (subscribe/unsubscribe) on the ActorSystem.eventStream\n    }\n  }\n}\n\ndocker {\n  host = \"localhost\"\n  host = ${?VM_HOST}\n}\n\njdbc-journal {\n  event-adapters {\n    test-write-event-adapter = \"akka.persistence.jdbc.query.EventAdapterTest$TestWriteEventAdapter\"\n    test-read-event-adapter  = \"akka.persistence.jdbc.query.EventAdapterTest$TestReadEventAdapter\"\n  }\n\n  event-adapter-bindings {\n    \"akka.persistence.jdbc.query.EventAdapterTest$Event\"            = test-write-event-adapter\n    \"akka.persistence.jdbc.query.EventAdapterTest$TaggedEvent\"      = test-write-event-adapter\n    \"akka.persistence.jdbc.query.EventAdapterTest$TaggedAsyncEvent\" = test-write-event-adapter\n    
\"akka.persistence.jdbc.query.EventAdapterTest$EventAdapted\"     = test-read-event-adapter\n  }\n}\n\n\njdbc-read-journal {\n  refresh-interval = \"10ms\"\n  max-buffer-size = \"500\"\n}\n\nslick.db.idleTimeout = 10000 // 10 seconds\n"
  },
  {
    "path": "core/src/test/resources/h2-application.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// general.conf is included only for shared settings used for the akka-persistence-jdbc tests\ninclude \"general.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\njdbc-journal {\n  slick = ${slick}\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  slick = ${slick}\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  slick = ${slick}\n}\n\n# the akka-persistence-jdbc provider in use for durable state store\njdbc-durable-state-store {\n  slick = ${slick}\n}\n\nanother-jdbc-durable-state-store = ${jdbc-durable-state-store}\n\nslick {\n  profile = \"slick.jdbc.H2Profile$\"\n  db {\n    url = \"jdbc:h2:mem:test-database;DATABASE_TO_UPPER=false;\"\n    user = \"root\"\n    password = \"root\"\n    driver = \"org.h2.Driver\"\n    numThreads = 5\n    maxConnections = 5\n    minConnections = 1\n  }\n}\n"
  },
  {
    "path": "core/src/test/resources/h2-default-mode-application.conf",
    "content": "# Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n\n// general.conf is included only for shared settings used for the akka-persistence-jdbc tests\ninclude \"general.conf\"\n\n# Same as h2-application.conf but without DATABASE_TO_UPPER=false. This exercises H2 in its\n# default mode, where unquoted identifiers are uppercased — the failure mode that the durable\n# state identifier-quoting fix addresses.\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n    }\n  }\n}\n\njdbc-journal {\n  slick = ${slick}\n}\n\njdbc-snapshot-store {\n  slick = ${slick}\n}\n\njdbc-read-journal {\n  slick = ${slick}\n}\n\njdbc-durable-state-store {\n  slick = ${slick}\n}\n\nanother-jdbc-durable-state-store = ${jdbc-durable-state-store}\n\nslick {\n  profile = \"slick.jdbc.H2Profile$\"\n  db {\n    url = \"jdbc:h2:mem:test-database\"\n    user = \"root\"\n    password = \"root\"\n    driver = \"org.h2.Driver\"\n    numThreads = 5\n    maxConnections = 5\n    minConnections = 1\n  }\n}\n"
  },
  {
    "path": "core/src/test/resources/h2-shared-db-application.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\nakka-persistence-jdbc {\n  shared-databases {\n    slick {\n      profile = \"slick.jdbc.H2Profile$\"\n      db {\n        url = \"jdbc:h2:mem:test-database;DATABASE_TO_UPPER=false;\"\n        user = \"root\"\n        password = \"root\"\n        driver = \"org.h2.Driver\"\n        numThreads = 5\n        maxConnections = 5\n        minConnections = 1\n      }\n    }\n  }\n}\n\njdbc-journal {\n  use-shared-db = \"slick\"\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  use-shared-db = \"slick\"\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  use-shared-db = \"slick\"\n}\n\n"
  },
  {
    "path": "core/src/test/resources/h2-two-read-journals-application.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"h2-application.conf\"\n\n// In this case we use exactly the same config for the second journal\n// (this includes the defaults form reference.conf)\njdbc-read-journal-number-two = ${jdbc-read-journal}\n"
  },
  {
    "path": "core/src/test/resources/jndi-application.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// general.conf is included only for shared settings used for the akka-persistence-jdbc tests\ninclude \"general.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\njdbc-journal {\n  slick = ${slick}\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  slick = ${slick}\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  slick = ${slick}\n}\n\nslick {\n  profile = \"slick.jdbc.PostgresProfile$\"\n  jndiName = \"java:/jboss/datasources/bla\"\n}\n"
  },
  {
    "path": "core/src/test/resources/jndi-shared-db-application.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\nakka-persistence-jdbc {\n  shared-databases {\n    slick {\n      profile = \"slick.jdbc.PostgresProfile$\"\n      jndiName = \"java:/jboss/datasources/bla\"\n    }\n  }\n}\n\njdbc-journal {\n  use-shared-db = \"slick\"\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  use-shared-db = \"slick\"\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  use-shared-db = \"slick\"\n}\n\n"
  },
  {
    "path": "core/src/test/resources/logback-test.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<configuration debug=\"false\">\n\n    <appender name=\"console\" class=\"ch.qos.logback.core.ConsoleAppender\">\n        <filter class=\"ch.qos.logback.classic.filter.ThresholdFilter\">\n            <level>debug</level>\n        </filter>\n        <encoder>\n            <pattern>%date{ISO8601} - %logger -> %-5level[%thread] [%X{akkaSource} %X{sourceActorSystem} %logger{0} - %msg%n</pattern>\n        </encoder>\n    </appender>\n\n<!--    <logger name=\"akka.persistence.jdbc\" level=\"debug\"/>-->\n<!--    <logger name=\"akka.persistence.jdbc.journal\" level=\"info\"/>-->\n\n    <logger name=\"com.zaxxer.hikari\" level=\"warn\"/>\n\n<!--    <logger name=\"slick.backend.DatabaseComponent.action\" level=\"debug\"/>-->\n\n    <!-- Shows execution times for SQL statements. -->\n<!--    <logger name=\"slick.jdbc.JdbcBackend.benchmark\" level=\"debug\"/>-->\n\n    <!-- Shows the execution of every Database I/O Action -->\n<!--    <logger name=\"slick.basic.BasicBackend.action\" level=\"debug\"/>-->\n\n    <!-- Logs session events such as opening/closing connections -->\n<!--    <logger name=\"slick.session\" level=\"debug\"/>-->\n\n<!--     Shows bind variable contents (for supported types) of all SQL statements which are executed.-->\n<!--    <logger name=\"slick.jdbc.JdbcBackend.parameter\" level=\"DEBUG\"/>-->\n\n    <!-- Logs SQL sent to the database -->\n<!--    <logger name=\"slick.jdbc.JdbcBackend.statement\" level=\"DEBUG\"/>-->\n\n    <!-- Logs the first few results of each query. 
-->\n<!--    <logger name=\"slick.jdbc.StatementInvoker.result\" level=\"DEBUG\"/>-->\n\n    <!--<logger name=\"slick.util.AsyncExecutor\" level=\"DEBUG\" />-->\n\n    <!-- <logger name=\"akka.persistence.jdbc.testkit.scaladsl.SchemaUtils\" level=\"DEBUG\"/>-->\n    <logger name=\"akka.persistence.jdbc.state.scaladsl.DurableStateSequenceActor\" level=\"DEBUG\"/>\n\n    <root level=\"INFO\">\n        <appender-ref ref=\"console\"/>\n    </root>\n\n</configuration>\n"
  },
  {
    "path": "core/src/test/resources/mysql-application.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// general.conf is included only for shared settings used for the akka-persistence-jdbc tests\ninclude \"general.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\njdbc-journal {\n  slick = ${slick}\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  slick = ${slick}\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  slick = ${slick}\n}\n\n# the akka-persistence-jdbc provider in use for durable state store\njdbc-durable-state-store {\n  slick = ${slick}\n}\n\nslick {\n  profile = \"slick.jdbc.MySQLProfile$\"\n  db {\n    host = ${docker.host}\n    host = ${?DB_HOST}\n    url = \"jdbc:mysql://\"${slick.db.host}\":3306/docker?cachePrepStmts=true&cacheCallableStmts=true&cacheServerConfiguration=true&useLocalSessionState=true&elideSetAutoCommits=true&alwaysSendSetIsolation=false&enableQueryTimeouts=false&connectionAttributes=none&verifyServerCertificate=false&useSSL=false&allowPublicKeyRetrieval=true&useUnicode=true&useLegacyDatetimeCode=false&serverTimezone=UTC&rewriteBatchedStatements=true\"\n    user = \"root\"\n    password = \"root\"\n    driver = \"com.mysql.cj.jdbc.Driver\"\n    numThreads = 5\n    maxConnections = 5\n    minConnections = 1\n  }\n}\n"
  },
  {
    "path": "core/src/test/resources/mysql-shared-db-application.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\nakka-persistence-jdbc {\n  shared-databases {\n    slick {\n      profile = \"slick.jdbc.MySQLProfile$\"\n      db {\n        host = ${docker.host}\n        host = ${?DB_HOST}\n        url = \"jdbc:mysql://\"${akka-persistence-jdbc.shared-databases.slick.db.host}\":3306/docker?cachePrepStmts=true&cacheCallableStmts=true&cacheServerConfiguration=true&useLocalSessionState=true&elideSetAutoCommits=true&alwaysSendSetIsolation=false&enableQueryTimeouts=false&connectionAttributes=none&verifyServerCertificate=false&useSSL=false&allowPublicKeyRetrieval=true&useUnicode=true&useLegacyDatetimeCode=false&serverTimezone=UTC&rewriteBatchedStatements=true\"\n        user = \"root\"\n        password = \"root\"\n        driver = \"com.mysql.cj.jdbc.Driver\"\n        numThreads = 5\n        maxConnections = 5\n        minConnections = 1\n      }\n    }\n  }\n}\n\njdbc-journal {\n  use-shared-db = \"slick\"\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  use-shared-db = \"slick\"\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  use-shared-db = \"slick\"\n}\n\n# the akka-persistence-jdbc provider in use for durable state store\njdbc-durable-state-store {\n  use-shared-db = \"slick\"\n}"
  },
  {
    "path": "core/src/test/resources/oracle-application.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// general.conf is included only for shared settings used for the akka-persistence-jdbc tests\ninclude \"general.conf\"\ninclude \"oracle-schema-overrides.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\n\njdbc-journal {\n  slick = ${slick}\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  slick = ${slick}\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  slick = ${slick}\n}\n\nslick {\n  profile = \"slick.jdbc.OracleProfile$\"\n  db {\n    host = ${docker.host}\n    host = ${?DB_HOST}\n    url = \"jdbc:oracle:thin:@//\"${slick.db.host}\":1521/FREEPDB1\"\n    user = \"system\"\n    password = \"oracle\"\n    driver = \"oracle.jdbc.OracleDriver\"\n    numThreads = 5\n    maxConnections = 5\n    minConnections = 1\n  }\n}\n"
  },
  {
    "path": "core/src/test/resources/oracle-schema-overrides.conf",
    "content": "# Oracle does not support returning a column with a case senstive name so all the column names and table names need\n# to be caps. See:\n# https://github.com/slick/slick/issues/47\n# https://groups.google.com/g/scalaquery/c/U431n-Z2cwM\n\njdbc-snapshot-store {\n  tables {\n    snapshot {\n      tableName = \"SNAPSHOT\"\n      schemaName = \"SYSTEM\"\n      columnNames {\n        persistenceId = \"PERSISTENCE_ID\"\n        sequenceNumber = \"SEQUENCE_NUMBER\"\n        created = \"CREATED\"\n\n        snapshotPayload = \"SNAPSHOT_PAYLOAD\"\n        snapshotSerId = \"SNAPSHOT_SER_ID\"\n        snapshotSerManifest = \"SNAPSHOT_SER_MANIFEST\"\n\n        metaPayload = \"META_PAYLOAD\"\n        metaSerId = \"META_SER_ID\"\n        metaSerManifest = \"META_SER_MANIFEST\"\n      }\n    }\n  }\n}\n\njdbc-read-journal {\n  tables {\n    event_journal {\n      tableName = \"EVENT_JOURNAL\"\n      schemaName = \"SYSTEM\"\n    }\n  }\n}\n\njdbc-journal {\n  tables {\n    event_journal {\n      tableName = \"EVENT_JOURNAL\"\n      schemaName = \"SYSTEM\"\n\n      columnNames {\n        ordering = \"ORDERING\"\n        deleted = \"DELETED\"\n        persistenceId = \"PERSISTENCE_ID\"\n        sequenceNumber = \"SEQUENCE_NUMBER\"\n        writer = \"WRITER\",\n        writeTimestamp = \"WRITE_TIMESTAMP\"\n        adapterManifest = \"ADAPTER_MANIFEST\"\n        eventPayload = \"EVENT_PAYLOAD\"\n        eventSerId = \"EVENT_SER_ID\"\n        eventSerManifest = \"EVENT_SER_MANIFEST\"\n        metaPayload = \"META_PAYLOAD\"\n        metaSerId = \"META_SER_ID\"\n        metaSerManifest = \"META_SER_MANIFEST\"\n      }\n    }\n\n    event_tag {\n      tableName = \"EVENT_TAG\"\n      schemaName = \"SYSTEM\"\n\n      columnNames {\n        eventId = \"EVENT_ID\"\n        persistenceId = \"PERSISTENCE_ID\"\n        sequenceNumber = \"SEQUENCE_NUMBER\"\n        tag = \"TAG\"\n      }\n    }\n  }\n\n}\n\n"
  },
  {
    "path": "core/src/test/resources/oracle-shared-db-application.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general.conf\"\ninclude \"oracle-schema-overrides.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\nakka-persistence-jdbc {\n  shared-databases {\n    slick {\n      profile = \"slick.jdbc.OracleProfile$\"\n      db {\n        host = ${docker.host}\n        host = ${?DB_HOST}\n        url = \"jdbc:oracle:thin:@//\"${akka-persistence-jdbc.shared-databases.slick.db.host}\":1521/FREEPDB1\"\n        user = \"system\"\n        password = \"oracle\"\n        driver = \"oracle.jdbc.OracleDriver\"\n        numThreads = 5\n        maxConnections = 5\n        minConnections = 1\n      }\n    }\n  }\n}\n\njdbc-journal {\n  use-shared-db = \"slick\"\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  use-shared-db = \"slick\"\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  use-shared-db = \"slick\"\n}\n"
  },
  {
    "path": "core/src/test/resources/postgres-application.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// general.conf is included only for shared settings used for the akka-persistence-jdbc tests\ninclude \"general.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\njdbc-journal {\n  slick = ${slick}\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  slick = ${slick}\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  slick = ${slick}\n}\n\n# the akka-persistence-jdbc provider in use for durable state store\njdbc-durable-state-store {\n  slick = ${slick}\n}\n\nslick {\n  profile = \"slick.jdbc.PostgresProfile$\"\n  db {\n    host = \"localhost\"\n    host = ${?DB_HOST}\n    url = \"jdbc:postgresql://\"${slick.db.host}\":5432/docker?reWriteBatchedInserts=true\"\n    user = \"docker\"\n    password = \"docker\"\n    driver = \"org.postgresql.Driver\"\n    numThreads = 5\n    maxConnections = 5\n    minConnections = 1\n  }\n}\n"
  },
  {
    "path": "core/src/test/resources/postgres-shared-db-application.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\nakka-persistence-jdbc {\n  shared-databases {\n    slick {\n      profile = \"slick.jdbc.PostgresProfile$\"\n      db {\n        host = \"localhost\"\n        host = ${?DB_HOST}\n        url = \"jdbc:postgresql://\"${akka-persistence-jdbc.shared-databases.slick.db.host}\":5432/docker?reWriteBatchedInserts=true\"\n        user = \"docker\"\n        password = \"docker\"\n        driver = \"org.postgresql.Driver\"\n        numThreads = 5\n        maxConnections = 5\n        minConnections = 1\n      }\n    }\n  }\n}\n\njdbc-journal {\n  use-shared-db = \"slick\"\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  use-shared-db = \"slick\"\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  use-shared-db = \"slick\"\n}\n\n# the akka-persistence-jdbc provider in use for durable state store\njdbc-durable-state-store {\n  use-shared-db = \"slick\"\n}\n\nanother-jdbc-durable-state-store = ${jdbc-durable-state-store}\n"
  },
  {
    "path": "core/src/test/resources/sqlserver-application.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\njdbc-journal {\n  tables {\n    journal {\n      schemaName = \"dbo\"\n    }\n  }\n\n  slick = ${slick}\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  tables {\n    snapshot {\n      schemaName = \"dbo\"\n    }\n  }\n\n  slick = ${slick}\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  tables {\n    journal {\n      schemaName = \"dbo\"\n    }\n  }\n\n  slick = ${slick}\n}\n\nslick {\n  profile = \"slick.jdbc.SQLServerProfile$\"\n  db {\n    host = ${docker.host}\n    host = ${?DB_HOST}\n    url = \"jdbc:sqlserver://\"${slick.db.host}\":1433;databaseName=docker;integratedSecurity=false\"\n    user = \"sa\"\n    password = \"docker123abc#\"\n    driver = \"com.microsoft.sqlserver.jdbc.SQLServerDriver\"\n    numThreads = 5\n    maxConnections = 5\n    minConnections = 1\n  }\n}\n"
  },
  {
    "path": "core/src/test/resources/sqlserver-shared-db-application.conf",
    "content": "# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\nakka-persistence-jdbc {\n  shared-databases {\n    slick {\n      profile = \"slick.jdbc.SQLServerProfile$\"\n      db {\n        host = ${docker.host}\n        host = ${?DB_HOST}\n        url = \"jdbc:sqlserver://\"${akka-persistence-jdbc.shared-databases.slick.db.host}\":1433;databaseName=docker;integratedSecurity=false;\"\n        user = \"sa\"\n        password = \"docker123abc#\"\n        driver = \"com.microsoft.sqlserver.jdbc.SQLServerDriver\"\n        numThreads = 5\n        maxConnections = 5\n        minConnections = 1\n      }\n    }\n  }\n}\n\njdbc-journal {\n  use-shared-db = \"slick\"\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  use-shared-db = \"slick\"\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  use-shared-db = \"slick\"\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/ScaladslSnippets.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc\n\nimport akka.{ Done, NotUsed }\nimport akka.actor.ActorSystem\n\nimport scala.annotation.nowarn\nimport scala.concurrent.Future\n\n@nowarn(\"msg=never used\")\nobject ScaladslSnippets {\n\n  def create(): Unit = {\n    // #create\n    import akka.persistence.jdbc.testkit.scaladsl.SchemaUtils\n\n    implicit val system: ActorSystem = ActorSystem(\"example\")\n    val done: Future[Done] = SchemaUtils.createIfNotExists()\n    // #create\n  }\n\n  def readJournal(): Unit = {\n    implicit val system: ActorSystem = ActorSystem()\n\n    // #read-journal\n    import akka.persistence.query.PersistenceQuery\n    import akka.persistence.jdbc.query.scaladsl.JdbcReadJournal\n\n    val readJournal: JdbcReadJournal =\n      PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier)\n    // #read-journal\n  }\n\n  def persistenceIds(): Unit = {\n    implicit val system: ActorSystem = ActorSystem()\n\n    // #persistence-ids\n    import akka.stream.scaladsl.Source\n    import akka.persistence.query.PersistenceQuery\n    import akka.persistence.jdbc.query.scaladsl.JdbcReadJournal\n\n    val readJournal: JdbcReadJournal =\n      PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier)\n\n    val willNotCompleteTheStream: Source[String, NotUsed] = readJournal.persistenceIds()\n\n    val willCompleteTheStream: Source[String, NotUsed] = readJournal.currentPersistenceIds()\n    // #persistence-ids\n  }\n\n  def eventsByPersistenceId(): Unit = {\n    implicit val system: ActorSystem = ActorSystem()\n\n    // #events-by-persistence-id\n    import akka.stream.scaladsl.Source\n    import akka.persistence.query.{ EventEnvelope, PersistenceQuery }\n    import akka.persistence.jdbc.query.scaladsl.JdbcReadJournal\n\n    val readJournal: 
JdbcReadJournal =\n      PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier)\n\n    val willNotCompleteTheStream: Source[EventEnvelope, NotUsed] =\n      readJournal.eventsByPersistenceId(\"some-persistence-id\", 0L, Long.MaxValue)\n\n    val willCompleteTheStream: Source[EventEnvelope, NotUsed] =\n      readJournal.currentEventsByPersistenceId(\"some-persistence-id\", 0L, Long.MaxValue)\n    // #events-by-persistence-id\n  }\n\n  def eventsByTag(): Unit = {\n    implicit val system: ActorSystem = ActorSystem()\n    // #events-by-tag\n    import akka.stream.scaladsl.Source\n    import akka.persistence.query.{ EventEnvelope, PersistenceQuery }\n    import akka.persistence.jdbc.query.scaladsl.JdbcReadJournal\n\n    val readJournal: JdbcReadJournal =\n      PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier)\n\n    val willNotCompleteTheStream: Source[EventEnvelope, NotUsed] = readJournal.eventsByTag(\"apple\", 0L)\n\n    val willCompleteTheStream: Source[EventEnvelope, NotUsed] = readJournal.currentEventsByTag(\"apple\", 0L)\n    // #events-by-tag\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/SharedActorSystemTestSpec.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc\n\nimport akka.actor.ActorSystem\nimport akka.persistence.jdbc.config.{ JournalConfig, ReadJournalConfig }\nimport akka.persistence.jdbc.query.javadsl.JdbcReadJournal\nimport akka.persistence.jdbc.util.DropCreate\nimport akka.persistence.jdbc.db.SlickExtension\nimport akka.serialization.SerializationExtension\nimport akka.util.Timeout\nimport com.typesafe.config.{ Config, ConfigFactory, ConfigValue }\nimport org.scalatest.BeforeAndAfterAll\n\nimport scala.concurrent.ExecutionContext\nimport scala.concurrent.duration._\n\nabstract class SharedActorSystemTestSpec(val config: Config) extends SimpleSpec with DropCreate with BeforeAndAfterAll {\n  def this(config: String = \"postgres-application.conf\", configOverrides: Map[String, ConfigValue] = Map.empty) =\n    this(configOverrides.foldLeft(ConfigFactory.load(config)) { case (conf, (path, configValue)) =>\n      conf.withValue(path, configValue)\n    })\n\n  implicit lazy val system: ActorSystem = ActorSystem(\"test\", config)\n\n  implicit lazy val ec: ExecutionContext = system.dispatcher\n  implicit val pc: PatienceConfig = PatienceConfig(timeout = 1.minute)\n  implicit val timeout: Timeout = Timeout(1.minute)\n\n  lazy val serialization = SerializationExtension(system)\n\n  val cfg = config.getConfig(\"jdbc-journal\")\n  val journalConfig = new JournalConfig(cfg)\n  lazy val db = SlickExtension(system).database(cfg).database\n  val readJournalConfig = new ReadJournalConfig(config.getConfig(JdbcReadJournal.Identifier))\n\n  override protected def afterAll(): Unit = {\n    db.close()\n    system.terminate().futureValue\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/SimpleSpec.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc\n\nimport akka.actor.{ ActorRef, ActorSystem }\nimport akka.persistence.jdbc.util.ClasspathResources\nimport akka.testkit.TestProbe\nimport org.scalatest._\nimport org.scalatest.concurrent.{ Eventually, ScalaFutures }\nimport org.scalatest.flatspec.AnyFlatSpec\nimport org.scalatest.matchers.should.Matchers\n\ntrait SimpleSpec\n    extends AnyFlatSpec\n    with Matchers\n    with ScalaFutures\n    with TryValues\n    with OptionValues\n    with Eventually\n    with ClasspathResources\n    with BeforeAndAfterAll\n    with BeforeAndAfterEach\n    with GivenWhenThen {\n\n  /**\n   * Sends the PoisonPill command to an actor and waits for it to die\n   */\n  def killActors(actors: ActorRef*)(implicit system: ActorSystem): Unit = {\n    val tp = TestProbe()\n    actors.foreach { (actor: ActorRef) =>\n      tp.watch(actor)\n      system.stop(actor)\n      tp.expectTerminated(actor)\n    }\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/SingleActorSystemPerTestSpec.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc\n\nimport akka.actor.ActorSystem\nimport akka.persistence.jdbc.config.{ JournalConfig, ReadJournalConfig, SlickConfiguration }\nimport akka.persistence.jdbc.query.javadsl.JdbcReadJournal\nimport akka.persistence.jdbc.util.DropCreate\nimport akka.persistence.jdbc.db.SlickDatabase\nimport akka.util.Timeout\nimport com.typesafe.config.{ Config, ConfigFactory, ConfigValue }\nimport org.scalatest.BeforeAndAfterEach\nimport slick.jdbc.JdbcBackend.Database\n\nimport scala.concurrent.duration._\n\nabstract class SingleActorSystemPerTestSpec(val config: Config)\n    extends SimpleSpec\n    with DropCreate\n    with BeforeAndAfterEach {\n  def this(config: String = \"postgres-application.conf\", configOverrides: Map[String, ConfigValue] = Map.empty) =\n    this(configOverrides.foldLeft(ConfigFactory.load(config)) { case (conf, (path, configValue)) =>\n      conf.withValue(path, configValue)\n    })\n\n  override implicit val patienceConfig: PatienceConfig = PatienceConfig(timeout = 1.minute)\n  implicit val timeout: Timeout = Timeout(1.minute)\n\n  val cfg = config.getConfig(\"jdbc-journal\")\n  val journalConfig = new JournalConfig(cfg)\n  val journalTableName =\n    if (newDao) journalConfig.eventJournalTableConfiguration.tableName\n    else journalConfig.journalTableConfiguration.tableName\n  val tables =\n    if (newDao)\n      List(journalConfig.eventTagTableConfiguration.tableName, journalConfig.eventJournalTableConfiguration.tableName)\n    else List(journalConfig.journalTableConfiguration.tableName)\n  val profile = if (cfg.hasPath(\"slick.profile\")) {\n    SlickDatabase.profile(cfg, \"slick\")\n  } else SlickDatabase.profile(config, \"akka-persistence-jdbc.shared-databases.slick\")\n  val readJournalConfig = new 
ReadJournalConfig(config.getConfig(JdbcReadJournal.Identifier))\n\n  // The db is initialized in the before and after each blocks\n  var dbOpt: Option[Database] = None\n  def db: Database = {\n    dbOpt.getOrElse {\n      val newDb = if (cfg.hasPath(\"slick.profile\")) {\n        SlickDatabase.database(cfg, new SlickConfiguration(cfg.getConfig(\"slick\")), \"slick.db\")\n      } else\n        SlickDatabase.database(\n          config,\n          new SlickConfiguration(config.getConfig(\"akka-persistence-jdbc.shared-databases.slick\")),\n          \"akka-persistence-jdbc.shared-databases.slick.db\")\n\n      dbOpt = Some(newDb)\n      newDb\n    }\n  }\n\n  def closeDb(): Unit = {\n    dbOpt.foreach(_.close())\n    dbOpt = None\n  }\n\n  override protected def afterEach(): Unit = {\n    super.afterEach()\n    closeDb()\n  }\n\n  override protected def afterAll(): Unit = {\n    super.afterAll()\n    closeDb()\n  }\n\n  def withActorSystem(f: ActorSystem => Unit): Unit = {\n    implicit val system: ActorSystem = ActorSystem(\"test\", config)\n    f(system)\n    system.terminate().futureValue\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/TablesTestSpec.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc\n\nimport akka.persistence.jdbc.config._\nimport com.typesafe.config.ConfigFactory\nimport org.scalatest.flatspec.AnyFlatSpec\nimport org.scalatest.matchers.should.Matchers\n\nimport scala.annotation.nowarn\n\nabstract class TablesTestSpec extends AnyFlatSpec with Matchers {\n  def toColumnName[A](tableName: String)(columnName: String): String = s\"$tableName.$columnName\"\n\n  @nowarn(\"msg=possible missing interpolator\")\n  val config = ConfigFactory\n    .parseString(\"\"\"\n      |akka-persistence-jdbc.slick.db {\n      |  host = <not used>\n      |  port = <not used>\n      |  name = <not used>\n      |}\n      |\n      |jdbc-journal {\n      |  class = \"akka.persistence.jdbc.journal.JdbcAsyncWriteJournal\"\n      |\n      |  tables {\n      |    journal {\n      |      tableName = \"journal\"\n      |      schemaName = \"\"\n      |      columnNames {\n      |        persistenceId = \"persistence_id\"\n      |        sequenceNumber = \"sequence_number\"\n      |        created = \"created\"\n      |        tags = \"tags\"\n      |        message = \"message\"\n      |      }\n      |    }\n      |\n      |    deletedTo {\n      |      tableName = \"deleted_to\"\n      |      schemaName = \"\"\n      |      columnNames = {\n      |        persistenceId = \"persistence_id\"\n      |        deletedTo = \"deleted_to\"\n      |      }\n      |    }\n      |  }\n      |\n      |  tagSeparator = \",\"\n      |\n      |  serialization = on // alter only when using a custom dao\n      |\n      |  dao = \"akka.persistence.jdbc.dao.bytea.ByteArrayJournalDao\"\n      |\n      |  slick {\n      |    profile = \"slick.jdbc.PostgresProfile\"\n      |    db {\n      |      host = \"localhost\"\n      |      host = ${?POSTGRES_HOST}\n      |      port = \"5432\"\n      |      port = 
${?POSTGRES_PORT}\n      |      name = \"docker\"\n      |\n      |      url = \"jdbc:postgresql://\"${akka-persistence-jdbc.slick.db.host}\":\"${akka-persistence-jdbc.slick.db.port}\"/\"${akka-persistence-jdbc.slick.db.name}\n      |      user = \"docker\"\n      |      password = \"docker\"\n      |      driver = \"org.postgresql.Driver\"\n      |\n      |      // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP\n      |\n      |      // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing\n      |      // slick will use an async executor with a fixed size queue of 10.000 objects\n      |      // The async executor is a connection pool for asynchronous execution of blocking I/O actions.\n      |      // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.\n      |      queueSize = 10000 // number of objects that can be queued by the async exector\n      |\n      |      connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds)\n      |      validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000\n      |      idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. 
Default: 600000 (10 minutes)\n      |      maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes)\n      |      leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0\n      |\n      |      initializationFailFast = true // This property controls whether the pool will \"fail fast\" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. 
Default: true\n      |\n      |      keepAliveConnection = on // ensures that the database does not get dropped while we are using it\n      |\n      |      numThreads = 4 // number of cores\n      |      maxConnections = 4  // same as numThreads\n      |      minConnections = 4  // same as numThreads\n      |    }\n      |  }\n      |}\n      |\n      |# the akka-persistence-snapshot-store in use\n      |jdbc-snapshot-store {\n      |  class = \"akka.persistence.jdbc.snapshot.JdbcSnapshotStore\"\n      |\n      |  tables {\n      |    snapshot {\n      |      tableName = \"snapshot\"\n      |      schemaName = \"\"\n      |      columnNames {\n      |        persistenceId = \"persistence_id\"\n      |        sequenceNumber = \"sequence_number\"\n      |        created = \"created\"\n      |        snapshot = \"snapshot\"\n      |      }\n      |    }\n      |  }\n      |\n      |  serialization = on // alter only when using a custom dao\n      |\n      |  dao = \"akka.persistence.jdbc.dao.bytea.ByteArraySnapshotDao\"\n      |\n      |  slick {\n      |    profile = \"slick.jdbc.PostgresProfile\"\n      |    db {\n      |      host = \"localhost\"\n      |      host = ${?POSTGRES_HOST}\n      |      port = \"5432\"\n      |      port = ${?POSTGRES_PORT}\n      |      name = \"docker\"\n      |\n      |      url = \"jdbc:postgresql://\"${akka-persistence-jdbc.slick.db.host}\":\"${akka-persistence-jdbc.slick.db.port}\"/\"${akka-persistence-jdbc.slick.db.name}\n      |      user = \"docker\"\n      |      password = \"docker\"\n      |      driver = \"org.postgresql.Driver\"\n      |\n      |      // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP\n      |\n      |      // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing\n      |      // slick will use an async executor with a fixed size queue of 10.000 objects\n      |      // The async executor is a connection pool for asynchronous execution of blocking I/O actions.\n      
|      // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.\n      |      queueSize = 10000 // number of objects that can be queued by the async exector\n      |\n      |      connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds)\n      |      validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000\n      |      idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes)\n      |      maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. 
Default: 1800000 (30 minutes)\n      |      leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0\n      |\n      |      initializationFailFast = true // This property controls whether the pool will \"fail fast\" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. Default: true\n      |\n      |      keepAliveConnection = on // ensures that the database does not get dropped while we are using it\n      |\n      |      numThreads = 4 // number of cores\n      |      maxConnections = 4  // same as numThreads\n      |      minConnections = 4  // same as numThreads\n      |    }\n      |  }\n      |}\n      |\n      |# the akka-persistence-query provider in use\n      |jdbc-read-journal {\n      |  class = \"akka.persistence.jdbc.query.JdbcReadJournalProvider\"\n      |\n      |  # New events are retrieved (polled) with this interval.\n      |  refresh-interval = \"1s\"\n      |\n      |  # How many events to fetch in one query (replay) and keep buffered until they\n      |  # are delivered downstreams.\n      |  max-buffer-size = \"500\"\n      |\n      |  serialization = on // alter only when using a custom dao\n      |\n      |  dao = \"akka.persistence.jdbc.dao.bytea.ByteArrayJournalDao\"\n      |\n      |  tables {\n      |    journal {\n      |      tableName = \"journal\"\n      |      schemaName = \"\"\n      |      columnNames {\n      |        persistenceId = \"persistence_id\"\n      |        sequenceNumber = \"sequence_number\"\n      |        created = \"created\"\n      |        tags = \"tags\"\n      |        message = \"message\"\n      |      }\n      |    }\n      |  }\n      |\n 
     |  tagSeparator = \",\"\n      |\n      |  slick {\n      |    profile = \"slick.jdbc.PostgresProfile\"\n      |    db {\n      |      host = \"localhost\"\n      |      host = ${?POSTGRES_HOST}\n      |      port = \"5432\"\n      |      port = ${?POSTGRES_PORT}\n      |      name = \"docker\"\n      |\n      |      url = \"jdbc:postgresql://\"${akka-persistence-jdbc.slick.db.host}\":\"${akka-persistence-jdbc.slick.db.port}\"/\"${akka-persistence-jdbc.slick.db.name}\n      |      user = \"docker\"\n      |      password = \"docker\"\n      |      driver = \"org.postgresql.Driver\"\n      |\n      |      // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP\n      |\n      |      // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing\n      |      // slick will use an async executor with a fixed size queue of 10.000 objects\n      |      // The async executor is a connection pool for asynchronous execution of blocking I/O actions.\n      |      // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.\n      |      queueSize = 10000 // number of objects that can be queued by the async exector\n      |\n      |      connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds)\n      |      validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000\n      |      idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. 
Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes)\n      |      maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes)\n      |      leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0\n      |\n      |      initializationFailFast = true // This property controls whether the pool will \"fail fast\" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. 
Default: true\n      |\n      |      keepAliveConnection = on // ensures that the database does not get dropped while we are using it\n      |\n      |      numThreads = 4 // number of cores\n      |      maxConnections = 4  // same as numThreads\n      |      minConnections = 4  // same as numThreads\n      |    }\n      |  }\n      |}\n    \"\"\".stripMargin)\n    .withFallback(ConfigFactory.load(\"reference\"))\n    .resolve()\n\n  val journalConfig = new JournalConfig(config.getConfig(\"jdbc-journal\"))\n  val snapshotConfig = new SnapshotConfig(config.getConfig(\"jdbc-snapshot-store\"))\n  val readJournalConfig = new ReadJournalConfig(config.getConfig(\"jdbc-read-journal\"))\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/cleanup/scaladsl/EventSourcedCleanupTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.cleanup.scaladsl\n\nimport akka.persistence.jdbc.query.{ H2Cleaner, QueryTestSpec }\nimport org.scalatest.matchers.should.Matchers\n\nimport scala.concurrent.duration._\nimport akka.pattern.ask\nimport akka.persistence.jdbc.query.EventAdapterTest.Snapshot\n\nabstract class EventSourcedCleanupTest(config: String) extends QueryTestSpec(config) with Matchers {\n  implicit val askTimeout: FiniteDuration = 500.millis\n\n  it should \"delete all events and reset sequence number\" in withActorSystem { implicit system =>\n    withTestActors(replyToMessages = true) { (actor1, _, _) =>\n      (actor1 ? 1).futureValue\n      (actor1 ? 2).futureValue\n      (actor1 ? 3).futureValue\n    }\n    new EventSourcedCleanup(system).deleteAllEvents(\"my-1\", true).futureValue\n    withTestActors(replyToMessages = true) { (actor1, _, _) =>\n      (actor1 ? \"state\").futureValue.asInstanceOf[Int] shouldBe 0\n    }\n  }\n\n  it should \"delete snapshots as well as events\" in withActorSystem { implicit system =>\n    withTestActors(replyToMessages = true) { (actor1, _, _) =>\n      (actor1 ? 1).futureValue\n      (actor1 ? 2).futureValue\n      (actor1 ? Snapshot).futureValue\n    }\n    new EventSourcedCleanup(system).deleteAll(\"my-1\", true).futureValue\n    withTestActors(replyToMessages = true) { (actor1, _, _) =>\n      (actor1 ? \"state\").futureValue.asInstanceOf[Int] shouldBe 0\n    }\n  }\n\n}\n\nclass H2EventSourcedCleanupTest extends EventSourcedCleanupTest(\"h2-application.conf\") with H2Cleaner\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/configuration/AkkaPersistenceConfigTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.configuration\n\nimport akka.persistence.jdbc.config._\nimport com.typesafe.config.{ Config, ConfigFactory }\nimport org.scalatest.flatspec.AnyFlatSpec\nimport org.scalatest.matchers.should.Matchers\n\nimport scala.annotation.nowarn\nimport scala.concurrent.duration._\n\nclass AkkaPersistenceConfigTest extends AnyFlatSpec with Matchers {\n  private val referenceConfig: Config = ConfigFactory.load(\"reference\")\n\n  @nowarn(\"msg=possible missing interpolator\")\n  val config: Config = ConfigFactory\n    .parseString(\"\"\"\n          |akka-persistence-jdbc.slick.db {\n          |  host = <not used>\n          |  port = <not used>\n          |  name = <not used>\n          |}\n          |\n          |jdbc-journal {\n          |  class = \"akka.persistence.jdbc.journal.JdbcAsyncWriteJournal\"\n          |\n          |  tables {\n          |    journal {\n          |      tableName = \"journal\"\n          |      schemaName = \"\"\n          |      columnNames {\n          |        ordering = \"ordering\"\n          |        persistenceId = \"persistence_id\"\n          |        sequenceNumber = \"sequence_number\"\n          |        deleted = \"deleted\"\n          |        tags = \"tags\"\n          |        message = \"message\"\n          |      }\n          |    }\n          |  }\n          |\n          |  tagSeparator = \",\"\n          |\n          |  dao = \"akka.persistence.jdbc.dao.bytea.journal.ByteArrayJournalDao\"\n          |\n          |  slick {\n          |    profile = \"slick.jdbc.PostgresProfile$\"\n          |    db {\n          |      host = \"localhost\"\n          |      host = ${?POSTGRES_HOST}\n          |      port = \"5432\"\n          |      port = ${?POSTGRES_PORT}\n          |      name = \"docker\"\n          |\n          |      url = 
\"jdbc:postgresql://\"${akka-persistence-jdbc.slick.db.host}\":\"${akka-persistence-jdbc.slick.db.port}\"/\"${akka-persistence-jdbc.slick.db.name}\n          |      user = \"docker\"\n          |      password = \"docker\"\n          |      driver = \"org.postgresql.Driver$\"\n          |\n          |      // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP\n          |\n          |      // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing\n          |      // slick will use an async executor with a fixed size queue of 10.000 objects\n          |      // The async executor is a connection pool for asynchronous execution of blocking I/O actions.\n          |      // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.\n          |      queueSize = 10000 // number of objects that can be queued by the async exector\n          |\n          |      connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds)\n          |      validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000\n          |      idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. 
Default: 600000 (10 minutes)\n          |      maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes)\n          |      leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0\n          |\n          |      initializationFailFast = true // This property controls whether the pool will \"fail fast\" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. 
Default: true\n          |\n          |      keepAliveConnection = on // ensures that the database does not get dropped while we are using it\n          |\n          |      numThreads = 4 // number of cores\n          |      maxConnections = 4  // same as numThreads\n          |      minConnections = 4  // same as numThreads\n          |    }\n          |  }\n          |}\n          |\n          |# the akka-persistence-snapshot-store in use\n          |jdbc-snapshot-store {\n          |  class = \"akka.persistence.jdbc.snapshot.JdbcSnapshotStore\"\n          |\n          |  tables {\n          |    snapshot {\n          |      tableName = \"snapshot\"\n          |      schemaName = \"\"\n          |      columnNames {\n          |        persistenceId = \"persistence_id\"\n          |        sequenceNumber = \"sequence_number\"\n          |        created = \"created\"\n          |        snapshot = \"snapshot\"\n          |      }\n          |    }\n          |  }\n          |\n          |  dao = \"akka.persistence.jdbc.dao.bytea.snapshot.ByteArraySnapshotDao\"\n          |\n          |  slick {\n          |    profile = \"slick.jdbc.MySQLProfile$\"\n          |    db {\n          |      host = \"localhost\"\n          |      host = ${?POSTGRES_HOST}\n          |      port = \"5432\"\n          |      port = ${?POSTGRES_PORT}\n          |      name = \"docker\"\n          |\n          |      url = \"jdbc:postgresql://\"${akka-persistence-jdbc.slick.db.host}\":\"${akka-persistence-jdbc.slick.db.port}\"/\"${akka-persistence-jdbc.slick.db.name}\n          |      user = \"docker\"\n          |      password = \"docker\"\n          |      driver = \"org.postgresql.Driver\"\n          |\n          |      // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP\n          |\n          |      // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing\n          |      // slick will use an async executor with a fixed size queue of 10.000 
objects\n          |      // The async executor is a connection pool for asynchronous execution of blocking I/O actions.\n          |      // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.\n          |      queueSize = 10000 // number of objects that can be queued by the async exector\n          |\n          |      connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds)\n          |      validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000\n          |      idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes)\n          |      maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. 
Default: 1800000 (30 minutes)\n          |      leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0\n          |\n          |      initializationFailFast = true // This property controls whether the pool will \"fail fast\" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. Default: true\n          |\n          |      keepAliveConnection = on // ensures that the database does not get dropped while we are using it\n          |\n          |      numThreads = 4 // number of cores\n          |      maxConnections = 4  // same as numThreads\n          |      minConnections = 4  // same as numThreads\n          |    }\n          |  }\n          |}\n          |\n          |# the akka-persistence-query provider in use\n          |jdbc-read-journal {\n          |  class = \"akka.persistence.jdbc.query.JdbcReadJournalProvider\"\n          |\n          |  # New events are retrieved (polled) with this interval.\n          |  refresh-interval = \"300ms\"\n          |\n          |  # How many events to fetch in one query (replay) and keep buffered until they\n          |  # are delivered downstreams.\n          |  max-buffer-size = \"10\"\n          |\n          |  dao = \"akka.persistence.jdbc.dao.bytea.readjournal.ByteArrayReadJournalDao\"\n          |\n          |  tables {\n          |    journal {\n          |      tableName = \"journal\"\n          |      schemaName = \"\"\n          |      columnNames {\n          |        ordering = \"ordering\"\n          |        persistenceId = \"persistence_id\"\n          |        sequenceNumber = \"sequence_number\"\n          |        created = 
\"created\"\n          |        tags = \"tags\"\n          |        message = \"message\"\n          |      }\n          |    }\n          |  }\n          |\n          |  tagSeparator = \",\"\n          |\n          |  slick {\n          |    profile = \"slick.jdbc.OracleProfile$\"\n          |    db {\n          |      host = \"localhost\"\n          |      host = ${?POSTGRES_HOST}\n          |      port = \"5432\"\n          |      port = ${?POSTGRES_PORT}\n          |      name = \"docker\"\n          |\n          |      url = \"jdbc:postgresql://\"${akka-persistence-jdbc.slick.db.host}\":\"${akka-persistence-jdbc.slick.db.port}\"/\"${akka-persistence-jdbc.slick.db.name}\n          |      user = \"docker\"\n          |      password = \"docker\"\n          |      driver = \"org.postgresql.Driver\"\n          |\n          |      // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP\n          |\n          |      // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing\n          |      // slick will use an async executor with a fixed size queue of 10.000 objects\n          |      // The async executor is a connection pool for asynchronous execution of blocking I/O actions.\n          |      // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC.\n          |      queueSize = 10000 // number of objects that can be queued by the async exector\n          |\n          |      connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds)\n          |      validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. 
The lowest accepted validation timeout is 1000ms (1 second). Default: 5000\n          |      idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes)\n          |      maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes)\n          |      leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0\n          |\n          |      initializationFailFast = true // This property controls whether the pool will \"fail fast\" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. 
Default: true\n          |\n          |      keepAliveConnection = on // ensures that the database does not get dropped while we are using it\n          |\n          |      numThreads = 4 // number of cores\n          |      maxConnections = 4  // same as numThreads\n          |      minConnections = 4  // same as numThreads\n          |    }\n          |  }\n          |}\n    \"\"\".stripMargin)\n    .withFallback(referenceConfig)\n    .resolve()\n\n  \"reference config\" should \"parse JournalConfig\" in {\n    val cfg = new JournalConfig(referenceConfig.getConfig(\"jdbc-journal\"))\n    val slickConfiguration = new SlickConfiguration(referenceConfig.getConfig(\"jdbc-journal.slick\"))\n    slickConfiguration.jndiName shouldBe None\n    slickConfiguration.jndiDbName shouldBe None\n\n    cfg.pluginConfig.dao shouldBe \"akka.persistence.jdbc.journal.dao.DefaultJournalDao\"\n    cfg.pluginConfig.tagSeparator shouldBe \",\"\n\n    cfg.journalTableConfiguration.tableName shouldBe \"journal\"\n    cfg.journalTableConfiguration.schemaName shouldBe None\n\n    cfg.journalTableConfiguration.columnNames.ordering shouldBe \"ordering\"\n    cfg.journalTableConfiguration.columnNames.created shouldBe \"created\"\n    cfg.journalTableConfiguration.columnNames.message shouldBe \"message\"\n    cfg.journalTableConfiguration.columnNames.persistenceId shouldBe \"persistence_id\"\n    cfg.journalTableConfiguration.columnNames.sequenceNumber shouldBe \"sequence_number\"\n    cfg.journalTableConfiguration.columnNames.tags shouldBe \"tags\"\n  }\n\n  it should \"parse SnapshotConfig\" in {\n    val cfg = new SnapshotConfig(referenceConfig.getConfig(\"jdbc-snapshot-store\"))\n    val slickConfiguration = new SlickConfiguration(referenceConfig.getConfig(\"jdbc-journal.slick\"))\n    slickConfiguration.jndiName shouldBe None\n    slickConfiguration.jndiDbName shouldBe None\n\n    cfg.pluginConfig.dao shouldBe \"akka.persistence.jdbc.snapshot.dao.DefaultSnapshotDao\"\n\n    
cfg.legacySnapshotTableConfiguration.tableName shouldBe \"snapshot\"\n    cfg.legacySnapshotTableConfiguration.schemaName shouldBe None\n\n    cfg.legacySnapshotTableConfiguration.columnNames.persistenceId shouldBe \"persistence_id\"\n    cfg.legacySnapshotTableConfiguration.columnNames.created shouldBe \"created\"\n    cfg.legacySnapshotTableConfiguration.columnNames.sequenceNumber shouldBe \"sequence_number\"\n    cfg.legacySnapshotTableConfiguration.columnNames.snapshot shouldBe \"snapshot\"\n  }\n\n  it should \"parse ReadJournalConfig\" in {\n    val cfg = new ReadJournalConfig(referenceConfig.getConfig(\"jdbc-read-journal\"))\n    val slickConfiguration = new SlickConfiguration(referenceConfig.getConfig(\"jdbc-journal.slick\"))\n    slickConfiguration.jndiName shouldBe None\n    slickConfiguration.jndiDbName shouldBe None\n\n    cfg.pluginConfig.dao shouldBe \"akka.persistence.jdbc.query.dao.DefaultReadJournalDao\"\n    cfg.pluginConfig.tagSeparator shouldBe \",\"\n    cfg.refreshInterval shouldBe 1.second\n    cfg.maxBufferSize shouldBe 500\n\n    cfg.journalTableConfiguration.tableName shouldBe \"journal\"\n    cfg.journalTableConfiguration.schemaName shouldBe None\n\n    cfg.journalTableConfiguration.columnNames.ordering shouldBe \"ordering\"\n    cfg.journalTableConfiguration.columnNames.created shouldBe \"created\"\n    cfg.journalTableConfiguration.columnNames.message shouldBe \"message\"\n    cfg.journalTableConfiguration.columnNames.persistenceId shouldBe \"persistence_id\"\n    cfg.journalTableConfiguration.columnNames.sequenceNumber shouldBe \"sequence_number\"\n    cfg.journalTableConfiguration.columnNames.tags shouldBe \"tags\"\n  }\n\n  \"full config\" should \"parse JournalConfig\" in {\n    val cfg = new JournalConfig(config.getConfig(\"jdbc-journal\"))\n    val slickConfiguration = new SlickConfiguration(config.getConfig(\"jdbc-journal.slick\"))\n    slickConfiguration.jndiName shouldBe None\n    slickConfiguration.jndiDbName shouldBe None\n\n 
   cfg.pluginConfig.dao shouldBe \"akka.persistence.jdbc.dao.bytea.journal.ByteArrayJournalDao\"\n    cfg.pluginConfig.tagSeparator shouldBe \",\"\n\n    cfg.journalTableConfiguration.tableName shouldBe \"journal\"\n    cfg.journalTableConfiguration.schemaName shouldBe None\n\n    cfg.journalTableConfiguration.columnNames.ordering shouldBe \"ordering\"\n    cfg.journalTableConfiguration.columnNames.created shouldBe \"created\"\n    cfg.journalTableConfiguration.columnNames.message shouldBe \"message\"\n    cfg.journalTableConfiguration.columnNames.persistenceId shouldBe \"persistence_id\"\n    cfg.journalTableConfiguration.columnNames.sequenceNumber shouldBe \"sequence_number\"\n    cfg.journalTableConfiguration.columnNames.tags shouldBe \"tags\"\n  }\n\n  it should \"parse SnapshotConfig\" in {\n    val cfg = new SnapshotConfig(config.getConfig(\"jdbc-snapshot-store\"))\n    val slickConfiguration = new SlickConfiguration(config.getConfig(\"jdbc-snapshot-store.slick\"))\n    slickConfiguration.jndiName shouldBe None\n    slickConfiguration.jndiDbName shouldBe None\n\n    cfg.pluginConfig.dao shouldBe \"akka.persistence.jdbc.dao.bytea.snapshot.ByteArraySnapshotDao\"\n\n    cfg.legacySnapshotTableConfiguration.tableName shouldBe \"snapshot\"\n    cfg.legacySnapshotTableConfiguration.schemaName shouldBe None\n    cfg.legacySnapshotTableConfiguration.columnNames.persistenceId shouldBe \"persistence_id\"\n    cfg.legacySnapshotTableConfiguration.columnNames.created shouldBe \"created\"\n    cfg.legacySnapshotTableConfiguration.columnNames.sequenceNumber shouldBe \"sequence_number\"\n    cfg.legacySnapshotTableConfiguration.columnNames.snapshot shouldBe \"snapshot\"\n  }\n\n  it should \"parse ReadJournalConfig\" in {\n    val cfg = new ReadJournalConfig(config.getConfig(\"jdbc-read-journal\"))\n    val slickConfiguration = new SlickConfiguration(config.getConfig(\"jdbc-read-journal.slick\"))\n    slickConfiguration.jndiName shouldBe None\n    
slickConfiguration.jndiDbName shouldBe None\n\n    cfg.pluginConfig.dao shouldBe \"akka.persistence.jdbc.dao.bytea.readjournal.ByteArrayReadJournalDao\"\n    cfg.pluginConfig.tagSeparator shouldBe \",\"\n    cfg.refreshInterval shouldBe 300.millis\n    cfg.maxBufferSize shouldBe 10\n\n    cfg.journalTableConfiguration.tableName shouldBe \"journal\"\n    cfg.journalTableConfiguration.schemaName shouldBe None\n\n    cfg.journalTableConfiguration.columnNames.ordering shouldBe \"ordering\"\n    cfg.journalTableConfiguration.columnNames.created shouldBe \"created\"\n    cfg.journalTableConfiguration.columnNames.message shouldBe \"message\"\n    cfg.journalTableConfiguration.columnNames.persistenceId shouldBe \"persistence_id\"\n    cfg.journalTableConfiguration.columnNames.sequenceNumber shouldBe \"sequence_number\"\n    cfg.journalTableConfiguration.columnNames.tags shouldBe \"tags\"\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/configuration/ConfigOpsTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.configuration\n\nimport akka.persistence.jdbc.SimpleSpec\nimport akka.persistence.jdbc.util.ConfigOps\nimport ConfigOps._\nimport com.typesafe.config.ConfigFactory\n\nclass ConfigOpsTest extends SimpleSpec {\n  it should \"parse field values to Options\" in {\n    val cfg = ConfigFactory.parseString(\"\"\"\n        | person {\n        |   firstName = \"foo\"\n        |   lastName = \"bar\"\n        |   pet = \"\"\n        |   car = \" \"\n        | }\n      \"\"\".stripMargin)\n\n    cfg.asStringOption(\"person.firstName\").get shouldBe \"foo\"\n    cfg.asStringOption(\"person.lastName\").get shouldBe \"bar\"\n    cfg.asStringOption(\"person.pet\") shouldBe None\n    cfg.asStringOption(\"person.car\") shouldBe None\n    cfg.asStringOption(\"person.bike\") shouldBe None\n    cfg.asStringOption(\"person.bike\") shouldBe None\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/configuration/JNDIConfigTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.configuration\n\nimport akka.actor.ActorSystem\nimport akka.persistence.jdbc.SimpleSpec\nimport akka.persistence.jdbc.db.SlickExtension\nimport com.typesafe.config.ConfigFactory\n\nclass JNDIConfigTest extends SimpleSpec {\n  \"JNDI config\" should \"read the config and throw NoInitialContextException in case the JNDI resource is not available\" in {\n    withActorSystem(\"jndi-application.conf\") { system =>\n      val jdbcJournalConfig = system.settings.config.getConfig(\"jdbc-journal\")\n      val slickExtension = SlickExtension(system)\n      intercept[javax.naming.NoInitialContextException] {\n        // Since the JNDI resource is not actually available we expect a NoInitialContextException\n        // This is an indication that the application actually attempts to load the configured JNDI resource\n        slickExtension.database(jdbcJournalConfig).database\n      }\n    }\n  }\n\n  \"JNDI config for shared databases\" should \"read the config and throw NoInitialContextException in case the JNDI resource is not available\" in {\n    withActorSystem(\"jndi-shared-db-application.conf\") { system =>\n      val jdbcJournalConfig = system.settings.config.getConfig(\"jdbc-journal\")\n      val slickExtension = SlickExtension(system)\n      intercept[javax.naming.NoInitialContextException] {\n        // Since the JNDI resource is not actually available we expect a NoInitialContextException\n        // This is an indication that the application actually attempts to load the configured JNDI resource\n        slickExtension.database(jdbcJournalConfig).database\n      }\n    }\n  }\n\n  def withActorSystem(config: String)(f: ActorSystem => Unit): Unit = {\n    val cfg = ConfigFactory.load(config)\n    val system = ActorSystem(\"test\", cfg)\n\n    try {\n      f(system)\n    } 
finally {\n      system.terminate().futureValue\n    }\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/journal/JdbcJournalPerfSpec.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal\n\nimport akka.actor.Props\nimport akka.persistence.CapabilityFlag\nimport akka.persistence.jdbc.config._\nimport akka.persistence.jdbc.db.SlickExtension\nimport akka.persistence.jdbc.testkit.internal.{ H2, SchemaType }\nimport akka.persistence.jdbc.util.{ ClasspathResources, DropCreate }\nimport akka.persistence.journal.JournalPerfSpec\nimport akka.persistence.journal.JournalPerfSpec.{ BenchActor, Cmd, ResetCounter }\nimport akka.testkit.TestProbe\nimport com.typesafe.config.{ Config, ConfigFactory }\nimport org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach }\nimport org.scalatest.concurrent.ScalaFutures\n\nimport scala.concurrent.ExecutionContext\nimport scala.concurrent.duration._\n\nabstract class JdbcJournalPerfSpec(config: Config, schemaType: SchemaType)\n    extends JournalPerfSpec(config)\n    with BeforeAndAfterAll\n    with BeforeAndAfterEach\n    with ScalaFutures\n    with ClasspathResources\n    with DropCreate {\n  override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = true\n\n  implicit lazy val ec: ExecutionContext = system.dispatcher\n\n  implicit def pc: PatienceConfig = PatienceConfig(timeout = 10.minutes)\n\n  override def eventsCount: Int = 1000\n\n  override def awaitDurationMillis: Long = 10.minutes.toMillis\n\n  override def measurementIterations: Int = 1\n\n  lazy val cfg = system.settings.config.getConfig(\"jdbc-journal\")\n\n  lazy val journalConfig = new JournalConfig(cfg)\n\n  lazy val db = SlickExtension(system).database(cfg).database\n\n  override def beforeAll(): Unit = {\n    dropAndCreate(schemaType)\n    super.beforeAll()\n  }\n\n  override def afterAll(): Unit = {\n    db.close()\n    super.afterAll()\n  }\n\n  def actorCount = 100\n\n  private val commands = Vector(1 to eventsCount: _*)\n\n  \"A 
PersistentActor's performance\" must {\n    s\"measure: persist()-ing $eventsCount events for $actorCount actors\" in {\n      val testProbe = TestProbe()\n      val replyAfter = eventsCount\n      def createBenchActor(actorNumber: Int) =\n        system.actorOf(Props(classOf[BenchActor], s\"$pid--$actorNumber\", testProbe.ref, replyAfter))\n      val actors = 1.to(actorCount).map(createBenchActor)\n\n      measure(d => s\"Persist()-ing $eventsCount * $actorCount took ${d.toMillis} ms\") {\n        for (cmd <- commands; actor <- actors) {\n          actor ! Cmd(\"p\", cmd)\n        }\n        for (_ <- actors) {\n          testProbe.expectMsg(awaitDurationMillis.millis, commands.last)\n        }\n        for (actor <- actors) {\n          actor ! ResetCounter\n        }\n      }\n    }\n  }\n\n  \"A PersistentActor's performance\" must {\n    s\"measure: persistAsync()-ing $eventsCount events for $actorCount actors\" in {\n      val testProbe = TestProbe()\n      val replyAfter = eventsCount\n      def createBenchActor(actorNumber: Int) =\n        system.actorOf(Props(classOf[BenchActor], s\"$pid--$actorNumber\", testProbe.ref, replyAfter))\n      val actors = 1.to(actorCount).map(createBenchActor)\n\n      measure(d => s\"persistAsync()-ing $eventsCount * $actorCount took ${d.toMillis} ms\") {\n        for (cmd <- commands; actor <- actors) {\n          actor ! Cmd(\"pa\", cmd)\n        }\n        for (_ <- actors) {\n          testProbe.expectMsg(awaitDurationMillis.millis, commands.last)\n        }\n        for (actor <- actors) {\n          actor ! ResetCounter\n        }\n      }\n    }\n  }\n}\n\nclass H2JournalPerfSpec extends JdbcJournalPerfSpec(ConfigFactory.load(\"h2-application.conf\"), H2)\n\nclass H2JournalPerfSpecSharedDb extends JdbcJournalPerfSpec(ConfigFactory.load(\"h2-shared-db-application.conf\"), H2)\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/journal/JdbcJournalSpec.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal\n\nimport akka.persistence.CapabilityFlag\nimport akka.persistence.jdbc.config._\nimport akka.persistence.jdbc.db.SlickExtension\nimport akka.persistence.jdbc.testkit.internal.{ H2, SchemaType }\nimport akka.persistence.jdbc.util.{ ClasspathResources, DropCreate }\nimport akka.persistence.journal.JournalSpec\nimport com.typesafe.config.{ Config, ConfigFactory }\nimport org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach }\nimport org.scalatest.concurrent.ScalaFutures\n\nimport scala.concurrent.ExecutionContext\nimport scala.concurrent.duration._\n\nabstract class JdbcJournalSpec(config: Config, schemaType: SchemaType)\n    extends JournalSpec(config)\n    with BeforeAndAfterAll\n    with BeforeAndAfterEach\n    with ScalaFutures\n    with ClasspathResources\n    with DropCreate {\n  override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = true\n\n  implicit val pc: PatienceConfig = PatienceConfig(timeout = 10.seconds)\n\n  implicit lazy val ec: ExecutionContext = system.dispatcher\n\n  lazy val cfg = system.settings.config.getConfig(\"jdbc-journal\")\n\n  lazy val journalConfig = new JournalConfig(cfg)\n\n  lazy val db = SlickExtension(system).database(cfg).database\n\n  protected override def supportsSerialization: CapabilityFlag = newDao\n  protected override def supportsMetadata: CapabilityFlag = newDao\n\n  override def beforeAll(): Unit = {\n    dropAndCreate(schemaType)\n    super.beforeAll()\n  }\n\n  override def afterAll(): Unit = {\n    db.close()\n    super.afterAll()\n  }\n}\n\nclass H2JournalSpec extends JdbcJournalSpec(ConfigFactory.load(\"h2-application.conf\"), H2)\nclass H2JournalSpecSharedDb extends JdbcJournalSpec(ConfigFactory.load(\"h2-shared-db-application.conf\"), H2)\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/journal/dao/ByteArrayJournalSerializerTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc\npackage journal.dao.legacy\n\nimport akka.persistence.{ AtomicWrite, PersistentRepr }\n\nimport scala.collection.immutable._\n\nclass ByteArrayJournalSerializerTest extends SharedActorSystemTestSpec() {\n  it should \"serialize a serializable message and indicate whether or not the serialization succeeded\" in {\n    val serializer = new ByteArrayJournalSerializer(serialization, \",\")\n    val result = serializer.serialize(Seq(AtomicWrite(PersistentRepr(\"foo\"))))\n    result should have size 1\n    (result.head should be).a(Symbol(\"success\"))\n  }\n\n  it should \"not serialize a non-serializable message and indicate whether or not the serialization succeeded\" in {\n    class Test\n    val serializer = new ByteArrayJournalSerializer(serialization, \",\")\n    val result = serializer.serialize(Seq(AtomicWrite(PersistentRepr(new Test))))\n    result should have size 1\n    (result.head should be).a(Symbol(\"failure\"))\n  }\n\n  it should \"serialize non-serializable and serializable messages and indicate whether or not the serialization succeeded\" in {\n    class Test\n    val serializer = new ByteArrayJournalSerializer(serialization, \",\")\n    val result = serializer.serialize(List(AtomicWrite(PersistentRepr(new Test)), AtomicWrite(PersistentRepr(\"foo\"))))\n    result should have size 2\n    (result.head should be).a(Symbol(\"failure\"))\n    (result.last should be).a(Symbol(\"success\"))\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/journal/dao/JournalTablesTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao.legacy\n\nimport akka.persistence.jdbc.TablesTestSpec\nimport slick.jdbc.JdbcProfile\n\nclass JournalTablesTest extends TablesTestSpec {\n  val journalTableConfiguration = journalConfig.journalTableConfiguration\n\n  object TestByteAJournalTables extends JournalTables {\n    override val profile: JdbcProfile = slick.jdbc.PostgresProfile\n    override val journalTableCfg = journalTableConfiguration\n  }\n\n  \"JournalTable\" should \"be configured with a schema name\" in {\n    TestByteAJournalTables.JournalTable.baseTableRow.schemaName shouldBe journalTableConfiguration.schemaName\n  }\n\n  it should \"be configured with a table name\" in {\n    TestByteAJournalTables.JournalTable.baseTableRow.tableName shouldBe journalTableConfiguration.tableName\n  }\n\n  it should \"be configured with column names\" in {\n    val colName = toColumnName(journalTableConfiguration.tableName)(_)\n    TestByteAJournalTables.JournalTable.baseTableRow.persistenceId.toString shouldBe colName(\n      journalTableConfiguration.columnNames.persistenceId)\n    TestByteAJournalTables.JournalTable.baseTableRow.deleted.toString shouldBe colName(\n      journalTableConfiguration.columnNames.deleted)\n    TestByteAJournalTables.JournalTable.baseTableRow.sequenceNumber.toString shouldBe colName(\n      journalTableConfiguration.columnNames.sequenceNumber)\n    //    TestByteAJournalTables.JournalTable.baseTableRow.tags.toString() shouldBe colName(journalTableConfiguration.columnNames.tags)\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/journal/dao/TagsSerializationTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao.legacy\n\nimport akka.persistence.jdbc.SharedActorSystemTestSpec\n\nclass TagsSerializationTest extends SharedActorSystemTestSpec {\n  \"Encode\" should \"no tags\" in {\n    encodeTags(Set.empty[String], \",\") shouldBe None\n  }\n\n  it should \"one tag\" in {\n    encodeTags(Set(\"foo\"), \",\").value shouldBe \"foo\"\n  }\n\n  it should \"two tags\" in {\n    encodeTags(Set(\"foo\", \"bar\"), \",\").value shouldBe \"foo,bar\"\n  }\n\n  it should \"three tags\" in {\n    encodeTags(Set(\"foo\", \"bar\", \"baz\"), \",\").value shouldBe \"foo,bar,baz\"\n  }\n\n  \"decode\" should \"no tags\" in {\n    decodeTags(None, \",\") shouldBe Set()\n  }\n\n  it should \"one tag with separator\" in {\n    decodeTags(Some(\"foo\"), \",\") shouldBe Set(\"foo\")\n  }\n\n  it should \"two tags with separator\" in {\n    decodeTags(Some(\"foo,bar\"), \",\") shouldBe Set(\"foo\", \"bar\")\n  }\n\n  it should \"three tags with separator\" in {\n    decodeTags(Some(\"foo,bar,baz\"), \",\") shouldBe Set(\"foo\", \"bar\", \"baz\")\n  }\n\n  \"TagsSerialization\" should \"be bijective\" in {\n    val tags: Set[String] = Set(\"foo\", \"bar\", \"baz\")\n    decodeTags(encodeTags(tags, \",\"), \",\") shouldBe tags\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/journal/dao/TrySeqTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.journal.dao\n\nimport akka.persistence.jdbc.util.TrySeq\nimport akka.persistence.jdbc.SimpleSpec\n\nimport scala.collection.immutable._\nimport scala.util.{ Failure, Success }\n\nclass TrySeqTest extends SimpleSpec {\n  def failure(text: String) = Failure(new RuntimeException(text))\n\n  it should \"sequence an empty immutable.Seq\" in {\n    TrySeq.sequence(Seq.empty) shouldBe Success(Seq.empty)\n  }\n\n  it should \"sequence an empty immutable.Vector\" in {\n    TrySeq.sequence(Vector.empty) shouldBe Success(Seq.empty)\n  }\n\n  it should \"sequence a immutable.Seq of success/success\" in {\n    TrySeq.sequence(Seq(Success(\"a\"), Success(\"b\"))) shouldBe Success(Seq(\"a\", \"b\"))\n  }\n\n  it should \"sequence an immutable Seq of success/failure\" in {\n    val result = TrySeq.sequence(List(Success(\"a\"), failure(\"b\")))\n    result should matchPattern { case Failure(cause) if cause.getMessage.contains(\"b\") => }\n  }\n\n  it should \"sequence an immutable Seq of failure/success\" in {\n    val result = TrySeq.sequence(List(failure(\"a\"), Success(\"b\")))\n    result should matchPattern { case Failure(cause) if cause.getMessage.contains(\"a\") => }\n  }\n\n  it should \"sequence an immutable.Seq of failure/failure\" in {\n    val result = TrySeq.sequence(Seq(failure(\"a\"), failure(\"b\")))\n    result should matchPattern { case Failure(cause) if cause.getMessage.contains(\"a\") => }\n  }\n\n  it should \"sequence an immutable.Vector of failure/failure\" in {\n    val result = TrySeq.sequence(Vector(failure(\"a\"), failure(\"b\")))\n    result should matchPattern { case Failure(cause) if cause.getMessage.contains(\"a\") => }\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/AllPersistenceIdsTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport scala.concurrent.duration._\n\nabstract class AllPersistenceIdsTest(config: String) extends QueryTestSpec(config) {\n  it should \"not terminate the stream when there are not pids\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    journalOps.withPersistenceIds() { tp =>\n      tp.request(1)\n      tp.expectNoMessage(100.millis)\n      tp.cancel()\n      tp.expectNoMessage(100.millis)\n    }\n  }\n\n  it should \"find persistenceIds for actors\" in withActorSystem { implicit system =>\n    val journalOps = new JavaDslJdbcReadJournalOperations(system)\n    withTestActors() { (actor1, actor2, actor3) =>\n      journalOps.withPersistenceIds() { tp =>\n        tp.request(10)\n        tp.expectNoMessage(100.millis)\n\n        actor1 ! 1\n        tp.expectNext(ExpectNextTimeout, \"my-1\")\n        tp.expectNoMessage(100.millis)\n\n        actor2 ! 1\n        tp.expectNext(ExpectNextTimeout, \"my-2\")\n        tp.expectNoMessage(100.millis)\n\n        actor3 ! 1\n        tp.expectNext(ExpectNextTimeout, \"my-3\")\n        tp.expectNoMessage(100.millis)\n\n        actor1 ! 1\n        tp.expectNoMessage(100.millis)\n\n        actor2 ! 1\n        tp.expectNoMessage(100.millis)\n\n        actor3 ! 1\n        tp.expectNoMessage(100.millis)\n\n        tp.cancel()\n        tp.expectNoMessage(100.millis)\n      }\n    }\n  }\n}\n\nclass H2ScalaAllPersistenceIdsTest extends AllPersistenceIdsTest(\"h2-application.conf\") with H2Cleaner\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/CurrentEventsByPersistenceIdTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport akka.Done\nimport akka.persistence.Persistence\nimport akka.persistence.jdbc.journal.JdbcAsyncWriteJournal\nimport akka.persistence.query.Offset\nimport akka.persistence.query.{ EventEnvelope, Sequence }\nimport akka.testkit.TestProbe\n\nabstract class CurrentEventsByPersistenceIdTest(config: String) extends QueryTestSpec(config) {\n  import QueryTestSpec.EventEnvelopeProbeOps\n\n  it should \"find events from sequenceNr\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors() { (actor1, _, _) =>\n      actor1 ! 1\n      actor1 ! 2\n      actor1 ! 3\n      actor1 ! 4\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 4\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 0, 1) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextEventEnvelope(\"my-1\", 1, 1)\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 1, 1) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 1, 2) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-1\", 2, 2, timestamp = 0L))\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 2, 2) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-1\", 2, 2, timestamp = 0L))\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 2, 
3) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-1\", 2, 2, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-1\", 3, 3, timestamp = 0L))\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 3, 3) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-1\", 3, 3, timestamp = 0L))\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 0, 3) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-1\", 2, 2, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-1\", 3, 3, timestamp = 0L))\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 1, 3) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-1\", 2, 2, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-1\", 3, 3, timestamp = 0L))\n        tp.expectComplete()\n      }\n    }\n  }\n\n  it should \"not find any events for unknown pid\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    journalOps.withCurrentEventsByPersistenceId()(\"unkown-pid\", 0L, Long.MaxValue) { tp =>\n      tp.request(Int.MaxValue)\n      tp.expectComplete()\n    }\n  }\n\n  it should \"include ordering Offset in EventEnvelope\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors() { (actor1, actor2, actor3) =>\n      actor1 ! 1\n      actor1 ! 2\n      actor1 ! 3\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 3\n      }\n\n      actor2 ! 
4\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 4\n      }\n\n      actor3 ! 5\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 5\n      }\n\n      actor1 ! 6\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 6\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 0, Long.MaxValue) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextEventEnvelope(\"my-1\", 1, 1)\n        tp.expectNextEventEnvelope(\"my-1\", 2, 2)\n\n        val env3 = tp.expectNext(ExpectNextTimeout)\n        val ordering3 = env3.offset match {\n          case Sequence(value) => value\n          case _               => fail()\n        }\n\n        val env6 = tp.expectNext(ExpectNextTimeout)\n        env6.persistenceId shouldBe \"my-1\"\n        env6.sequenceNr shouldBe 4\n        env6.event shouldBe 6\n        // event 4 and 5 persisted before 6 by different actors, increasing the ordering\n        env6.offset shouldBe Offset.sequence(ordering3 + 3)\n\n        tp.expectComplete()\n      }\n    }\n  }\n\n  it should \"find events for actors\" in withActorSystem { implicit system =>\n    val journalOps = new JavaDslJdbcReadJournalOperations(system)\n    withTestActors() { (actor1, _, _) =>\n      actor1 ! 1\n      actor1 ! 2\n      actor1 ! 
3\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 3\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 1, 1) { tp =>\n        tp.request(Int.MaxValue).expectNextEventEnvelope(\"my-1\", 1, 1).expectComplete()\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 2, 2) { tp =>\n        tp.request(Int.MaxValue).expectNextEventEnvelope(\"my-1\", 2, 2).expectComplete()\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 3, 3) { tp =>\n        tp.request(Int.MaxValue).expectNextEventEnvelope(\"my-1\", 3, 3).expectComplete()\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 2, 3) { tp =>\n        tp.request(Int.MaxValue)\n          .expectNextEventEnvelope(\"my-1\", 2, 2)\n          .expectNextEventEnvelope(\"my-1\", 3, 3)\n          .expectComplete()\n      }\n    }\n  }\n\n  it should \"allow updating events (for data migrations)\" in withActorSystem { implicit system =>\n    if (newDao)\n      pending // https://github.com/akka/akka-persistence-jdbc/issues/469\n    val journalOps = new JavaDslJdbcReadJournalOperations(system)\n    val journal = Persistence(system).journalFor(\"\")\n\n    withTestActors() { (actor1, _, _) =>\n      actor1 ! 1\n      actor1 ! 2\n      actor1 ! 
3\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 3\n      }\n\n      val pid = \"my-1\"\n      journalOps.withCurrentEventsByPersistenceId()(pid, 1, 3) { tp =>\n        tp.request(Int.MaxValue)\n          .expectNextEventEnvelope(pid, 1, 1)\n          .expectNextEventEnvelope(pid, 2, 2)\n          .expectNextEventEnvelope(pid, 3, 3)\n          .expectComplete()\n      }\n\n      // perform in-place update\n      val journalP = TestProbe()\n      journal.tell(JdbcAsyncWriteJournal.InPlaceUpdateEvent(pid, 1, Integer.valueOf(111)), journalP.ref)\n      journalP.expectMsg(Done)\n\n      journalOps.withCurrentEventsByPersistenceId()(pid, 1, 3) { tp =>\n        tp.request(Int.MaxValue)\n          .expectNextEventEnvelope(pid, 1, Integer.valueOf(111))\n          .expectNextEventEnvelope(pid, 2, 2)\n          .expectNextEventEnvelope(pid, 3, 3)\n          .expectComplete()\n      }\n    }\n  }\n}\n\n// Note: these tests use the shared-db configs, the test for all (so not only current) events use the regular db config\n\nclass H2ScalaCurrentEventsByPersistenceIdTest\n    extends CurrentEventsByPersistenceIdTest(\"h2-shared-db-application.conf\")\n    with H2Cleaner\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/CurrentEventsByTagTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport akka.persistence.query.{ EventEnvelope, NoOffset, Sequence }\nimport akka.pattern.ask\nimport com.typesafe.config.{ ConfigValue, ConfigValueFactory }\n\nimport scala.concurrent.duration._\nimport akka.Done\nimport akka.persistence.jdbc.query.EventAdapterTest.{ Event, TaggedAsyncEvent }\n\nimport scala.concurrent.Future\nimport CurrentEventsByTagTest._\n\nobject CurrentEventsByTagTest {\n  val maxBufferSize = 20\n  val refreshInterval = 500.milliseconds\n\n  val configOverrides: Map[String, ConfigValue] = Map(\n    \"jdbc-read-journal.max-buffer-size\" -> ConfigValueFactory.fromAnyRef(maxBufferSize.toString),\n    \"jdbc-read-journal.refresh-interval\" -> ConfigValueFactory.fromAnyRef(refreshInterval.toString()))\n}\n\nabstract class CurrentEventsByTagTest(config: String) extends QueryTestSpec(config, configOverrides) {\n  it should \"not find an event by tag for unknown tag\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n      (actor1 ? withTags(1, \"one\")).futureValue\n      (actor2 ? withTags(2, \"two\")).futureValue\n      (actor3 ? withTags(3, \"three\")).futureValue\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 3\n      }\n\n      journalOps.withCurrentEventsByTag()(\"unknown\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectComplete()\n      }\n    }\n  }\n\n  it should \"find all events by tag\" in withActorSystem { implicit system =>\n\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n      (actor1 ? withTags(1, \"number\")).futureValue\n      (actor2 ? 
withTags(2, \"number\")).futureValue\n      (actor3 ? withTags(3, \"number\")).futureValue\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 3\n      }\n\n      journalOps.withCurrentEventsByTag()(\"number\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => }\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByTag()(\"number\", Sequence(0)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => }\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByTag()(\"number\", Sequence(1)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => }\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByTag()(\"number\", Sequence(2)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => }\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByTag()(\"number\", Sequence(3)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectComplete()\n      }\n    }\n  }\n\n  it should \"persist and find a tagged event with multiple tags\" in withActorSystem { implicit system =>\n\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n      withClue(\"Persisting multiple tagged events\") {\n        (actor1 ? withTags(1, \"one\", \"1\", \"prime\")).futureValue\n        (actor1 ? 
withTags(2, \"two\", \"2\", \"prime\")).futureValue\n        (actor1 ? withTags(3, \"three\", \"3\", \"prime\")).futureValue\n        (actor1 ? withTags(4, \"four\", \"4\")).futureValue\n        (actor1 ? withTags(5, \"five\", \"5\", \"prime\")).futureValue\n\n        (actor2 ? withTags(3, \"three\", \"3\", \"prime\")).futureValue\n        (actor3 ? withTags(3, \"three\", \"3\", \"prime\")).futureValue\n\n        (actor1 ? 1).futureValue\n        (actor1 ? 1).futureValue\n\n        eventually {\n          journalOps.countJournal.futureValue shouldBe 9\n        }\n      }\n\n      journalOps.withCurrentEventsByTag()(\"one\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => }\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByTag()(\"prime\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(5), _, _, _) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(6), _, _, _) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(7), _, _, _) => }\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByTag()(\"3\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(6), _, _, _) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(7), _, _, _) => }\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByTag()(\"4\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(4), _, _, _) => }\n        tp.expectComplete()\n      }\n\n      
journalOps.withCurrentEventsByTag()(\"four\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(4), _, _, _) => }\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByTag()(\"5\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(5), _, _, _) => }\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByTag()(\"five\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(5), _, _, _) => }\n        tp.expectComplete()\n      }\n    }\n  }\n\n  it should \"complete without any gaps in case events are being persisted when the query is executed\" in withActorSystem {\n    implicit system =>\n\n      val journalOps = new JavaDslJdbcReadJournalOperations(system)\n      import system.dispatcher\n      withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n        def sendMessagesWithTag(tag: String, numberOfMessagesPerActor: Int): Future[Done] = {\n          val futures = for (actor <- Seq(actor1, actor2, actor3); i <- 1 to numberOfMessagesPerActor) yield {\n            actor ? TaggedAsyncEvent(Event(i.toString), tag)\n          }\n          Future.sequence(futures).map(_ => Done)\n        }\n\n        val tag = \"someTag\"\n        // send a batch of 3 * 200\n        val batch1 = sendMessagesWithTag(tag, 200)\n\n        // wait for acknowledgement of the first batch only\n        batch1.futureValue\n        // Sanity check, all events in the first batch must be in the journal\n        journalOps.countJournal.futureValue should be >= 600L\n\n        // Try to persist a large batch of events per actor. 
Some of these may be returned, but not all!\n        val batch2 = sendMessagesWithTag(tag, 5000)\n        // start the query before the last batch completes\n        journalOps.withCurrentEventsByTag()(tag, NoOffset) { tp =>\n          // The stream must complete within the given amount of time\n          // This make take a while in case the journal sequence actor detects gaps\n          val allEvents = tp.toStrict(atMost = 40.seconds)\n          allEvents.size should be >= 600\n          val expectedOffsets = 1L.to(allEvents.size).map(Sequence.apply)\n          allEvents.map(_.offset) shouldBe expectedOffsets\n        }\n        batch2.futureValue\n      }\n  }\n}\n\n// Note: these tests use the shared-db configs, the test for all (so not only current) events use the regular db config\n\nclass H2ScalaCurrentEventsByTagTest extends CurrentEventsByTagTest(\"h2-shared-db-application.conf\") with H2Cleaner\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/CurrentPersistenceIdsTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nabstract class CurrentPersistenceIdsTest(config: String) extends QueryTestSpec(config) {\n  it should \"not find any persistenceIds for empty journal\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    journalOps.withCurrentPersistenceIds() { tp =>\n      tp.request(1)\n      tp.expectComplete()\n    }\n  }\n\n  it should \"find persistenceIds for actors\" in withActorSystem { implicit system =>\n    val journalOps = new JavaDslJdbcReadJournalOperations(system)\n    withTestActors() { (actor1, actor2, actor3) =>\n      actor1 ! 1\n      actor2 ! 1\n      actor3 ! 1\n\n      eventually {\n        journalOps.withCurrentPersistenceIds() { tp =>\n          tp.request(3)\n          tp.expectNextUnordered(\"my-1\", \"my-2\", \"my-3\")\n          tp.expectComplete()\n        }\n      }\n    }\n  }\n}\n\n// Note: these tests use the shared-db configs, the test for all persistence ids use the regular db config\n\nclass H2ScalaCurrentPersistenceIdsTest extends CurrentPersistenceIdsTest(\"h2-shared-db-application.conf\") with H2Cleaner\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/EventAdapterTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport akka.persistence.query.{ EventEnvelope, NoOffset, Sequence }\n\nimport scala.concurrent.duration._\nimport akka.pattern.ask\nimport akka.persistence.journal.{ EventSeq, ReadEventAdapter, Tagged, WriteEventAdapter }\nimport org.scalatest.Assertions.fail\n\nobject EventAdapterTest {\n  case class Event(value: String) {\n    def adapted = EventAdapted(value)\n  }\n\n  case class TaggedEvent(event: Event, tag: String)\n\n  case class TaggedAsyncEvent(event: Event, tag: String)\n\n  case class EventAdapted(value: String) {\n    def restored = EventRestored(value)\n  }\n\n  case class EventRestored(value: String)\n\n  case object Snapshot\n\n  class TestReadEventAdapter extends ReadEventAdapter {\n    override def fromJournal(event: Any, manifest: String): EventSeq =\n      event match {\n        case e: EventAdapted => EventSeq.single(e.restored)\n        case _               => fail()\n      }\n  }\n\n  class TestWriteEventAdapter extends WriteEventAdapter {\n    override def manifest(event: Any): String = \"\"\n\n    override def toJournal(event: Any): Any =\n      event match {\n        case e: Event                        => e.adapted\n        case TaggedEvent(e: Event, tag)      => Tagged(e.adapted, Set(tag))\n        case TaggedAsyncEvent(e: Event, tag) => Tagged(e.adapted, Set(tag))\n        case _                               => event\n      }\n  }\n}\n\n/**\n * Tests that check persistence queries when event adapter is configured for persisted event.\n */\nabstract class EventAdapterTest(config: String) extends QueryTestSpec(config) {\n  import EventAdapterTest._\n\n  final val NoMsgTime: FiniteDuration = 100.millis\n\n  it should \"apply event adapter when querying events for actor with pid 'my-1'\" in withActorSystem { implicit system =>\n    val 
journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors() { (actor1, _, _) =>\n      journalOps.withEventsByPersistenceId()(\"my-1\", 0) { tp =>\n        tp.request(10)\n        tp.expectNoMessage(100.millis)\n\n        actor1 ! Event(\"1\")\n        tp.expectNext(ExpectNextTimeout, EventEnvelope(Sequence(1), \"my-1\", 1, EventRestored(\"1\"), timestamp = 0L))\n        tp.expectNoMessage(100.millis)\n\n        actor1 ! Event(\"2\")\n        tp.expectNext(ExpectNextTimeout, EventEnvelope(Sequence(2), \"my-1\", 2, EventRestored(\"2\"), timestamp = 0L))\n        tp.expectNoMessage(100.millis)\n        tp.cancel()\n      }\n    }\n  }\n\n  it should \"apply event adapters when querying events by tag from an offset\" in withActorSystem { implicit system =>\n\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n      (actor1 ? TaggedEvent(Event(\"1\"), \"event\")).futureValue\n      (actor2 ? TaggedEvent(Event(\"2\"), \"event\")).futureValue\n      (actor3 ? TaggedEvent(Event(\"3\"), \"event\")).futureValue\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 3\n      }\n\n      journalOps.withEventsByTag(10.seconds)(\"event\", Sequence(1)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-2\", 1, EventRestored(\"2\"), timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-3\", 1, EventRestored(\"3\"), timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n\n        actor1 ? 
TaggedEvent(Event(\"1\"), \"event\")\n        tp.expectNext(EventEnvelope(Sequence(4), \"my-1\", 2, EventRestored(\"1\"), timestamp = 0L))\n        tp.cancel()\n        tp.expectNoMessage(NoMsgTime)\n      }\n    }\n  }\n\n  it should \"apply event adapters when querying current events for actors\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors() { (actor1, _, _) =>\n      actor1 ! Event(\"1\")\n      actor1 ! Event(\"2\")\n      actor1 ! Event(\"3\")\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 3\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 1, 1) { tp =>\n        tp.request(Int.MaxValue)\n          .expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, EventRestored(\"1\"), timestamp = 0L))\n          .expectComplete()\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 2, 2) { tp =>\n        tp.request(Int.MaxValue)\n          .expectNext(EventEnvelope(Sequence(2), \"my-1\", 2, EventRestored(\"2\"), timestamp = 0L))\n          .expectComplete()\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 3, 3) { tp =>\n        tp.request(Int.MaxValue)\n          .expectNext(EventEnvelope(Sequence(3), \"my-1\", 3, EventRestored(\"3\"), timestamp = 0L))\n          .expectComplete()\n      }\n\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\", 2, 3) { tp =>\n        tp.request(Int.MaxValue)\n          .expectNext(EventEnvelope(Sequence(2), \"my-1\", 2, EventRestored(\"2\"), timestamp = 0L))\n          .expectNext(EventEnvelope(Sequence(3), \"my-1\", 3, EventRestored(\"3\"), timestamp = 0L))\n          .expectComplete()\n      }\n    }\n  }\n\n  it should \"apply event adapters when querying all current events by tag\" in withActorSystem { implicit system =>\n\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, actor2, 
actor3) =>\n      (actor1 ? TaggedEvent(Event(\"1\"), \"event\")).futureValue\n      (actor2 ? TaggedEvent(Event(\"2\"), \"event\")).futureValue\n      (actor3 ? TaggedEvent(Event(\"3\"), \"event\")).futureValue\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 3\n      }\n\n      journalOps.withCurrentEventsByTag()(\"event\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, EventRestored(\"1\")) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, EventRestored(\"2\")) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, EventRestored(\"3\")) => }\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByTag()(\"event\", Sequence(0)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, EventRestored(\"1\")) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, EventRestored(\"2\")) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, EventRestored(\"3\")) => }\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByTag()(\"event\", Sequence(1)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, EventRestored(\"2\")) => }\n        tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, EventRestored(\"3\")) => }\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByTag()(\"event\", Sequence(2)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, EventRestored(\"3\")) => }\n        tp.expectComplete()\n      }\n\n      journalOps.withCurrentEventsByTag()(\"event\", Sequence(3)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectComplete()\n      }\n    }\n  }\n}\n\nclass H2ScalaEventAdapterTest extends EventAdapterTest(\"h2-application.conf\") with H2Cleaner\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/EventsByPersistenceIdTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport akka.Done\nimport akka.persistence.jdbc.query.EventAdapterTest.{ Event, TaggedAsyncEvent }\nimport akka.persistence.query.{ EventEnvelope, Sequence }\nimport scala.concurrent.Future\nimport scala.concurrent.duration._\n\nimport akka.pattern.ask\nimport akka.persistence.query.Offset\n\nabstract class EventsByPersistenceIdTest(config: String) extends QueryTestSpec(config) {\n  import QueryTestSpec.EventEnvelopeProbeOps\n\n  it should \"not find any events for unknown pid\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    journalOps.withEventsByPersistenceId()(\"unkown-pid\", 0L, Long.MaxValue) { tp =>\n      tp.request(1)\n      tp.expectNoMessage(100.millis)\n      tp.cancel()\n      tp.expectNoMessage(100.millis)\n    }\n  }\n\n  it should \"find events from sequenceNr\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors() { (actor1, _, _) =>\n      actor1 ! withTags(1, \"number\")\n      actor1 ! withTags(2, \"number\")\n      actor1 ! withTags(3, \"number\")\n      actor1 ! 
withTags(4, \"number\")\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 4\n      }\n\n      journalOps.withEventsByPersistenceId()(\"my-1\", 0, 0) { tp =>\n        tp.request(1)\n        tp.expectComplete()\n        tp.cancel()\n      }\n\n      journalOps.withEventsByPersistenceId()(\"my-1\", 0, 1) { tp =>\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 1, 1)\n        tp.request(1)\n        tp.expectComplete()\n        tp.cancel()\n      }\n\n      journalOps.withEventsByPersistenceId()(\"my-1\", 1, 1) { tp =>\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 1, 1)\n        tp.request(1)\n        tp.expectComplete()\n        tp.cancel()\n      }\n\n      journalOps.withEventsByPersistenceId()(\"my-1\", 1, 2) { tp =>\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 1, 1)\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 2, 2)\n        tp.request(1)\n        tp.expectComplete()\n        tp.cancel()\n      }\n\n      journalOps.withEventsByPersistenceId()(\"my-1\", 2, 2) { tp =>\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 2, 2)\n        tp.request(1)\n        tp.expectComplete()\n        tp.cancel()\n      }\n\n      journalOps.withEventsByPersistenceId()(\"my-1\", 2, 3) { tp =>\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 2, 2)\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 3, 3)\n        tp.request(1)\n        tp.expectComplete()\n        tp.cancel()\n      }\n\n      journalOps.withEventsByPersistenceId()(\"my-1\", 3, 3) { tp =>\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 3, 3)\n        tp.request(1)\n        tp.expectComplete()\n        tp.cancel()\n      }\n\n      journalOps.withEventsByPersistenceId()(\"my-1\", 
0, 3) { tp =>\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 1, 1)\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 2, 2)\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 3, 3)\n        tp.request(1)\n        tp.expectComplete()\n        tp.cancel()\n      }\n\n      journalOps.withEventsByPersistenceId()(\"my-1\", 1, 3) { tp =>\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 1, 1)\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 2, 2)\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 3, 3)\n        tp.request(1)\n        tp.expectComplete()\n        tp.cancel()\n      }\n    }\n  }\n\n  it should \"include ordering Offset in EventEnvelope\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors() { (actor1, actor2, actor3) =>\n      actor1 ! withTags(1, \"ordering\")\n      actor1 ! withTags(2, \"ordering\")\n      actor1 ! withTags(3, \"ordering\")\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 3\n      }\n\n      journalOps.withEventsByPersistenceId()(\"my-1\", 0, Long.MaxValue) { tp =>\n        tp.request(100)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 1, 1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 2, 2)\n\n        val env3 = tp.expectNext(ExpectNextTimeout)\n        val ordering3 = env3.offset match {\n          case Sequence(value) => value\n          case _               => fail()\n        }\n\n        actor2 ! withTags(4, \"ordering\")\n        eventually {\n          journalOps.countJournal.futureValue shouldBe 4\n        }\n        actor3 ! withTags(5, \"ordering\")\n        eventually {\n          journalOps.countJournal.futureValue shouldBe 5\n        }\n        actor1 ! 
withTags(6, \"ordering\")\n        eventually {\n          journalOps.countJournal.futureValue shouldBe 6\n        }\n\n        val env6 = tp.expectNext(ExpectNextTimeout)\n        env6.persistenceId shouldBe \"my-1\"\n        env6.sequenceNr shouldBe 4\n        env6.event shouldBe 6\n        // event 4 and 5 persisted before 6 by different actors, increasing the ordering\n        env6.offset shouldBe Offset.sequence(ordering3 + 3)\n\n        tp.cancel()\n      }\n    }\n  }\n\n  it should \"deliver EventEnvelopes non-zero timestamps\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    val testStartTime = System.currentTimeMillis()\n    withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n\n      (actor1 ? withTags(1, \"number\")).futureValue\n      (actor2 ? withTags(2, \"number\")).futureValue\n      (actor3 ? withTags(3, \"number\")).futureValue\n\n      def assertTimestamp(timestamp: Long, clue: String) = {\n        withClue(clue) {\n          timestamp should !==(0L)\n          // we want to prove that the event got a non-zero timestamp\n          // but also a timestamp that between some boundaries around this test run\n          (timestamp - testStartTime) should be < 120000L\n          (timestamp - testStartTime) should be > 0L\n        }\n      }\n\n      journalOps.withEventsByPersistenceId()(\"my-1\", 0, 1) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case ev @ EventEnvelope(Sequence(1), \"my-1\", 1, 1) =>\n          assertTimestamp(ev.timestamp, \"my-1\")\n        }\n        tp.cancel()\n      }\n\n      journalOps.withEventsByPersistenceId()(\"my-2\", 0, 1) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case ev @ EventEnvelope(_, \"my-2\", 1, 2) =>\n          assertTimestamp(ev.timestamp, \"my-2\")\n        }\n        tp.cancel()\n      }\n\n      journalOps.withEventsByPersistenceId()(\"my-3\", 0, 1) { tp =>\n        
tp.request(Int.MaxValue)\n        tp.expectNextPF { case ev @ EventEnvelope(_, \"my-3\", 1, 3) =>\n          assertTimestamp(ev.timestamp, \"my-3\")\n        }\n        tp.cancel()\n      }\n    }\n  }\n\n  it should \"find events for actor with pid 'my-1'\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors() { (actor1, _, _) =>\n      journalOps.withEventsByPersistenceId()(\"my-1\", 0) { tp =>\n        tp.request(10)\n        tp.expectNoMessage(100.millis)\n\n        actor1 ! 1\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 1, 1)\n        tp.expectNoMessage(100.millis)\n\n        actor1 ! 2\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 2, 2)\n        tp.expectNoMessage(100.millis)\n        tp.cancel()\n      }\n    }\n  }\n\n  it should \"find events for actor with pid 'my-1' and persisting messages to other actor\" in withActorSystem {\n    implicit system =>\n      val journalOps = new JavaDslJdbcReadJournalOperations(system)\n      withTestActors() { (actor1, actor2, _) =>\n        journalOps.withEventsByPersistenceId()(\"my-1\", 0, Long.MaxValue) { tp =>\n          tp.request(10)\n          tp.expectNoMessage(100.millis)\n\n          actor1 ! 1\n          tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 1, 1)\n          tp.expectNoMessage(100.millis)\n\n          actor1 ! 2\n          tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 2, 2)\n          tp.expectNoMessage(100.millis)\n\n          actor2 ! 1\n          actor2 ! 2\n          actor2 ! 3\n          tp.expectNoMessage(100.millis)\n\n          actor1 ! 
3\n          tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-1\", 3, 3)\n          tp.expectNoMessage(100.millis)\n\n          tp.cancel()\n          tp.expectNoMessage(100.millis)\n        }\n      }\n  }\n\n  it should \"find events for actor with pid 'my-2'\" in withActorSystem { implicit system =>\n    val journalOps = new JavaDslJdbcReadJournalOperations(system)\n    withTestActors() { (_, actor2, _) =>\n      actor2 ! 1\n      actor2 ! 2\n      actor2 ! 3\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 3\n      }\n\n      journalOps.withEventsByPersistenceId()(\"my-2\", 0, Long.MaxValue) { tp =>\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-2\", 1, 1)\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-2\", 2, 2)\n        tp.request(1)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-2\", 3, 3)\n        tp.expectNoMessage(100.millis)\n\n        actor2 ! 5\n        actor2 ! 6\n        actor2 ! 7\n\n        eventually {\n          journalOps.countJournal.futureValue shouldBe 6\n        }\n\n        tp.request(3)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-2\", 4, 5)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-2\", 5, 6)\n        tp.expectNextEventEnvelope(ExpectNextTimeout, \"my-2\", 6, 7)\n        tp.expectNoMessage(100.millis)\n\n        tp.cancel()\n        tp.expectNoMessage(100.millis)\n      }\n    }\n  }\n\n  it should \"find a large number of events quickly\" in withActorSystem { implicit system =>\n    import akka.pattern.ask\n    import system.dispatcher\n    val journalOps = new JavaDslJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, _, _) =>\n      def sendMessagesWithTag(tag: String, numberOfMessages: Int): Future[Done] = {\n        val futures = for (i <- 1 to numberOfMessages) yield {\n          actor1 ? 
TaggedAsyncEvent(Event(i.toString), tag)\n        }\n        Future.sequence(futures).map(_ => Done)\n      }\n\n      val tag = \"someTag\"\n      val numberOfEvents = 1000\n      // send a batch with a large number of events\n      val batch = sendMessagesWithTag(tag, numberOfEvents)\n\n      // wait for acknowledgement of the batch\n      batch.futureValue\n\n      journalOps.withEventsByPersistenceId()(\"my-1\", 1, numberOfEvents) { tp =>\n        val allEvents = tp.toStrict(atMost = 20.seconds)\n        allEvents.size shouldBe numberOfEvents\n      }\n    }\n  }\n}\n\nclass H2ScalaEventsByPersistenceIdTest extends EventsByPersistenceIdTest(\"h2-application.conf\") with H2Cleaner\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/EventsByTagMigrationTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport akka.actor.ActorSystem\nimport akka.pattern.ask\nimport akka.persistence.jdbc.query.EventsByTagMigrationTest.{ legacyTagKeyConfigOverride, migrationConfigOverride }\nimport akka.persistence.query.{ EventEnvelope, Sequence }\nimport com.typesafe.config.{ ConfigFactory, ConfigValue, ConfigValueFactory }\n\nimport scala.concurrent.duration._\n\nobject EventsByTagMigrationTest {\n  val maxBufferSize = 20\n  val refreshInterval = 500.milliseconds\n  val legacyTagKey = true\n\n  val legacyTagKeyConfigOverride: Map[String, ConfigValue] = Map(\n    \"jdbc-read-journal.max-buffer-size\" -> ConfigValueFactory.fromAnyRef(maxBufferSize.toString),\n    \"jdbc-read-journal.refresh-interval\" -> ConfigValueFactory.fromAnyRef(refreshInterval.toString),\n    \"jdbc-journal.tables.event_tag.legacy-tag-key\" -> ConfigValueFactory.fromAnyRef(legacyTagKey))\n\n  val migrationConfigOverride: Map[String, ConfigValue] = Map(\n    \"jdbc-read-journal.max-buffer-size\" -> ConfigValueFactory.fromAnyRef(maxBufferSize.toString),\n    \"jdbc-read-journal.refresh-interval\" -> ConfigValueFactory.fromAnyRef(refreshInterval.toString))\n}\n\nabstract class EventsByTagMigrationTest(configS: String) extends QueryTestSpec(configS, migrationConfigOverride) {\n  final val NoMsgTime: FiniteDuration = 100.millis\n\n  val tagTableCfg = journalConfig.eventTagTableConfiguration\n  val journalTableCfg = journalConfig.eventJournalTableConfiguration\n  val joinSQL: String =\n    s\"JOIN ${journalTableName} ON ${tagTableCfg.tableName}.${tagTableCfg.columnNames.eventId} = ${journalTableName}.${journalTableCfg.columnNames.ordering}\"\n  val fromSQL: String =\n    s\"FROM ${journalTableName} WHERE ${tagTableCfg.tableName}.${tagTableCfg.columnNames.eventId} = 
${journalTableName}.${journalTableCfg.columnNames.ordering}\"\n\n  def dropConstraint(\n      tableName: String = tagTableCfg.tableName,\n      constraintTableName: String = \"INFORMATION_SCHEMA.TABLE_CONSTRAINTS\",\n      constraintType: String,\n      constraintDialect: String = \"CONSTRAINT\",\n      constraintNameDialect: String = \"\"): Unit = {\n    withStatement { stmt =>\n      // SELECT AND DROP old CONSTRAINT\n      val constraintNameQuery =\n        s\"\"\"\n           |SELECT CONSTRAINT_NAME\n           |FROM $constraintTableName\n           |WHERE TABLE_NAME = '$tableName' AND CONSTRAINT_TYPE = '$constraintType'\n                  \"\"\".stripMargin\n      val resultSet = stmt.executeQuery(constraintNameQuery)\n      if (resultSet.next()) {\n        val constraintName = resultSet.getString(\"CONSTRAINT_NAME\")\n        stmt.execute(s\"ALTER TABLE $tableName DROP $constraintDialect $constraintName $constraintNameDialect\")\n      }\n    }\n  }\n\n  def addPKConstraint(\n      tableName: String = tagTableCfg.tableName,\n      pidColumnName: String = tagTableCfg.columnNames.persistenceId,\n      seqNrColumnName: String = tagTableCfg.columnNames.sequenceNumber,\n      tagColumnName: String = tagTableCfg.columnNames.tag,\n      constraintNameDialect: String = \"pk_event_tag\"): Unit = {\n    withStatement { stmt =>\n      stmt.execute(s\"\"\"\n           |ALTER TABLE $tableName\n           |ADD CONSTRAINT $constraintNameDialect\n           |PRIMARY KEY ($pidColumnName, $seqNrColumnName, $tagColumnName)\n                      \"\"\".stripMargin)\n    }\n  }\n\n  def addFKConstraint(\n      tableName: String = tagTableCfg.tableName,\n      pidColumnName: String = tagTableCfg.columnNames.persistenceId,\n      seqNrColumnName: String = tagTableCfg.columnNames.sequenceNumber,\n      journalTableName: String = journalTableCfg.tableName,\n      journalPidColumnName: String = tagTableCfg.columnNames.persistenceId,\n      journalSeqNrColumnName: String = 
tagTableCfg.columnNames.sequenceNumber,\n      constraintNameDialect: String = \"fk_event_journal_on_pk\"): Unit = {\n    withStatement { stmt =>\n      stmt.execute(s\"\"\"\n                      |ALTER TABLE $tableName\n                      |ADD CONSTRAINT $constraintNameDialect\n                      |FOREIGN KEY ($pidColumnName, $seqNrColumnName)\n                      |REFERENCES $journalTableName ($journalPidColumnName, $journalSeqNrColumnName)\n                      |ON DELETE CASCADE\n                      \"\"\".stripMargin)\n    }\n  }\n\n  def alterColumn(\n      tableName: String = tagTableCfg.tableName,\n      alterDialect: String = \"ALTER COLUMN\",\n      columnName: String = tagTableCfg.columnNames.eventId,\n      changeToDialect: String = \"BIGINT NULL\"): Unit = {\n    withStatement { stmt =>\n      stmt.execute(s\"ALTER TABLE $tableName $alterDialect $columnName $changeToDialect\")\n    }\n  }\n\n  def fillNewColumn(\n      joinDialect: String = \"\",\n      pidSetDialect: String =\n        s\"${tagTableCfg.columnNames.persistenceId} = ${journalTableName}.${journalTableCfg.columnNames.persistenceId}\",\n      seqNrSetDialect: String =\n        s\"${tagTableCfg.columnNames.sequenceNumber} = ${journalTableName}.${journalTableCfg.columnNames.sequenceNumber}\",\n      fromDialect: String = \"\"): Unit = {\n    withStatement { stmt =>\n      stmt.execute(s\"\"\"\n                      |UPDATE ${tagTableCfg.tableName} ${joinDialect}\n                      |SET ${pidSetDialect},\n                      |${seqNrSetDialect}\n                      |${fromDialect}\"\"\".stripMargin)\n    }\n  }\n\n  /**\n   * add new column to event_tag table.\n   */\n  def addNewColumn(): Unit = {}\n\n  /**\n   * fill new column for exists rows.\n   */\n  def migrateLegacyRows(): Unit = {\n    fillNewColumn(fromDialect = fromSQL);\n  }\n\n  /**\n   * drop old FK constraint\n   */\n  def dropLegacyFKConstraint(): Unit =\n    dropConstraint(constraintType = \"FOREIGN 
KEY\")\n\n  /**\n   * drop old PK  constraint\n   */\n  def dropLegacyPKConstraint(): Unit =\n    dropConstraint(constraintType = \"PRIMARY KEY\")\n\n  /**\n   * create new PK constraint for PK column.\n   */\n  def addNewPKConstraint(): Unit =\n    addPKConstraint()\n\n  /**\n   * create new FK constraint for PK column.\n   */\n  def addNewFKConstraint(): Unit =\n    addFKConstraint()\n\n  // override this, so we can reset the value.\n  def withRollingUpdateActorSystem(f: ActorSystem => Unit): Unit = {\n    val legacyTagKeyConfig = legacyTagKeyConfigOverride.foldLeft(ConfigFactory.load(configS)) {\n      case (conf, (path, configValue)) =>\n        conf.withValue(path, configValue)\n    }\n\n    implicit val system: ActorSystem = ActorSystem(\"migrator-test\", legacyTagKeyConfig)\n    f(system)\n    system.terminate().futureValue\n  }\n\n  it should \"migrate event tag to new way\" in {\n    // 1. Mock legacy tag column on here, but actually using new tag write.\n    withRollingUpdateActorSystem { implicit system =>\n\n      val journalOps = new ScalaJdbcReadJournalOperations(system)\n      withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n        (actor1 ? withTags(1, \"number\")).futureValue\n        (actor2 ? withTags(2, \"number\")).futureValue\n        (actor3 ? 
withTags(3, \"number\")).futureValue\n\n        journalOps.withEventsByTag()(\"number\", Sequence(Long.MinValue)) { tp =>\n          tp.request(Int.MaxValue)\n          tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n          tp.expectNext(EventEnvelope(Sequence(2), \"my-2\", 1, 2, timestamp = 0L))\n          tp.expectNext(EventEnvelope(Sequence(3), \"my-3\", 1, 3, timestamp = 0L))\n          tp.cancel()\n        }\n      }(system)\n    }\n\n    // Assume that the user could alter table for the addition of the new column manually, then we don't need to maintain\n    // the legacy table schema creation.\n    if (newDao) {\n      addNewColumn();\n      migrateLegacyRows();\n    }\n\n    // 2. write and read redundancy\n    withRollingUpdateActorSystem { implicit system =>\n      val journalOps = new ScalaJdbcReadJournalOperations(system)\n      withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n        (actor1 ? withTags(4, \"number\")).futureValue\n        (actor2 ? withTags(5, \"number\")).futureValue\n        (actor3 ? withTags(6, \"number\")).futureValue\n        // Delay events that have not yet been projected can still be read.\n        journalOps.withEventsByTag()(\"number\", Sequence(Long.MinValue)) { tp =>\n          tp.request(Int.MaxValue)\n          tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n          tp.expectNext(EventEnvelope(Sequence(2), \"my-2\", 1, 2, timestamp = 0L))\n          tp.expectNext(EventEnvelope(Sequence(3), \"my-3\", 1, 3, timestamp = 0L))\n          tp.expectNext(EventEnvelope(Sequence(4), \"my-1\", 2, 4, timestamp = 0L))\n          tp.expectNext(EventEnvelope(Sequence(5), \"my-2\", 2, 5, timestamp = 0L))\n          tp.expectNext(EventEnvelope(Sequence(6), \"my-3\", 2, 6, timestamp = 0L))\n          tp.cancel()\n        }\n      }(system)\n    }\n\n    // 3. 
Migrate the old constraints so that we can change read and write from the new PK.\n    if (newDao) {\n      dropLegacyFKConstraint();\n      dropLegacyPKConstraint()\n      addNewPKConstraint()\n      addNewFKConstraint()\n    }\n\n    // 4. check the migration completed.\n    withActorSystem { implicit system =>\n\n      val journalOps = new ScalaJdbcReadJournalOperations(system)\n      withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n\n        (actor1 ? withTags(7, \"number\")).futureValue\n        (actor2 ? withTags(8, \"number\")).futureValue\n        (actor3 ? withTags(9, \"number\")).futureValue\n\n        journalOps.withEventsByTag()(\"number\", Sequence(3)) { tp =>\n          tp.request(Int.MaxValue)\n          tp.expectNext(EventEnvelope(Sequence(4), \"my-1\", 2, 4, timestamp = 0L))\n          tp.expectNext(EventEnvelope(Sequence(5), \"my-2\", 2, 5, timestamp = 0L))\n          tp.expectNext(EventEnvelope(Sequence(6), \"my-3\", 2, 6, timestamp = 0L))\n          tp.expectNext(EventEnvelope(Sequence(7), \"my-1\", 3, 7, timestamp = 0L))\n          tp.expectNext(EventEnvelope(Sequence(8), \"my-2\", 3, 8, timestamp = 0L))\n          tp.expectNext(EventEnvelope(Sequence(9), \"my-3\", 3, 9, timestamp = 0L))\n          tp.cancel()\n        }\n\n      }(system)\n    }\n  }\n}\n\nclass H2ScalaEventsByTagMigrationTest extends EventsByTagMigrationTest(\"h2-application.conf\") with H2Cleaner {\n\n  override def migrateLegacyRows(): Unit = {\n    fillNewColumn(\n      pidSetDialect = s\"\"\"${tagTableCfg.columnNames.persistenceId} = (\n           |    SELECT ${journalTableCfg.columnNames.persistenceId}\n           |    ${fromSQL}\n           |)\"\"\".stripMargin,\n      seqNrSetDialect = s\"\"\"${tagTableCfg.columnNames.sequenceNumber} = (\n           |    SELECT ${journalTableCfg.columnNames.sequenceNumber}\n           |    ${fromSQL}\n           |)\"\"\".stripMargin)\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/EventsByTagTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport akka.Done\nimport akka.persistence.query.{ EventEnvelope, NoOffset, Sequence }\nimport akka.pattern.ask\nimport akka.persistence.jdbc.query.EventAdapterTest.{ Event, EventRestored, TaggedAsyncEvent, TaggedEvent }\nimport com.typesafe.config.{ ConfigValue, ConfigValueFactory }\n\nimport scala.concurrent.duration._\nimport scala.concurrent.Future\n\nimport EventsByTagTest._\n\nobject EventsByTagTest {\n  val maxBufferSize = 20\n  val refreshInterval = 500.milliseconds\n\n  val configOverrides: Map[String, ConfigValue] = Map(\n    \"jdbc-read-journal.max-buffer-size\" -> ConfigValueFactory.fromAnyRef(maxBufferSize.toString),\n    \"jdbc-read-journal.refresh-interval\" -> ConfigValueFactory.fromAnyRef(refreshInterval.toString()))\n}\n\nabstract class EventsByTagTest(config: String) extends QueryTestSpec(config, configOverrides) {\n  final val NoMsgTime: FiniteDuration = 100.millis\n\n  it should \"not find events for unknown tags\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors() { (actor1, actor2, actor3) =>\n      actor1 ! withTags(1, \"one\")\n      actor2 ! withTags(2, \"two\")\n      actor3 ! withTags(3, \"three\")\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 3\n      }\n\n      journalOps.withEventsByTag()(\"unknown\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNoMessage(NoMsgTime)\n        tp.cancel()\n      }\n    }\n  }\n\n  it should \"find all events by tag\" in withActorSystem { implicit system =>\n\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n      (actor1 ? withTags(1, \"number\")).futureValue\n      (actor2 ? 
withTags(2, \"number\")).futureValue\n      (actor3 ? withTags(3, \"number\")).futureValue\n\n      journalOps.withEventsByTag()(\"number\", Sequence(Long.MinValue)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-2\", 1, 2, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-3\", 1, 3, timestamp = 0L))\n        tp.cancel()\n      }\n\n      journalOps.withEventsByTag()(\"number\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-2\", 1, 2, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-3\", 1, 3, timestamp = 0L))\n        tp.cancel()\n      }\n\n      journalOps.withEventsByTag()(\"number\", Sequence(0)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-2\", 1, 2, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-3\", 1, 3, timestamp = 0L))\n        tp.cancel()\n      }\n\n      journalOps.withEventsByTag()(\"number\", Sequence(1)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-2\", 1, 2, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-3\", 1, 3, timestamp = 0L))\n        tp.cancel()\n      }\n\n      journalOps.withEventsByTag()(\"number\", Sequence(2)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-3\", 1, 3, timestamp = 0L))\n        tp.cancel()\n      }\n\n      journalOps.withEventsByTag()(\"number\", Sequence(3)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNoMessage(NoMsgTime)\n        tp.cancel()\n        tp.expectNoMessage(NoMsgTime)\n      }\n\n      
journalOps.withEventsByTag()(\"number\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-2\", 1, 2, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-3\", 1, 3, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n\n        actor1 ? withTags(1, \"number\")\n        tp.expectNext(EventEnvelope(Sequence(4), \"my-1\", 2, 1, timestamp = 0L))\n\n        actor1 ? withTags(1, \"number\")\n        tp.expectNext(EventEnvelope(Sequence(5), \"my-1\", 3, 1, timestamp = 0L))\n\n        actor1 ? withTags(1, \"number\")\n        tp.expectNext(EventEnvelope(Sequence(6), \"my-1\", 4, 1, timestamp = 0L))\n        tp.cancel()\n        tp.expectNoMessage(NoMsgTime)\n      }\n    }\n  }\n\n  it should \"deliver EventEnvelopes non-zero timestamps\" in withActorSystem { implicit system =>\n\n    val testStartTime = System.currentTimeMillis()\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n\n      (actor1 ? withTags(1, \"number\")).futureValue\n      (actor2 ? withTags(2, \"number\")).futureValue\n      (actor3 ? 
withTags(3, \"number\")).futureValue\n\n      def assertTimestamp(timestamp: Long, clue: String) = {\n        withClue(clue) {\n          timestamp should !==(0L)\n          // we want to prove that the event got a non-zero timestamp\n          // but also a timestamp that between some boundaries around this test run\n          (timestamp - testStartTime) should be < 120000L\n          (timestamp - testStartTime) should be > 0L\n        }\n      }\n\n      journalOps.withEventsByTag()(\"number\", Sequence(Long.MinValue)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNextPF { case ev @ EventEnvelope(Sequence(1), \"my-1\", 1, 1) =>\n          assertTimestamp(ev.timestamp, \"my-1\")\n        }\n        tp.expectNextPF { case ev @ EventEnvelope(Sequence(2), \"my-2\", 1, 2) =>\n          assertTimestamp(ev.timestamp, \"my-2\")\n        }\n        tp.expectNextPF { case ev @ EventEnvelope(Sequence(3), \"my-3\", 1, 3) =>\n          assertTimestamp(ev.timestamp, \"my-3\")\n        }\n        tp.cancel()\n      }\n    }\n  }\n\n  it should \"select events by tag with exact match\" in withActorSystem { implicit system =>\n\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n\n    withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n      (actor1 ? withTags(1, \"number\", \"sharded-1\")).futureValue\n      (actor2 ? withTags(2, \"number\", \"sharded-10\")).futureValue\n      (actor3 ? 
withTags(3, \"number\", \"sharded-100\")).futureValue\n\n      journalOps.withEventsByTag()(\"number\", Sequence(Long.MinValue)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-2\", 1, 2, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-3\", 1, 3, timestamp = 0L))\n        tp.cancel()\n      }\n\n      journalOps.withEventsByTag()(\"sharded-1\", Sequence(Long.MinValue)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n        tp.cancel()\n      }\n\n      journalOps.withEventsByTag()(\"sharded-10\", Sequence(Long.MinValue)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-2\", 1, 2, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n        tp.cancel()\n      }\n\n      journalOps.withEventsByTag()(\"sharded-100\", Sequence(Long.MinValue)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-3\", 1, 3, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n        tp.cancel()\n      }\n    }\n  }\n\n  it should \"find all events by tag even when lots of events are persisted concurrently\" in withActorSystem {\n    implicit system =>\n\n      val journalOps = new ScalaJdbcReadJournalOperations(system)\n      val msgCountPerActor = 20\n      val numberOfActors = 100\n      val totalNumberOfMessages = msgCountPerActor * numberOfActors\n      withManyTestActors(numberOfActors) { actors =>\n        val actorsWithIndexes = actors.zipWithIndex\n        for {\n          messageNumber <- 0 until msgCountPerActor\n          (actor, actorIdx) <- actorsWithIndexes\n        } actor ! 
TaggedEvent(Event(s\"$actorIdx-$messageNumber\"), \"myEvent\")\n\n        journalOps.withEventsByTag()(\"myEvent\", NoOffset) { tp =>\n          tp.request(Int.MaxValue)\n          (1 to totalNumberOfMessages).foldLeft(Map.empty[Int, Int]) { (map, _) =>\n            val mgsParts = tp.expectNext().event.asInstanceOf[EventRestored].value.split(\"-\")\n            val actorIdx = mgsParts(0).toInt\n            val msgNumber = mgsParts(1).toInt\n            val expectedCount = map.getOrElse(actorIdx, 0)\n            assertResult(expected = expectedCount)(msgNumber)\n            // keep track of the next message number we expect for this actor idx\n            map.updated(actorIdx, msgNumber + 1)\n          }\n          tp.cancel()\n          tp.expectNoMessage(NoMsgTime)\n        }\n      }\n  }\n\n  it should \"find events by tag from an offset\" in withActorSystem { implicit system =>\n\n    val journalOps = new JavaDslJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n      (actor1 ? withTags(1, \"number\")).futureValue\n      (actor2 ? withTags(2, \"number\")).futureValue\n      (actor3 ? withTags(3, \"number\")).futureValue\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 3\n      }\n\n      journalOps.withEventsByTag()(\"number\", Sequence(1)) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-2\", 1, 2, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-3\", 1, 3, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n\n        actor1 ? 
withTags(1, \"number\")\n        tp.expectNext(EventEnvelope(Sequence(4), \"my-1\", 2, 1, timestamp = 0L))\n        tp.cancel()\n        tp.expectNoMessage(NoMsgTime)\n      }\n    }\n  }\n\n  it should \"persist and find tagged event for one tag\" in withActorSystem { implicit system =>\n\n    val journalOps = new JavaDslJdbcReadJournalOperations(system)\n    withTestActors() { (actor1, actor2, actor3) =>\n      journalOps.withEventsByTag(10.seconds)(\"one\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNoMessage(NoMsgTime)\n\n        actor1 ! withTags(1, \"one\") // 1\n        tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n\n        actor2 ! withTags(1, \"one\") // 2\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-2\", 1, 1, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n\n        actor3 ! withTags(1, \"one\") // 3\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-3\", 1, 1, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n\n        actor1 ! withTags(2, \"two\") // 4\n        tp.expectNoMessage(NoMsgTime)\n\n        actor2 ! withTags(2, \"two\") // 5\n        tp.expectNoMessage(NoMsgTime)\n\n        actor3 ! withTags(2, \"two\") // 6\n        tp.expectNoMessage(NoMsgTime)\n\n        actor1 ! withTags(1, \"one\") // 7\n        tp.expectNext(EventEnvelope(Sequence(7), \"my-1\", 3, 1, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n\n        actor2 ! withTags(1, \"one\") // 8\n        tp.expectNext(EventEnvelope(Sequence(8), \"my-2\", 3, 1, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n\n        actor3 ! 
withTags(1, \"one\") // 9\n        tp.expectNext(EventEnvelope(Sequence(9), \"my-3\", 3, 1, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n        tp.cancel()\n        tp.expectNoMessage(NoMsgTime)\n      }\n    }\n  }\n\n  it should \"persist and find tagged events when stored with multiple tags\" in withActorSystem { implicit system =>\n\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n      (actor1 ? withTags(1, \"one\", \"1\", \"prime\")).futureValue\n      (actor1 ? withTags(2, \"two\", \"2\", \"prime\")).futureValue\n      (actor1 ? withTags(3, \"three\", \"3\", \"prime\")).futureValue\n      (actor1 ? withTags(4, \"four\", \"4\")).futureValue\n      (actor1 ? withTags(5, \"five\", \"5\", \"prime\")).futureValue\n      (actor2 ? withTags(3, \"three\", \"3\", \"prime\")).futureValue\n      (actor3 ? withTags(3, \"three\", \"3\", \"prime\")).futureValue\n\n      (actor1 ? 6).futureValue\n      (actor1 ? 7).futureValue\n      (actor1 ? 8).futureValue\n      (actor1 ? 9).futureValue\n      (actor1 ? 
10).futureValue\n\n      eventually {\n        journalOps.countJournal.futureValue shouldBe 12\n      }\n\n      journalOps.withEventsByTag(10.seconds)(\"prime\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(2), \"my-1\", 2, 2, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-1\", 3, 3, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(5), \"my-1\", 5, 5, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(6), \"my-2\", 1, 3, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(7), \"my-3\", 1, 3, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n        tp.cancel()\n      }\n\n      journalOps.withEventsByTag(10.seconds)(\"three\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-1\", 3, 3, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(6), \"my-2\", 1, 3, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(7), \"my-3\", 1, 3, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n        tp.cancel()\n      }\n\n      journalOps.withEventsByTag(10.seconds)(\"3\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(3), \"my-1\", 3, 3, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(6), \"my-2\", 1, 3, timestamp = 0L))\n        tp.expectNext(EventEnvelope(Sequence(7), \"my-3\", 1, 3, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n        tp.cancel()\n      }\n\n      journalOps.withEventsByTag(10.seconds)(\"one\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(1), \"my-1\", 1, 1, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n        tp.cancel()\n      }\n\n      journalOps.withEventsByTag(10.seconds)(\"four\", NoOffset) { tp =>\n        
tp.request(Int.MaxValue)\n        tp.expectNextPF { case EventEnvelope(Sequence(4), \"my-1\", 4, 4) => }\n        tp.expectNoMessage(NoMsgTime)\n        tp.cancel()\n      }\n\n      journalOps.withEventsByTag(10.seconds)(\"five\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectNext(EventEnvelope(Sequence(5), \"my-1\", 5, 5, timestamp = 0L))\n        tp.expectNoMessage(NoMsgTime)\n        tp.cancel()\n        tp.expectNoMessage(NoMsgTime)\n      }\n    }\n  }\n\n  def timeoutMultiplier: Int = 1\n\n  it should \"show the configured performance characteristics\" in withActorSystem { implicit system =>\n\n    import system.dispatcher\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, actor2, actor3) =>\n      def sendMessagesWithTag(tag: String, numberOfMessagesPerActor: Int): Future[Done] = {\n        val futures = for (actor <- Seq(actor1, actor2, actor3); i <- 1 to numberOfMessagesPerActor) yield {\n          actor ? 
TaggedAsyncEvent(Event(i.toString), tag)\n        }\n        Future.sequence(futures).map(_ => Done)\n      }\n\n      val tag1 = \"someTag\"\n      // send a batch of 3 * 50\n      sendMessagesWithTag(tag1, 50)\n\n      // start the query before the future completes\n      journalOps.withEventsByTag()(tag1, NoOffset) { tp =>\n        tp.within(5.seconds) {\n          tp.request(Int.MaxValue)\n          tp.expectNextN(150)\n        }\n        tp.expectNoMessage(NoMsgTime)\n\n        // Send a small batch of 3 * 5 messages\n        sendMessagesWithTag(tag1, 5)\n        // Since queries are executed `refreshInterval`, there must be a small delay before this query gives a result\n        tp.within(min = refreshInterval / 2, max = 2.seconds * timeoutMultiplier) {\n          tp.expectNextN(15)\n        }\n        tp.expectNoMessage(NoMsgTime)\n\n        // another large batch should be retrieved fast\n        // send a second batch of 3 * 100\n        sendMessagesWithTag(tag1, 100)\n        tp.within(min = refreshInterval / 2, max = 10.seconds * timeoutMultiplier) {\n          tp.request(Int.MaxValue)\n          tp.expectNextN(300)\n        }\n        tp.expectNoMessage(NoMsgTime)\n      }\n    }\n  }\n}\n\nclass H2ScalaEventsByTagTest extends EventsByTagTest(\"h2-application.conf\") with H2Cleaner\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/EventsByUnfrequentTagTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport akka.pattern.ask\nimport akka.persistence.jdbc.query.EventsByUnfrequentTagTest._\nimport akka.persistence.query.{ EventEnvelope, NoOffset, Sequence }\nimport com.typesafe.config.{ ConfigValue, ConfigValueFactory }\n\nimport scala.concurrent.duration._\n\nobject EventsByUnfrequentTagTest {\n  val maxBufferSize = 20\n  val refreshInterval = 500.milliseconds\n\n  val configOverrides: Map[String, ConfigValue] = Map(\n    \"jdbc-read-journal.events-by-tag-buffer-sizes-per-query\" -> ConfigValueFactory.fromAnyRef(1.toString),\n    \"jdbc-read-journal.max-buffer-size\" -> ConfigValueFactory.fromAnyRef(maxBufferSize.toString),\n    \"jdbc-read-journal.refresh-interval\" -> ConfigValueFactory.fromAnyRef(refreshInterval.toString()))\n}\n\nabstract class EventsByUnfrequentTagTest(config: String) extends QueryTestSpec(config, configOverrides) {\n\n  final val NoMsgTime: FiniteDuration = 100.millis\n  it should \"persist and find a tagged event with multiple (frequently and unfrequently) tags\" in withActorSystem {\n    implicit system =>\n\n      val journalOps = new ScalaJdbcReadJournalOperations(system)\n      withTestActors(replyToMessages = true) { (actor1, _, _) =>\n        val often = \"often\"\n        val notOften = \"not-often\"\n        withClue(\"Persisting multiple tagged events\") {\n          (0 until 100).foreach { i =>\n            val additional = if (i % 40 == 0) {\n              Seq(notOften)\n            } else Seq.empty\n            val tags = Seq(often) ++ additional\n            (actor1 ? 
withTags(1, tags: _*)).futureValue\n          }\n\n          eventually {\n            journalOps.countJournal.futureValue shouldBe 100\n          }\n          journalOps.withEventsByTag()(often, NoOffset) { tp =>\n            tp.request(Int.MaxValue)\n            (1 to 100).foreach { i =>\n              tp.expectNextPF { case EventEnvelope(Sequence(`i`), _, _, _) => }\n            }\n            tp.cancel()\n            tp.expectNoMessage(NoMsgTime)\n          }\n\n          journalOps.withEventsByTag(10.seconds)(notOften, NoOffset) { tp =>\n            tp.request(Int.MaxValue)\n            tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => }\n            tp.expectNextPF { case EventEnvelope(Sequence(41), _, _, _) => }\n            tp.expectNextPF { case EventEnvelope(Sequence(81), _, _, _) => }\n\n            tp.cancel()\n            tp.expectNoMessage(NoMsgTime)\n          }\n        }\n\n      }\n  }\n\n}\n\nclass H2ScalaEventsByUnfrequentTagTest extends EventsByUnfrequentTagTest(\"h2-shared-db-application.conf\") with H2Cleaner\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/HardDeleteQueryTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport akka.persistence.query.NoOffset\nimport akka.pattern._\n\nimport scala.concurrent.duration._\nimport org.scalatest.matchers.should.Matchers\n\nabstract class HardDeleteQueryTest(config: String) extends QueryTestSpec(config) with Matchers {\n  implicit val askTimeout: FiniteDuration = 500.millis\n\n  it should \"not return deleted events when using CurrentEventsByTag\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, _, _) =>\n      (actor1 ? withTags(1, \"number\")).futureValue\n      (actor1 ? withTags(2, \"number\")).futureValue\n      (actor1 ? withTags(3, \"number\")).futureValue\n\n      // delete all three events and wait for confirmations\n      (actor1 ? DeleteCmd(1)).futureValue shouldBe \"deleted-1\"\n      (actor1 ? DeleteCmd(2)).futureValue shouldBe \"deleted-2\"\n      (actor1 ? DeleteCmd(3)).futureValue shouldBe \"deleted-3\"\n\n      // check that nothing gets delivered\n      journalOps.withCurrentEventsByTag()(\"number\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectComplete()\n      }\n    }\n  }\n\n  it should \"not return deleted events when using EventsByTag\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, _, _) =>\n      (actor1 ? withTags(1, \"number\")).futureValue\n      (actor1 ? withTags(2, \"number\")).futureValue\n      (actor1 ? withTags(3, \"number\")).futureValue\n\n      // delete all three events and wait for confirmations\n      (actor1 ? DeleteCmd(1)).futureValue shouldBe \"deleted-1\"\n      (actor1 ? DeleteCmd(2)).futureValue shouldBe \"deleted-2\"\n      (actor1 ? 
DeleteCmd(3)).futureValue shouldBe \"deleted-3\"\n\n      // check that nothing gets delivered\n      journalOps.withEventsByTag()(\"number\", NoOffset) { tp =>\n        tp.request(Int.MaxValue)\n        tp.cancel()\n      }\n    }\n  }\n\n  it should \"not return deleted events when using CurrentEventsByPersistenceId\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, _, _) =>\n      (actor1 ? withTags(1, \"number\")).futureValue\n      (actor1 ? withTags(2, \"number\")).futureValue\n      (actor1 ? withTags(3, \"number\")).futureValue\n\n      // delete all three events and wait for confirmations\n      (actor1 ? DeleteCmd(1)).futureValue shouldBe \"deleted-1\"\n      (actor1 ? DeleteCmd(2)).futureValue shouldBe \"deleted-2\"\n      (actor1 ? DeleteCmd(3)).futureValue shouldBe \"deleted-3\"\n\n      // check that nothing gets delivered\n      journalOps.withCurrentEventsByPersistenceId()(\"my-1\") { tp =>\n        tp.request(Int.MaxValue)\n        tp.expectComplete()\n      }\n    }\n  }\n\n  it should \"not return deleted events when using EventsByPersistenceId\" in withActorSystem { implicit system =>\n    val journalOps = new ScalaJdbcReadJournalOperations(system)\n    withTestActors(replyToMessages = true) { (actor1, _, _) =>\n      (actor1 ? withTags(1, \"number\")).futureValue\n      (actor1 ? withTags(2, \"number\")).futureValue\n      (actor1 ? withTags(3, \"number\")).futureValue\n\n      // delete all three events and wait for confirmations\n      (actor1 ? DeleteCmd(1)).futureValue shouldBe \"deleted-1\"\n      (actor1 ? DeleteCmd(2)).futureValue shouldBe \"deleted-2\"\n      (actor1 ? 
DeleteCmd(3)).futureValue shouldBe \"deleted-3\"\n\n      // check that nothing gets delivered\n      journalOps.withEventsByPersistenceId()(\"my-1\") { tp =>\n        tp.request(Int.MaxValue)\n        tp.cancel()\n      }\n    }\n  }\n}\n\nclass H2HardDeleteQueryTest extends HardDeleteQueryTest(\"h2-application.conf\") with H2Cleaner\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/JournalDaoStreamMessagesMemoryTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport akka.actor.{ ActorSystem, ExtendedActorSystem }\nimport akka.persistence.jdbc.config.JournalConfig\nimport akka.persistence.jdbc.journal.dao.JournalDao\nimport akka.persistence.{ AtomicWrite, PersistentRepr }\nimport akka.serialization.{ Serialization, SerializationExtension }\nimport akka.stream.scaladsl.{ Sink, Source }\nimport akka.stream.testkit.scaladsl.TestSink\nimport akka.stream.{ Materializer, SystemMaterializer }\nimport com.typesafe.config.{ ConfigValue, ConfigValueFactory }\nimport org.scalatest.concurrent.PatienceConfiguration.Timeout\nimport org.slf4j.LoggerFactory\nimport slick.jdbc.JdbcBackend.Database\nimport slick.jdbc.JdbcProfile\n\nimport java.lang.management.{ ManagementFactory, MemoryMXBean }\nimport java.util.UUID\nimport scala.collection.immutable\nimport scala.concurrent.duration._\nimport scala.concurrent.{ ExecutionContext, ExecutionContextExecutor }\nimport scala.util.{ Failure, Success }\n\nobject JournalDaoStreamMessagesMemoryTest {\n\n  val configOverrides: Map[String, ConfigValue] = Map(\"jdbc-journal.fetch-size\" -> ConfigValueFactory.fromAnyRef(\"100\"))\n\n  val MB = 1024 * 1024\n}\n\nabstract class JournalDaoStreamMessagesMemoryTest(configFile: String)\n    extends QueryTestSpec(configFile, JournalDaoStreamMessagesMemoryTest.configOverrides) {\n  import JournalDaoStreamMessagesMemoryTest.MB\n\n  private val log = LoggerFactory.getLogger(this.getClass)\n\n  val memoryMBean: MemoryMXBean = ManagementFactory.getMemoryMXBean\n\n  it should \"stream events\" in {\n    withActorSystem { implicit system: ActorSystem =>\n      withDatabase { db =>\n        implicit val ec: ExecutionContextExecutor = system.dispatcher\n        implicit val mat: Materializer = SystemMaterializer(system).materializer\n\n        val persistenceId = 
UUID.randomUUID().toString\n        val writerUuid = UUID.randomUUID().toString\n        val fqcn = journalConfig.pluginConfig.dao\n        val args = Seq(\n          (classOf[Database], db),\n          (classOf[JdbcProfile], profile),\n          (classOf[JournalConfig], journalConfig),\n          (classOf[Serialization], SerializationExtension(system)),\n          (classOf[ExecutionContext], ec),\n          (classOf[Materializer], mat))\n        val dao: JournalDao =\n          system.asInstanceOf[ExtendedActorSystem].dynamicAccess.createInstanceFor[JournalDao](fqcn, args) match {\n            case Success(dao)   => dao\n            case Failure(cause) => throw cause\n          }\n\n        val payloadSize = 5000 // 5000 bytes\n        val eventsPerBatch = 1000\n\n        val maxMem = 64 * MB\n\n        val numberOfInsertBatches = {\n          // calculate the number of batches using a factor to make sure we go a little bit over the limit\n          (maxMem / (payloadSize * eventsPerBatch) * 1.2).round.toInt\n        }\n        val totalMessages = numberOfInsertBatches * eventsPerBatch\n        val totalMessagePayload = totalMessages * payloadSize\n        log.info(\n          s\"batches: $numberOfInsertBatches (with $eventsPerBatch events), total messages: $totalMessages, total msgs size: $totalMessagePayload\")\n\n        // payload can be the same when inserting to avoid unnecessary memory usage\n        val payload = Array.fill(payloadSize)('a'.toByte)\n\n        val lastInsert =\n          Source\n            .fromIterator(() => (1 to numberOfInsertBatches).iterator)\n            .mapAsync(1) { i =>\n              val end = i * eventsPerBatch\n              val start = end - (eventsPerBatch - 1)\n              log.info(s\"batch $i - events from $start to $end\")\n              val atomicWrites =\n                (start to end).map { j =>\n                  AtomicWrite(immutable.Seq(PersistentRepr(payload, j, persistenceId, writerUuid = writerUuid)))\n         
       }\n\n              dao.asyncWriteMessages(atomicWrites).map(_ => i)\n            }\n            .runWith(Sink.last)\n\n        // wait until we write all messages\n        // being very generous, 1 second per message\n        lastInsert.futureValue(Timeout(totalMessages.seconds))\n\n        log.info(\"Events written, starting replay\")\n\n        // sleep and gc to have some kind of stable measurement of current heap usage\n        Thread.sleep(1000)\n        System.gc()\n        Thread.sleep(1000)\n        val usedBefore = memoryMBean.getHeapMemoryUsage.getUsed\n\n        val messagesSrc =\n          dao.messagesWithBatch(persistenceId, 0, totalMessages, batchSize = 100, None)\n        val probe =\n          messagesSrc\n            .map {\n              case Success((repr, _)) =>\n                if (repr.sequenceNr % 100 == 0)\n                  log.info(s\"fetched: ${repr.persistenceId} - ${repr.sequenceNr}/${totalMessages}\")\n              case Failure(exception) =>\n                log.error(\"Failure when reading messages.\", exception)\n            }\n            .runWith(TestSink())\n\n        probe.request(10)\n        probe.within(20.seconds) {\n          probe.expectNextN(10)\n        }\n\n        // sleep and gc to have some kind of stable measurement of current heap usage\n        Thread.sleep(2000)\n        System.gc()\n        Thread.sleep(1000)\n        val usedAfter = memoryMBean.getHeapMemoryUsage.getUsed\n\n        log.info(s\"Used heap before ${usedBefore / MB} MB, after ${usedAfter / MB} MB\")\n        // actual usage is much less than 10 MB\n        (usedAfter - usedBefore) should be <= (10L * MB)\n\n        probe.cancel()\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/JournalSequenceActorTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport akka.actor.{ ActorRef, ActorSystem }\nimport akka.pattern.ask\nimport akka.persistence.jdbc.config.JournalSequenceRetrievalConfig\nimport akka.persistence.jdbc.journal.dao.legacy.{ JournalRow, JournalTables }\nimport akka.persistence.jdbc.query.JournalSequenceActor.{ GetMaxOrderingId, MaxOrderingId }\nimport akka.persistence.jdbc.query.dao.TestProbeReadJournalDao\nimport akka.persistence.jdbc.SharedActorSystemTestSpec\nimport akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao\nimport akka.serialization.SerializationExtension\nimport akka.stream.scaladsl.{ Sink, Source }\nimport akka.testkit.TestProbe\nimport org.slf4j.LoggerFactory\nimport slick.jdbc.{ JdbcBackend, JdbcCapabilities }\n\nimport scala.concurrent.Future\nimport scala.concurrent.duration._\nimport org.scalatest.time.Span\n\nabstract class JournalSequenceActorTest(configFile: String, isOracle: Boolean)\n    extends QueryTestSpec(configFile)\n    with JournalTables {\n  private val log = LoggerFactory.getLogger(classOf[JournalSequenceActorTest])\n\n  val journalSequenceActorConfig = readJournalConfig.journalSequenceRetrievalConfiguration\n  val journalTableCfg = journalConfig.journalTableConfiguration\n\n  import profile.api._\n\n  implicit val askTimeout: FiniteDuration = 50.millis\n\n  def generateId: Int = 0\n\n  behavior.of(\"JournalSequenceActor\")\n\n  it should \"recover normally\" in {\n    if (newDao)\n      pending\n    withActorSystem { implicit system: ActorSystem =>\n      withDatabase { db =>\n        val numberOfRows = 15000\n        val rows = for (i <- 1 to numberOfRows) yield JournalRow(generateId, deleted = false, \"id\", i, Array(0.toByte))\n        db.run(JournalTable ++= rows).futureValue\n        withJournalSequenceActor(db, maxTries = 100) { actor =>\n          
eventually {\n            actor.ask(GetMaxOrderingId).mapTo[MaxOrderingId].futureValue shouldBe MaxOrderingId(numberOfRows)\n          }\n        }\n      }\n    }\n  }\n\n  private def canForceInsert: Boolean = profile.capabilities.contains(JdbcCapabilities.forceInsert)\n\n  if (canForceInsert && !newDao) {\n    it should s\"recover ${if (isOracle) \"one hundred thousand\" else \"one million\"} events quickly if no ids are missing\" in {\n      withActorSystem { implicit system: ActorSystem =>\n        withDatabase { db =>\n          val elements = if (isOracle) 100000 else 1000000\n          Source\n            .fromIterator(() => (1 to elements).iterator)\n            .map(id => JournalRow(id, deleted = false, \"id\", id, Array(0.toByte)))\n            .grouped(10000)\n            .mapAsync(4) { rows =>\n              db.run(JournalTable.forceInsertAll(rows))\n            }\n            .runWith(Sink.ignore)\n            .futureValue\n\n          val startTime = System.currentTimeMillis()\n          withJournalSequenceActor(db, maxTries = 100) { actor =>\n            implicit val patienceConfig: PatienceConfig =\n              PatienceConfig(10.seconds, Span(200, org.scalatest.time.Millis))\n            eventually {\n              val currentMax = actor.ask(GetMaxOrderingId).mapTo[MaxOrderingId].futureValue.maxOrdering\n              currentMax shouldBe elements\n            }\n          }\n          val timeTaken = System.currentTimeMillis() - startTime\n          log.info(s\"Recovered all events in $timeTaken ms\")\n        }\n      }\n    }\n  }\n\n  if (!isOracle && canForceInsert && !newDao) {\n    // Note this test case cannot be executed for oracle, because forceInsertAll is not supported in the oracle driver.\n    it should \"recover after the specified max number if tries if the first event has a very high sequence number and lots of large gaps exist\" in {\n      withActorSystem { implicit system: ActorSystem =>\n        withDatabase { db =>\n          
val numElements = 1000\n          val gapSize = 10000\n          val firstElement = 100000000\n          val lastElement = firstElement + (numElements * gapSize)\n          Source\n            .fromIterator(() => (firstElement to lastElement by gapSize).iterator)\n            .map(id => JournalRow(id, deleted = false, \"id\", id, Array(0.toByte)))\n            .grouped(10000)\n            .mapAsync(4) { rows =>\n              db.run(JournalTable.forceInsertAll(rows))\n            }\n            .runWith(Sink.ignore)\n            .futureValue\n\n          withJournalSequenceActor(db, maxTries = 2) { actor =>\n            // Should normally recover after `maxTries` seconds\n            implicit val patienceConfig: PatienceConfig =\n              PatienceConfig(10.seconds, Span(200, org.scalatest.time.Millis))\n            eventually {\n              val currentMax = actor.ask(GetMaxOrderingId).mapTo[MaxOrderingId].futureValue.maxOrdering\n              currentMax shouldBe lastElement\n            }\n          }\n        }\n      }\n    }\n  }\n\n  if (canForceInsert && !newDao) {\n    it should s\"assume that the max ordering id in the database on startup is the max after (queryDelay * maxTries)\" in {\n      withActorSystem { implicit system: ActorSystem =>\n        withDatabase { db =>\n          val maxElement = 100000\n          // only even numbers, odd numbers are missing\n          val idSeq = 2 to maxElement by 2\n          Source\n            .fromIterator(() => idSeq.iterator)\n            .map(id => JournalRow(id, deleted = false, \"id\", id, Array(0.toByte)))\n            .grouped(10000)\n            .mapAsync(4) { rows =>\n              db.run(JournalTable.forceInsertAll(rows))\n            }\n            .runWith(Sink.ignore)\n            .futureValue\n\n          val highestValue = if (isOracle) {\n            // ForceInsert does not seem to work for oracle, we must delete the odd numbered events\n            db.run(JournalTable.filter(_.ordering % 2L 
=== 1L).delete).futureValue\n            maxElement / 2\n          } else maxElement\n\n          withJournalSequenceActor(db, maxTries = 2) { actor =>\n            // The actor should assume the max after 2 seconds\n            implicit val patienceConfig: PatienceConfig = PatienceConfig(3.seconds)\n            eventually {\n              val currentMax = actor.ask(GetMaxOrderingId).mapTo[MaxOrderingId].futureValue.maxOrdering\n              currentMax shouldBe highestValue\n            }\n          }\n        }\n      }\n    }\n  }\n\n  /**\n   * @param maxTries The number of tries before events are assumed missing\n   *                 (since the actor queries every second by default,\n   *                 this is effectively the number of seconds after which events are assumed missing)\n   */\n  def withJournalSequenceActor(db: JdbcBackend.Database, maxTries: Int)(f: ActorRef => Unit)(\n      implicit system: ActorSystem): Unit = {\n    import system.dispatcher\n    val readJournalDao = new ByteArrayReadJournalDao(db, profile, readJournalConfig, SerializationExtension(system))\n    val actor =\n      system.actorOf(JournalSequenceActor.props(readJournalDao, journalSequenceActorConfig.copy(maxTries = maxTries)))\n    try f(actor)\n    finally system.stop(actor)\n  }\n}\n\nclass MockDaoJournalSequenceActorTest extends SharedActorSystemTestSpec {\n  def fetchMaxOrderingId(journalSequenceActor: ActorRef): Future[Long] = {\n    journalSequenceActor.ask(GetMaxOrderingId)(20.millis).mapTo[MaxOrderingId].map(_.maxOrdering)\n  }\n\n  it should \"re-query with delay only when events are missing.\" in {\n    val batchSize = 100\n    val maxTries = 5\n    val queryDelay = 200.millis\n\n    val almostQueryDelay = queryDelay - 50.millis\n    val almostImmediately = 50.millis\n    withTestProbeJournalSequenceActor(batchSize, maxTries, queryDelay) { (daoProbe, _) =>\n      daoProbe.expectMsg(almostImmediately, TestProbeReadJournalDao.JournalSequence(0, batchSize))\n      val 
firstBatch = (1L to 40L) ++ (51L to 110L)\n      daoProbe.reply(firstBatch)\n      withClue(s\"when events are missing, the actor should wait for $queryDelay before querying again\") {\n        daoProbe.expectNoMessage(almostQueryDelay)\n        daoProbe.expectMsg(almostQueryDelay, TestProbeReadJournalDao.JournalSequence(40, batchSize))\n      }\n      // number 41 is still missing after this batch\n      val secondBatch = 42L to 110L\n      daoProbe.reply(secondBatch)\n      withClue(s\"when events are missing, the actor should wait for $queryDelay before querying again\") {\n        daoProbe.expectNoMessage(almostQueryDelay)\n        daoProbe.expectMsg(almostQueryDelay, TestProbeReadJournalDao.JournalSequence(40, batchSize))\n      }\n      val thirdBatch = 41L to 110L\n      daoProbe.reply(thirdBatch)\n      withClue(\n        s\"when no more events are missing, but less that batchSize elemens have been received, \" +\n        s\"the actor should wait for $queryDelay before querying again\") {\n        daoProbe.expectNoMessage(almostQueryDelay)\n        daoProbe.expectMsg(almostQueryDelay, TestProbeReadJournalDao.JournalSequence(110, batchSize))\n      }\n\n      val fourthBatch = 111L to 210L\n      daoProbe.reply(fourthBatch)\n      withClue(\n        \"When no more events are missing and the number of events received is equal to batchSize, \" +\n        \"the actor should query again immediately\") {\n        daoProbe.expectMsg(almostImmediately, TestProbeReadJournalDao.JournalSequence(210, batchSize))\n      }\n\n      // Reply to prevent a dead letter warning on the timeout\n      daoProbe.reply(Seq.empty)\n      daoProbe.expectNoMessage(almostImmediately)\n    }\n  }\n\n  it should \"Assume an element missing after the configured amount of maxTries\" in {\n    val batchSize = 100\n    val maxTries = 5\n    val queryDelay = 150.millis\n\n    val slightlyMoreThanQueryDelay = queryDelay + 50.millis\n    val almostImmediately = 20.millis\n\n    val allIds = 
(1L to 40L) ++ (43L to 200L)\n\n    withTestProbeJournalSequenceActor(batchSize, maxTries, queryDelay) { (daoProbe, actor) =>\n      daoProbe.expectMsg(almostImmediately, TestProbeReadJournalDao.JournalSequence(0, batchSize))\n      daoProbe.reply(allIds.take(100))\n\n      val idsLargerThan40 = allIds.dropWhile(_ <= 40)\n      val retryResponse = idsLargerThan40.take(100)\n      for (i <- 1 to maxTries) withClue(s\"should retry $maxTries times (attempt $i)\") {\n        daoProbe.expectMsg(slightlyMoreThanQueryDelay, TestProbeReadJournalDao.JournalSequence(40, batchSize))\n        daoProbe.reply(retryResponse)\n      }\n\n      // sanity check\n      retryResponse.last shouldBe 142\n      withClue(\n        \"The elements 41 and 42 should be assumed missing, \" +\n        \"the actor should query again immediately since a full batch has been received\") {\n        daoProbe.expectMsg(almostImmediately, TestProbeReadJournalDao.JournalSequence(142, batchSize))\n        fetchMaxOrderingId(actor).futureValue shouldBe 142\n      }\n\n      // Reply to prevent a dead letter warning on the timeout\n      daoProbe.reply(Seq.empty)\n      daoProbe.expectNoMessage(almostImmediately)\n    }\n  }\n\n  def withTestProbeJournalSequenceActor(batchSize: Int, maxTries: Int, queryDelay: FiniteDuration)(\n      f: (TestProbe, ActorRef) => Unit)(implicit system: ActorSystem): Unit = {\n    val testProbe = TestProbe()\n    val config = JournalSequenceRetrievalConfig(\n      batchSize = batchSize,\n      maxTries = maxTries,\n      queryDelay = queryDelay,\n      maxBackoffQueryDelay = 4.seconds,\n      askTimeout = 100.millis)\n    val mockDao = new TestProbeReadJournalDao(testProbe)\n    val actor = system.actorOf(JournalSequenceActor.props(mockDao, config))\n    try f(testProbe, actor)\n    finally system.stop(actor)\n  }\n}\n\nclass H2JournalSequenceActorTest\n    extends JournalSequenceActorTest(\"h2-application.conf\", isOracle = false)\n    with H2Cleaner\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/MultipleReadJournalTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport akka.persistence.jdbc.query.EventsByTagTest._\nimport akka.persistence.jdbc.query.scaladsl.JdbcReadJournal\nimport akka.persistence.query.{ NoOffset, PersistenceQuery }\nimport akka.stream.scaladsl.Sink\n\nclass MultipleReadJournalTest\n    extends QueryTestSpec(\"h2-two-read-journals-application.conf\", configOverrides)\n    with H2Cleaner {\n  it should \"be able to create two read journals and use eventsByTag on them\" in withActorSystem { implicit system =>\n    val normalReadJournal = PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier)\n    val secondReadJournal = PersistenceQuery(system).readJournalFor[JdbcReadJournal](\"jdbc-read-journal-number-two\")\n\n    val events1 = normalReadJournal.currentEventsByTag(\"someTag\", NoOffset).runWith(Sink.seq)\n    val events2 = secondReadJournal.currentEventsByTag(\"someTag\", NoOffset).runWith(Sink.seq)\n    events1.futureValue shouldBe empty\n    events2.futureValue shouldBe empty\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/QueryTestSpec.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport akka.actor.{ ActorRef, ActorSystem, Props, Stash, Status }\nimport akka.pattern.ask\nimport akka.event.LoggingReceive\nimport akka.persistence.{\n  DeleteMessagesFailure,\n  DeleteMessagesSuccess,\n  PersistentActor,\n  SaveSnapshotFailure,\n  SaveSnapshotSuccess,\n  SnapshotOffer\n}\nimport akka.persistence.jdbc.SingleActorSystemPerTestSpec\nimport akka.persistence.jdbc.query.EventAdapterTest.{ Event, Snapshot, TaggedAsyncEvent, TaggedEvent }\nimport akka.persistence.jdbc.query.javadsl.{ JdbcReadJournal => JavaJdbcReadJournal }\nimport akka.persistence.jdbc.query.scaladsl.JdbcReadJournal\nimport akka.persistence.journal.Tagged\nimport akka.persistence.query.{ EventEnvelope, Offset, PersistenceQuery }\nimport akka.stream.scaladsl.Sink\nimport akka.stream.testkit.TestSubscriber\nimport akka.stream.testkit.javadsl.{ TestSink => JavaSink }\nimport akka.stream.testkit.scaladsl.TestSink\nimport akka.stream.{ Materializer, SystemMaterializer }\nimport com.typesafe.config.ConfigValue\n\nimport scala.concurrent.Future\nimport scala.concurrent.duration._\nimport akka.persistence.jdbc.testkit.internal.H2\nimport akka.persistence.jdbc.testkit.internal.MySQL\nimport akka.persistence.jdbc.testkit.internal.Oracle\nimport akka.persistence.jdbc.testkit.internal.Postgres\nimport akka.persistence.jdbc.testkit.internal.SqlServer\n\nimport scala.concurrent.ExecutionContext\n\ntrait ReadJournalOperations {\n  def withCurrentPersistenceIds(within: FiniteDuration = 60.second)(f: TestSubscriber.Probe[String] => Unit): Unit\n  def withPersistenceIds(within: FiniteDuration = 60.second)(f: TestSubscriber.Probe[String] => Unit): Unit\n  def withCurrentEventsByPersistenceId(within: FiniteDuration = 60.second)(\n      persistenceId: String,\n      fromSequenceNr: Long = 0,\n      
toSequenceNr: Long = Long.MaxValue)(f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit\n  def withEventsByPersistenceId(within: FiniteDuration = 60.second)(\n      persistenceId: String,\n      fromSequenceNr: Long = 0,\n      toSequenceNr: Long = Long.MaxValue)(f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit\n  def withCurrentEventsByTag(within: FiniteDuration = 60.second)(tag: String, offset: Offset)(\n      f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit\n  def withEventsByTag(within: FiniteDuration = 60.second)(tag: String, offset: Offset)(\n      f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit\n  def countJournal: Future[Long]\n}\n\nclass ScalaJdbcReadJournalOperations(readJournal: JdbcReadJournal)(implicit system: ActorSystem, mat: Materializer)\n    extends ReadJournalOperations {\n  def this(system: ActorSystem) =\n    this(PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier))(\n      system,\n      SystemMaterializer(system).materializer)\n\n  import system.dispatcher\n\n  def withCurrentPersistenceIds(within: FiniteDuration)(f: TestSubscriber.Probe[String] => Unit): Unit = {\n    val tp = readJournal.currentPersistenceIds().runWith(TestSink[String]())\n    tp.within(within)(f(tp))\n  }\n\n  def withPersistenceIds(within: FiniteDuration)(f: TestSubscriber.Probe[String] => Unit): Unit = {\n    val tp = readJournal.persistenceIds().runWith(TestSink[String]())\n    tp.within(within)(f(tp))\n  }\n\n  def withCurrentEventsByPersistenceId(\n      within: FiniteDuration)(persistenceId: String, fromSequenceNr: Long = 0, toSequenceNr: Long = Long.MaxValue)(\n      f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = {\n    val tp = readJournal\n      .currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr)\n      .runWith(TestSink[EventEnvelope]())\n    tp.within(within)(f(tp))\n  }\n\n  def withEventsByPersistenceId(\n      within: FiniteDuration)(persistenceId: String, fromSequenceNr: Long, 
toSequenceNr: Long)(\n      f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = {\n    val tp =\n      readJournal.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).runWith(TestSink[EventEnvelope]())\n    tp.within(within)(f(tp))\n  }\n\n  def withCurrentEventsByTag(within: FiniteDuration)(tag: String, offset: Offset)(\n      f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = {\n    val tp = readJournal.currentEventsByTag(tag, offset).runWith(TestSink[EventEnvelope]())\n    tp.within(within)(f(tp))\n  }\n\n  def withEventsByTag(within: FiniteDuration)(tag: String, offset: Offset)(\n      f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = {\n    val tp = readJournal.eventsByTag(tag, offset).runWith(TestSink[EventEnvelope]())\n    tp.within(within)(f(tp))\n  }\n\n  override def countJournal: Future[Long] =\n    readJournal\n      .currentPersistenceIds()\n      .filter(pid => (1 to 3).map(id => s\"my-$id\").contains(pid))\n      .mapAsync(1) { pid =>\n        readJournal.currentEventsByPersistenceId(pid, 0, Long.MaxValue).map(_ => 1L).runWith(Sink.seq).map(_.sum)\n      }\n      .runWith(Sink.seq)\n      .map(_.sum)\n}\n\nclass JavaDslJdbcReadJournalOperations(readJournal: javadsl.JdbcReadJournal)(\n    implicit system: ActorSystem,\n    mat: Materializer)\n    extends ReadJournalOperations {\n  def this(system: ActorSystem) =\n    this(\n      PersistenceQuery.get(system).getReadJournalFor(classOf[javadsl.JdbcReadJournal], JavaJdbcReadJournal.Identifier))(\n      system,\n      SystemMaterializer(system).materializer)\n\n  import system.dispatcher\n\n  def withCurrentPersistenceIds(within: FiniteDuration)(f: TestSubscriber.Probe[String] => Unit): Unit = {\n    val sink: akka.stream.javadsl.Sink[String, TestSubscriber.Probe[String]] = JavaSink.create[String](system)\n    val tp = readJournal.currentPersistenceIds().runWith(sink, mat)\n    tp.within(within)(f(tp))\n  }\n\n  def withPersistenceIds(within: FiniteDuration)(f: 
TestSubscriber.Probe[String] => Unit): Unit = {\n    val sink: akka.stream.javadsl.Sink[String, TestSubscriber.Probe[String]] = JavaSink.create[String](system)\n    val tp = readJournal.persistenceIds().runWith(sink, mat)\n    tp.within(within)(f(tp))\n  }\n\n  def withCurrentEventsByPersistenceId(\n      within: FiniteDuration)(persistenceId: String, fromSequenceNr: Long = 0, toSequenceNr: Long = Long.MaxValue)(\n      f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = {\n    val sink: akka.stream.javadsl.Sink[EventEnvelope, TestSubscriber.Probe[EventEnvelope]] =\n      JavaSink.create[EventEnvelope](system)\n    val tp = readJournal.currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).runWith(sink, mat)\n    tp.within(within)(f(tp))\n  }\n\n  def withEventsByPersistenceId(\n      within: FiniteDuration)(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long)(\n      f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = {\n    val sink: akka.stream.javadsl.Sink[EventEnvelope, TestSubscriber.Probe[EventEnvelope]] =\n      JavaSink.create[EventEnvelope](system)\n    val tp = readJournal.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).runWith(sink, mat)\n    tp.within(within)(f(tp))\n  }\n\n  def withCurrentEventsByTag(within: FiniteDuration)(tag: String, offset: Offset)(\n      f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = {\n    val sink: akka.stream.javadsl.Sink[EventEnvelope, TestSubscriber.Probe[EventEnvelope]] =\n      JavaSink.create[EventEnvelope](system)\n    val tp = readJournal.currentEventsByTag(tag, offset).runWith(sink, mat)\n    tp.within(within)(f(tp))\n  }\n\n  def withEventsByTag(within: FiniteDuration)(tag: String, offset: Offset)(\n      f: TestSubscriber.Probe[EventEnvelope] => Unit): Unit = {\n    val sink: akka.stream.javadsl.Sink[EventEnvelope, TestSubscriber.Probe[EventEnvelope]] =\n      JavaSink.create[EventEnvelope](system)\n    val tp = readJournal.eventsByTag(tag, 
offset).runWith(sink, mat)\n    tp.within(within)(f(tp))\n  }\n\n  override def countJournal: Future[Long] =\n    readJournal\n      .currentPersistenceIds()\n      .asScala\n      .filter(pid => (1 to 3).map(id => s\"my-$id\").contains(pid))\n      .mapAsync(1) { pid =>\n        readJournal\n          .currentEventsByPersistenceId(pid, 0, Long.MaxValue)\n          .asScala\n          .map(_ => 1L)\n          .runFold(List.empty[Long])(_ :+ _)\n          .map(_.sum)\n      }\n      .runFold(List.empty[Long])(_ :+ _)\n      .map(_.sum)\n}\n\nobject QueryTestSpec {\n  implicit final class EventEnvelopeProbeOps(val probe: TestSubscriber.Probe[EventEnvelope]) extends AnyVal {\n    def expectNextEventEnvelope(\n        persistenceId: String,\n        sequenceNr: Long,\n        event: Any): TestSubscriber.Probe[EventEnvelope] = {\n      val env = probe.expectNext()\n      assertEnvelope(env, persistenceId, sequenceNr, event)\n      probe\n    }\n\n    def expectNextEventEnvelope(\n        timeout: FiniteDuration,\n        persistenceId: String,\n        sequenceNr: Long,\n        event: Any): TestSubscriber.Probe[EventEnvelope] = {\n      val env = probe.expectNext(timeout)\n      assertEnvelope(env, persistenceId, sequenceNr, event)\n      probe\n    }\n\n    private def assertEnvelope(env: EventEnvelope, persistenceId: String, sequenceNr: Long, event: Any): Unit = {\n      assert(\n        env.persistenceId == persistenceId,\n        s\"expected persistenceId $persistenceId, found ${env.persistenceId}, in $env\")\n      assert(env.sequenceNr == sequenceNr, s\"expected sequenceNr $sequenceNr, found ${env.sequenceNr}, in $env\")\n      assert(env.event == event, s\"expected event $event, found ${env.event}, in $env\")\n    }\n  }\n}\n\nabstract class QueryTestSpec(config: String, configOverrides: Map[String, ConfigValue] = Map.empty)\n    extends SingleActorSystemPerTestSpec(config, configOverrides) {\n  case class DeleteCmd(toSequenceNr: Long = Long.MaxValue) extends 
Serializable\n\n  final val ExpectNextTimeout = 10.second\n\n  class TestActor(id: Int, replyToMessages: Boolean) extends PersistentActor with Stash {\n    override val persistenceId: String = \"my-\" + id\n\n    var state: Int = 0\n    var snapshotSender: Option[ActorRef] = None\n\n    override def receiveCommand: Receive = idle\n\n    def idle: Receive =\n      LoggingReceive {\n        case \"state\" =>\n          sender() ! state\n\n        case DeleteCmd(toSequenceNr) =>\n          deleteMessages(toSequenceNr)\n          if (replyToMessages) {\n            context.become(awaitingDeleting(sender()))\n          }\n\n        case event: Int =>\n          persist(event) { (event: Int) =>\n            updateState(event)\n            if (replyToMessages) sender() ! akka.actor.Status.Success(event)\n          }\n\n        case event @ Tagged(payload: Int, tags) =>\n          persist(event) { _ =>\n            updateState(payload)\n            if (replyToMessages) sender() ! akka.actor.Status.Success((payload, tags))\n          }\n        case event: Event =>\n          persist(event) { evt =>\n            if (replyToMessages) sender() ! akka.actor.Status.Success(evt)\n          }\n\n        case event @ TaggedEvent(payload: Event, tag) =>\n          persist(event) { _ =>\n            if (replyToMessages) sender() ! akka.actor.Status.Success((payload, tag))\n          }\n        case event @ TaggedAsyncEvent(payload: Event, tag) =>\n          persistAsync(event) { _ =>\n            if (replyToMessages) sender() ! akka.actor.Status.Success((payload, tag))\n          }\n\n        case SaveSnapshotSuccess(_) =>\n          snapshotSender.foreach { sender =>\n            sender ! akka.actor.Status.Success(state)\n          }\n\n        case SaveSnapshotFailure(_, reason) =>\n          snapshotSender.foreach { sender =>\n            sender ! 
akka.actor.Status.Failure(reason)\n          }\n\n        case Snapshot =>\n          saveSnapshot(state)\n          if (replyToMessages) snapshotSender = Some(sender())\n      }\n\n    def awaitingDeleting(origSender: ActorRef): Receive =\n      LoggingReceive {\n        case DeleteMessagesSuccess(toSequenceNr) =>\n          origSender ! s\"deleted-$toSequenceNr\"\n          unstashAll()\n          context.become(idle)\n\n        case DeleteMessagesFailure(ex, _) =>\n          origSender ! Status.Failure(ex)\n          unstashAll()\n          context.become(idle)\n\n        // stash whatever other messages\n        case _ => stash()\n      }\n\n    def updateState(event: Int): Unit = {\n      state = state + event\n    }\n\n    override def receiveRecover: Receive =\n      LoggingReceive {\n        case event: Int =>\n          updateState(event)\n        case SnapshotOffer(_, offeredSnapshot) =>\n          state = offeredSnapshot.asInstanceOf[Int]\n      }\n  }\n\n  def setupEmpty(persistenceId: Int, replyToMessages: Boolean)(implicit system: ActorSystem): ActorRef = {\n    system.actorOf(Props(new TestActor(persistenceId, replyToMessages)))\n  }\n\n  def withTestActors(seq: Int = 1, replyToMessages: Boolean = false)(f: (ActorRef, ActorRef, ActorRef) => Unit)(\n      implicit system: ActorSystem): Unit = {\n    val refs = (seq until seq + 3).map(setupEmpty(_, replyToMessages)).toList\n    try {\n      expectAllStarted(refs)\n      f(refs.head, refs.drop(1).head, refs.drop(2).head)\n    } finally killActors(refs: _*)\n  }\n\n  def withManyTestActors(amount: Int, seq: Int = 1, replyToMessages: Boolean = false)(f: Seq[ActorRef] => Unit)(\n      implicit system: ActorSystem): Unit = {\n    val refs = (seq until seq + amount).map(setupEmpty(_, replyToMessages)).toList\n    try {\n      expectAllStarted(refs)\n      f(refs)\n    } finally killActors(refs: _*)\n  }\n\n  def expectAllStarted(refs: Seq[ActorRef])(implicit system: ActorSystem): Unit = {\n    // make sure 
we notice early if the actors failed to start (because of issues with journal) makes debugging\n    // failing tests easier as we know it is not the actual interaction from the test that is the problem\n    implicit val ec: ExecutionContext = system.dispatcher\n    Future.sequence(refs.map(_ ? \"state\")).futureValue\n  }\n\n  def withTags(payload: Any, tags: String*) = Tagged(payload, Set(tags: _*))\n\n}\n\ntrait PostgresCleaner extends QueryTestSpec {\n\n  def clearPostgres(): Unit =\n    tables.foreach { name => withStatement(stmt => stmt.executeUpdate(s\"DELETE FROM $name\")) }\n\n  override def beforeAll(): Unit = {\n    dropAndCreate(Postgres)\n    super.beforeAll()\n  }\n\n  override def beforeEach(): Unit = {\n    dropAndCreate(Postgres)\n    super.beforeEach()\n  }\n}\n\ntrait MysqlCleaner extends QueryTestSpec {\n\n  def clearMySQL(): Unit = {\n    withStatement { stmt =>\n      stmt.execute(\"SET FOREIGN_KEY_CHECKS = 0\")\n      tables.foreach { name => stmt.executeUpdate(s\"TRUNCATE $name\") }\n      stmt.execute(\"SET FOREIGN_KEY_CHECKS = 1\")\n    }\n  }\n\n  override def beforeAll(): Unit = {\n    dropAndCreate(MySQL)\n    super.beforeAll()\n  }\n\n  override def beforeEach(): Unit = {\n    clearMySQL()\n    super.beforeEach()\n  }\n}\n\ntrait OracleCleaner extends QueryTestSpec {\n\n  def clearOracle(): Unit = {\n    tables.foreach { name =>\n      withStatement(stmt => stmt.executeUpdate(s\"\"\"DELETE FROM \"$name\" \"\"\"))\n    }\n    withStatement(stmt => stmt.executeUpdate(\"\"\"BEGIN \"reset_sequence\"; END; \"\"\"))\n  }\n\n  override def beforeAll(): Unit = {\n    dropAndCreate(Oracle)\n    super.beforeAll()\n  }\n\n  override def beforeEach(): Unit = {\n    clearOracle()\n    super.beforeEach()\n  }\n}\n\ntrait SqlServerCleaner extends QueryTestSpec {\n\n  var initial = true\n\n  def clearSqlServer(): Unit = {\n    val reset = if (initial) {\n      initial = false\n      1\n    } else {\n      0\n    }\n    withStatement { stmt =>\n      
tables.foreach { name => stmt.executeUpdate(s\"DELETE FROM $name\") }\n      stmt.executeUpdate(s\"DBCC CHECKIDENT('${journalTableName}', RESEED, $reset)\")\n    }\n  }\n\n  override def beforeAll(): Unit = {\n    dropAndCreate(SqlServer)\n    super.beforeAll()\n  }\n\n  override def afterAll(): Unit = {\n    dropAndCreate(SqlServer)\n    super.afterAll()\n  }\n\n  override def beforeEach(): Unit = {\n    clearSqlServer()\n    super.beforeEach()\n  }\n}\n\ntrait H2Cleaner extends QueryTestSpec {\n\n  def clearH2(): Unit =\n    tables.foreach { name => withStatement(stmt => stmt.executeUpdate(s\"DELETE FROM $name\")) }\n\n  override def beforeEach(): Unit = {\n    dropAndCreate(H2)\n    super.beforeEach()\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/TaggingEventAdapter.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query\n\nimport akka.persistence.jdbc.query.TaggingEventAdapter.TagEvent\nimport akka.persistence.journal.{ Tagged, WriteEventAdapter }\n\nobject TaggingEventAdapter {\n  case class TagEvent(payload: Any, tags: Set[String])\n}\n\n/**\n * The TaggingEventAdapter will instruct persistence\n * to tag the received event.\n */\nclass TaggingEventAdapter extends WriteEventAdapter {\n  override def manifest(event: Any): String = \"\"\n\n  override def toJournal(event: Any): Any =\n    event match {\n      case TagEvent(payload, tags) =>\n        Tagged(payload, tags)\n      case _ => event\n    }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/dao/ReadJournalTablesTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query.dao.legacy\n\nimport akka.persistence.jdbc.TablesTestSpec\nimport akka.persistence.jdbc.journal.dao.legacy.JournalTables\nimport slick.jdbc.JdbcProfile\n\nclass ReadJournalTablesTest extends TablesTestSpec {\n  val readJournalTableConfiguration = readJournalConfig.journalTableConfiguration\n\n  object TestByteAReadJournalTables extends JournalTables {\n    override val profile: JdbcProfile = slick.jdbc.PostgresProfile\n    override val journalTableCfg = readJournalTableConfiguration\n  }\n\n  \"JournalTable\" should \"be configured with a schema name\" in {\n    TestByteAReadJournalTables.JournalTable.baseTableRow.schemaName shouldBe readJournalTableConfiguration.schemaName\n  }\n\n  it should \"be configured with a table name\" in {\n    TestByteAReadJournalTables.JournalTable.baseTableRow.tableName shouldBe readJournalTableConfiguration.tableName\n  }\n\n  it should \"be configured with column names\" in {\n    val colName = toColumnName(readJournalTableConfiguration.tableName)(_)\n    TestByteAReadJournalTables.JournalTable.baseTableRow.persistenceId.toString shouldBe colName(\n      readJournalTableConfiguration.columnNames.persistenceId)\n    TestByteAReadJournalTables.JournalTable.baseTableRow.sequenceNumber.toString shouldBe colName(\n      readJournalTableConfiguration.columnNames.sequenceNumber)\n    //    TestByteAJournalTables.JournalTable.baseTableRow.tags.toString() shouldBe colName(journalTableConfiguration.columnNames.tags)\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/query/dao/TestProbeReadJournalDao.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.query.dao\n\nimport akka.NotUsed\nimport akka.persistence.jdbc.query.dao.TestProbeReadJournalDao.JournalSequence\nimport akka.persistence.PersistentRepr\nimport akka.stream.scaladsl.Source\nimport akka.testkit.TestProbe\nimport akka.util.Timeout\nimport akka.pattern.ask\n\nimport scala.concurrent.Future\nimport scala.concurrent.duration._\nimport scala.util.Try\nimport akka.actor.Scheduler\n\nobject TestProbeReadJournalDao {\n  case class JournalSequence(offset: Long, limit: Long)\n}\n\n/**\n * Read journal dao where the journalSequence query is backed by a testprobe\n */\nclass TestProbeReadJournalDao(val probe: TestProbe) extends ReadJournalDao {\n  // Since the testprobe is instrumented by the test, it should respond very fast\n  implicit val askTimeout: Timeout = Timeout(100.millis)\n\n  /**\n   * Returns distinct stream of persistenceIds\n   */\n  override def allPersistenceIdsSource(max: Long): Source[String, NotUsed] = ???\n\n  /**\n   * Returns a Source of bytes for certain tag from an offset. 
The result is sorted by\n   * created time asc thus the offset is relative to the creation time\n   */\n  override def eventsByTag(\n      tag: String,\n      offset: Long,\n      maxOffset: Long,\n      max: Long): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed] = ???\n\n  /**\n   * Returns a Source of bytes for a certain persistenceId\n   */\n  override def messages(\n      persistenceId: String,\n      fromSequenceNr: Long,\n      toSequenceNr: Long,\n      max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = ???\n\n  override def messagesWithBatch(\n      persistenceId: String,\n      fromSequenceNr: Long,\n      toSequenceNr: Long,\n      batchSize: Int,\n      refreshInterval: Option[(FiniteDuration, Scheduler)]): Source[Try[(PersistentRepr, Long)], NotUsed] = ???\n\n  /**\n   * @param offset Minimum value to retrieve\n   * @param limit  Maximum number of values to retrieve\n   * @return A Source of journal event sequence numbers (corresponding to the Ordering column)\n   */\n  override def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] = {\n    val f = probe.ref.ask(JournalSequence(offset, limit)).mapTo[scala.collection.immutable.Seq[Long]]\n    Source.future(f).mapConcat(identity)\n  }\n\n  /**\n   * @return The value of the maximum (ordering) id in the journal\n   */\n  override def maxJournalSequence(): Future[Long] = Future.successful(0)\n\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/serialization/StoreOnlySerializableMessagesTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.serialization\n\nimport scala.concurrent.duration._\n\nimport akka.actor.ActorRef\nimport akka.actor.Props\nimport akka.event.LoggingReceive\nimport akka.persistence.PersistentActor\nimport akka.persistence.RecoveryCompleted\nimport akka.persistence.jdbc.SharedActorSystemTestSpec\nimport akka.persistence.jdbc.testkit.internal.H2\nimport akka.persistence.jdbc.testkit.internal.SchemaType\nimport akka.testkit.TestProbe\n\nabstract class StoreOnlySerializableMessagesTest(config: String, schemaType: SchemaType)\n    extends SharedActorSystemTestSpec(config) {\n  case class PersistFailure(cause: Throwable, event: Any, seqNr: Long)\n  case class PersistRejected(cause: Throwable, event: Any, seqNr: Long)\n\n  class TestActor(\n      val persistenceId: String,\n      recoverProbe: ActorRef,\n      persistFailureProbe: ActorRef,\n      persistRejectedProbe: ActorRef)\n      extends PersistentActor {\n    override val receiveRecover: Receive = LoggingReceive { case msg =>\n      recoverProbe ! msg\n    }\n\n    override val receiveCommand: Receive = LoggingReceive { case msg =>\n      persist(msg) { _ =>\n        sender() ! akka.actor.Status.Success(\"\")\n      }\n    }\n\n    override protected def onPersistFailure(cause: Throwable, event: Any, seqNr: Long): Unit =\n      persistFailureProbe ! PersistFailure(cause, event, seqNr)\n\n    override protected def onPersistRejected(cause: Throwable, event: Any, seqNr: Long): Unit =\n      persistRejectedProbe ! 
PersistRejected(cause, event, seqNr)\n  }\n\n  def withActor(id: String = \"1\")(f: ActorRef => TestProbe => TestProbe => TestProbe => Unit): Unit = {\n    val recoverProbe = TestProbe()\n    val persistFailureProbe = TestProbe()\n    val persistRejectedProbe = TestProbe()\n    val persistentActor = system.actorOf(\n      Props(new TestActor(s\"my-$id\", recoverProbe.ref, persistFailureProbe.ref, persistRejectedProbe.ref)))\n    try f(persistentActor)(recoverProbe)(persistFailureProbe)(persistRejectedProbe)\n    finally killActors(persistentActor)\n  }\n\n  override def beforeAll(): Unit = {\n    dropAndCreate(schemaType)\n    super.beforeAll()\n  }\n\n  it should \"persist a single serializable message\" in {\n    withActor(\"1\") { actor => recover => failure => rejected =>\n      val tp = TestProbe()\n      recover.expectMsg(RecoveryCompleted)\n      tp.send(actor, \"foo\") // strings are serializable\n      tp.expectMsg(akka.actor.Status.Success(\"\"))\n      failure.expectNoMessage(100.millis)\n      rejected.expectNoMessage(100.millis)\n    }\n\n    // the recover cycle\n    withActor(\"1\") { _ => recover => failure => rejected =>\n      recover.expectMsg(\"foo\")\n      recover.expectMsg(RecoveryCompleted)\n      failure.expectNoMessage(100.millis)\n      rejected.expectNoMessage(100.millis)\n    }\n  }\n\n  it should \"not persist a single non-serializable message\" in {\n    class NotSerializable\n    withActor(\"2\") { actor => recover => _ => rejected =>\n      val tp = TestProbe()\n      recover.expectMsg(RecoveryCompleted)\n      tp.send(actor, new NotSerializable) // the NotSerializable class cannot be serialized\n      tp.expectNoMessage(300.millis) // the handler should not have been called, because persist has failed\n      // the actor should call the OnPersistRejected\n      rejected.expectMsgPF() { case PersistRejected(_, _, _) =>\n      }\n    }\n\n    // the recover cycle, no message should be recovered\n    withActor(\"2\") { _ => recover => 
_ => _ =>\n      recover.expectMsg(RecoveryCompleted)\n      recover.expectNoMessage(100.millis)\n    }\n  }\n\n  it should \"persist only serializable messages\" in {\n    class NotSerializable\n    withActor(\"3\") { actor => recover => _ => rejected =>\n      val tp = TestProbe()\n      recover.expectMsg(RecoveryCompleted)\n      tp.send(actor, \"foo\")\n      tp.expectMsg(akka.actor.Status.Success(\"\"))\n      tp.send(actor, new NotSerializable) // the NotSerializable class cannot be serialized\n      tp.expectNoMessage(300.millis) // the handler should not have been called, because persist has failed\n      // the actor should call the OnPersistRejected\n      rejected.expectMsgPF() { case PersistRejected(_, _, _) =>\n      }\n      rejected.expectNoMessage(100.millis)\n    }\n\n    // recover cycle\n    withActor(\"3\") { _ => recover => failure => rejected =>\n      recover.expectMsg(\"foo\")\n      recover.expectMsg(RecoveryCompleted)\n      failure.expectNoMessage(100.millis)\n      rejected.expectNoMessage(100.millis)\n    }\n  }\n}\n\nclass H2StoreOnlySerializableMessagesTest extends StoreOnlySerializableMessagesTest(\"h2-application.conf\", H2)\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/snapshot/JdbcSnapshotStoreSpec.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.snapshot\n\nimport akka.persistence.CapabilityFlag\nimport akka.persistence.jdbc.config._\nimport akka.persistence.jdbc.util.{ ClasspathResources, DropCreate }\nimport akka.persistence.jdbc.db.SlickDatabase\nimport akka.persistence.snapshot.SnapshotStoreSpec\nimport com.typesafe.config.{ Config, ConfigFactory }\nimport org.scalatest.BeforeAndAfterAll\nimport org.scalatest.concurrent.ScalaFutures\nimport scala.concurrent.duration._\n\nimport akka.persistence.jdbc.testkit.internal.H2\nimport akka.persistence.jdbc.testkit.internal.SchemaType\n\nimport scala.concurrent.ExecutionContext\n\nabstract class JdbcSnapshotStoreSpec(config: Config, schemaType: SchemaType)\n    extends SnapshotStoreSpec(config)\n    with BeforeAndAfterAll\n    with ScalaFutures\n    with ClasspathResources\n    with DropCreate {\n  implicit val pc: PatienceConfig = PatienceConfig(timeout = 10.seconds)\n\n  implicit lazy val ec: ExecutionContext = system.dispatcher\n\n  lazy val cfg = system.settings.config.getConfig(\"jdbc-journal\")\n\n  lazy val journalConfig = new JournalConfig(cfg)\n\n  lazy val db = SlickDatabase.database(cfg, new SlickConfiguration(cfg.getConfig(\"slick\")), \"slick.db\")\n\n  protected override def supportsSerialization: CapabilityFlag = newDao\n  protected override def supportsMetadata: CapabilityFlag = newDao\n\n  override def beforeAll(): Unit = {\n    dropAndCreate(schemaType)\n    super.beforeAll()\n  }\n\n  override def afterAll(): Unit = {\n    db.close()\n  }\n}\n\nclass H2SnapshotStoreSpec extends JdbcSnapshotStoreSpec(ConfigFactory.load(\"h2-application.conf\"), H2)\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/snapshot/dao/legacy/SnapshotTablesTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.snapshot.dao.legacy\n\nimport akka.persistence.jdbc.TablesTestSpec\nimport slick.jdbc.JdbcProfile\n\nclass SnapshotTablesTest extends TablesTestSpec {\n  val snapshotTableConfiguration = snapshotConfig.legacySnapshotTableConfiguration\n  object TestByteASnapshotTables extends SnapshotTables {\n    override val profile: JdbcProfile = slick.jdbc.PostgresProfile\n    override val snapshotTableCfg = snapshotTableConfiguration\n  }\n\n  \"SnapshotTable\" should \"be configured with a schema name\" in {\n    TestByteASnapshotTables.SnapshotTable.baseTableRow.schemaName shouldBe snapshotTableConfiguration.schemaName\n  }\n\n  it should \"be configured with a table name\" in {\n    TestByteASnapshotTables.SnapshotTable.baseTableRow.tableName shouldBe snapshotTableConfiguration.tableName\n  }\n\n  it should \"be configured with column names\" in {\n    val colName = toColumnName(snapshotTableConfiguration.tableName)(_)\n    TestByteASnapshotTables.SnapshotTable.baseTableRow.persistenceId.toString shouldBe colName(\n      snapshotTableConfiguration.columnNames.persistenceId)\n    TestByteASnapshotTables.SnapshotTable.baseTableRow.sequenceNumber.toString shouldBe colName(\n      snapshotTableConfiguration.columnNames.sequenceNumber)\n    TestByteASnapshotTables.SnapshotTable.baseTableRow.created.toString shouldBe colName(\n      snapshotTableConfiguration.columnNames.created)\n    TestByteASnapshotTables.SnapshotTable.baseTableRow.snapshot.toString shouldBe colName(\n      snapshotTableConfiguration.columnNames.snapshot)\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/state/Payloads.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state\n\nimport akka.serialization._\n\nfinal case class MyPayload(data: String)\n\nclass MyPayloadSerializer extends Serializer {\n  val MyPayloadClass = classOf[MyPayload]\n\n  def identifier: Int = 77123\n  def includeManifest: Boolean = true\n\n  def toBinary(o: AnyRef): Array[Byte] = o match {\n    case MyPayload(data) => s\"${data}\".getBytes(\"UTF-8\")\n    case _               => throw new Exception(\"Unknown object for serialization\")\n  }\n\n  def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef = manifest match {\n    case Some(MyPayloadClass) => MyPayload(s\"${new String(bytes, \"UTF-8\")}\")\n    case Some(c)              => throw new Exception(s\"unexpected manifest ${c}\")\n    case None                 => throw new Exception(\"no manifest\")\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state\n\nimport scala.concurrent.Future\nimport akka.actor.ActorSystem\nimport akka.Done\nimport org.scalatest.concurrent.ScalaFutures\nimport org.scalatest.matchers.should.Matchers\n\nimport scala.annotation.nowarn\n\n@nowarn(\"msg=never used\")\nobject ScaladslSnippets extends ScalaFutures with Matchers {\n\n  def create(): Unit = {\n    // #create\n    import akka.persistence.jdbc.testkit.scaladsl.SchemaUtils\n\n    implicit val system: ActorSystem = ActorSystem(\"example\")\n    val _: Future[Done] = SchemaUtils.createIfNotExists()\n    // #create\n  }\n\n  def durableStatePlugin(): Unit = {\n    implicit val system: ActorSystem = ActorSystem()\n\n    // #jdbc-durable-state-store\n    import akka.persistence.state.DurableStateStoreRegistry\n    import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore\n    val store = DurableStateStoreRegistry\n      .get(system)\n      .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier)\n    // #jdbc-durable-state-store\n  }\n\n  def getObject(): Unit = {\n    implicit val system: ActorSystem = ActorSystem()\n\n    // #get-object\n    import akka.persistence.state.DurableStateStoreRegistry\n    import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore\n    import akka.persistence.state.scaladsl.GetObjectResult\n\n    val store = DurableStateStoreRegistry\n      .get(system)\n      .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier)\n\n    val futureResult: Future[GetObjectResult[String]] = store.getObject(\"InvalidPersistenceId\")\n    futureResult.futureValue.value shouldBe None\n    // #get-object\n  }\n\n  def upsertAndGetObject(): Unit = {\n    implicit val system: ActorSystem = ActorSystem()\n    implicit val e = system.dispatcher\n\n    // 
#upsert-get-object\n    import akka.persistence.state.DurableStateStoreRegistry\n    import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore\n    import akka.persistence.state.scaladsl.GetObjectResult\n\n    val store = DurableStateStoreRegistry\n      .get(system)\n      .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier)\n\n    val v: Future[GetObjectResult[String]] =\n      for {\n        n <- store.upsertObject(\"p234\", 1, \"a valid string\", \"t123\")\n        _ = n shouldBe akka.Done\n        g <- store.getObject(\"p234\")\n        _ = g.value shouldBe Some(\"a valid string\")\n        u <- store.upsertObject(\"p234\", 2, \"updated valid string\", \"t123\")\n        _ = u shouldBe akka.Done\n        h <- store.getObject(\"p234\")\n      } yield h\n\n    v.futureValue.value shouldBe Some(\"updated valid string\")\n    // #upsert-get-object\n  }\n\n  def deleteObject(): Unit = {\n    implicit val system: ActorSystem = ActorSystem()\n\n    // #delete-object\n    import akka.persistence.state.DurableStateStoreRegistry\n    import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore\n\n    val store = DurableStateStoreRegistry\n      .get(system)\n      .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier)\n\n    store.deleteObject(\"p123\", 0L).futureValue shouldBe Done\n    store.getObject(\"p123\").futureValue.value shouldBe None\n    // #delete-object\n  }\n\n  def currentChanges(): Unit = {\n    implicit val system: ActorSystem = ActorSystem()\n\n    // #current-changes\n    import akka.NotUsed\n    import akka.stream.scaladsl.Source\n    import akka.persistence.state.DurableStateStoreRegistry\n    import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore\n    import akka.persistence.query.{ DurableStateChange, NoOffset }\n\n    val store = DurableStateStoreRegistry\n      .get(system)\n      
.durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier)\n\n    val willCompleteTheStream: Source[DurableStateChange[String], NotUsed] =\n      store.currentChanges(\"tag-1\", NoOffset)\n    // #current-changes\n  }\n\n  def changes(): Unit = {\n    implicit val system: ActorSystem = ActorSystem()\n\n    // #changes\n    import akka.NotUsed\n    import akka.stream.scaladsl.Source\n    import akka.persistence.state.DurableStateStoreRegistry\n    import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore\n    import akka.persistence.query.{ DurableStateChange, NoOffset }\n\n    val store = DurableStateStoreRegistry\n      .get(system)\n      .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier)\n\n    val willNotCompleteTheStream: Source[DurableStateChange[String], NotUsed] =\n      store.changes(\"tag-1\", NoOffset)\n    // #changes\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/state/scaladsl/DataGenerationHelper.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state.scaladsl\n\nimport org.scalatest.concurrent.ScalaFutures\nimport scala.concurrent.{ ExecutionContext, Future }\n\ntrait DataGenerationHelper extends ScalaFutures {\n\n  implicit def defaultPatience: PatienceConfig\n\n  // upsert multiple records for 1 persistence id\n  def upsertManyForOnePersistenceId(\n      store: JdbcDurableStateStore[String],\n      persistenceId: String,\n      tag: String,\n      startIndex: Int,\n      n: Int) = {\n    (startIndex until startIndex + n).map { c =>\n      store.upsertObject(persistenceId, c, s\"$c valid string\", tag).futureValue\n    }\n  }\n\n  // upsert multiple records for 1 persistence id\n  def upsertForManyDifferentPersistenceIds(\n      store: JdbcDurableStateStore[String],\n      persistenceIdPrefix: String,\n      revision: Int,\n      tag: String,\n      startIndex: Int,\n      n: Int) = {\n    (startIndex until startIndex + n).map { c =>\n      store.upsertObject(s\"$persistenceIdPrefix-$c\", revision, s\"$c valid string\", tag).futureValue\n    }\n  }\n\n  private def times(n: Int, ls: List[String]) = ls.flatMap { List.fill(n)(_) }\n\n  // upsert multiple records for a random shuffle of a list of persistence ids\n  def upsertRandomlyShuffledPersistenceIds(\n      store: JdbcDurableStateStore[String],\n      persistenceIds: List[String],\n      tag: String,\n      replicationFactor: Int) = {\n    val allPersistenceIds = scala.util.Random.shuffle(times(replicationFactor, persistenceIds))\n    val m = collection.mutable.Map.empty[String, Long]\n    allPersistenceIds.map { p =>\n      m.get(p)\n        .fold {\n          val _ = store.upsertObject(p, 1, s\"1 valid string\", tag).futureValue\n          m += ((p, 1))\n        } { seq =>\n          {\n            val _ = store.upsertObject(p, seq + 1, s\"${seq + 1} valid 
string\", tag).futureValue\n            m += ((p, seq + 1))\n          }\n        }\n    }\n  }\n\n  def upsertParallel(store: JdbcDurableStateStore[String], pids: Set[String], tag: String, noOfItems: Int)(\n      implicit ec: ExecutionContext) = {\n\n    for {\n      _ <- Future.unit\n      f1 = Future(upsertManyForOnePersistenceId(store, pids.head, tag, 1, noOfItems))\n      f2 = Future(upsertManyForOnePersistenceId(store, pids.tail.head, tag, 1, noOfItems))\n      f3 = Future(upsertManyForOnePersistenceId(store, pids.last, tag, 1, noOfItems))\n      _ <- f1\n      _ <- f2\n      _ <- f3\n    } yield (())\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/state/scaladsl/DurableStateSequenceActorTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state.scaladsl\n\nimport scala.concurrent.Future\nimport scala.concurrent.duration._\n\nimport com.typesafe.config.{ Config, ConfigFactory }\nimport akka.actor.{ ActorRef, ActorSystem, ExtendedActorSystem }\nimport akka.pattern.ask\nimport akka.persistence.jdbc.SharedActorSystemTestSpec\nimport akka.persistence.jdbc.state.scaladsl.DurableStateSequenceActor.VisitedElement\nimport akka.persistence.jdbc.state.scaladsl.DurableStateSequenceActor.{ GetMaxGlobalOffset, MaxGlobalOffset }\nimport akka.persistence.jdbc.testkit.internal.{ H2, SchemaType }\nimport akka.testkit.TestProbe\nimport akka.util.Timeout\nimport org.scalatest.concurrent.Eventually\n\nabstract class DurableStateSequenceActorTest(config: Config, schemaType: SchemaType)\n    extends StateSpecBase(config, schemaType)\n    with DataGenerationHelper\n    with Eventually {\n\n  val durableStateSequenceActorConfig = durableStateConfig.stateSequenceConfig\n\n  implicit val askTimeout: FiniteDuration = 50.millis\n  implicit val timeout: Timeout = Timeout(1.minute)\n\n  \"A DurableStateSequenceActor\" must {\n    \"recover normally\" in {\n      withActorSystem { implicit system =>\n        val store =\n          new JdbcDurableStateStore[String](\n            JdbcDurableStateStore.Identifier,\n            db,\n            schemaTypeToProfile(schemaType),\n            durableStateConfig,\n            serialization)\n        upsertForManyDifferentPersistenceIds(store, \"pid\", 1, \"t1\", 1, 400).size shouldBe 400\n\n        withDurableStateSequenceActor(store, maxTries = 100) { actor =>\n          eventually {\n            actor.ask(GetMaxGlobalOffset).mapTo[MaxGlobalOffset].futureValue shouldBe MaxGlobalOffset(400)\n          }\n        }\n      }\n    }\n  }\n\n  /**\n   * @param maxTries The number of tries before events 
are assumed missing\n   *                 (since the actor queries every second by default,\n   *                 this is effectively the number of seconds after which events are assumed missing)\n   */\n  def withDurableStateSequenceActor(store: JdbcDurableStateStore[String], maxTries: Int)(f: ActorRef => Unit)(\n      implicit system: ActorSystem): Unit = {\n    val actor =\n      system.actorOf(DurableStateSequenceActor.props(store, durableStateSequenceActorConfig.copy(maxTries = maxTries)))\n    try f(actor)\n    finally system.stop(actor)\n  }\n}\n\nclass MockDurableStateSequenceActorTest extends SharedActorSystemTestSpec {\n  def fetchMaxGlobalOffset(durableStateSequenceActor: ActorRef): Future[Long] = {\n    durableStateSequenceActor.ask(GetMaxGlobalOffset)(3.seconds).mapTo[MaxGlobalOffset].map(_.maxOffset)\n  }\n\n  it should \"re-query with delay only when events are missing\" in {\n    val batchSize = 100\n    val maxTries = 5\n    val queryDelay = 300.millis\n\n    val almostQueryDelay = queryDelay - 50.millis\n    val almostImmediately = 50.millis\n    withTestProbeDurableStateSequenceActor(batchSize, maxTries, queryDelay) { (daoProbe, _) =>\n      daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(0, batchSize))\n      val firstBatch = ((1L to 40L) ++ (51L to 110L)).map(n => VisitedElement(s\"pid-$n\", n, 1))\n      daoProbe.reply(firstBatch)\n      withClue(s\"when events are missing, the actor should wait for $queryDelay before querying again\") {\n        daoProbe.expectNoMessage(almostQueryDelay)\n        daoProbe.expectMsg(almostQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(40, batchSize))\n      }\n      // number 41 is still missing after this batch\n      val secondBatch = (42L to 110L).map(n => VisitedElement(s\"pid-$n\", n, 1))\n      daoProbe.reply(secondBatch)\n      withClue(s\"when events are missing, the actor should wait for $queryDelay before querying again\") {\n        
daoProbe.expectNoMessage(almostQueryDelay)\n        daoProbe.expectMsg(almostQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(40, batchSize))\n      }\n      val thirdBatch = (41L to 110L).map(n => VisitedElement(s\"pid-$n\", n, 1))\n      daoProbe.reply(thirdBatch)\n      withClue(\n        s\"when no more events are missing, but less that batchSize elemens have been received, \" +\n        s\"the actor should wait for $queryDelay before querying again\") {\n        daoProbe.expectNoMessage(almostQueryDelay)\n        daoProbe.expectMsg(almostQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(110, batchSize))\n      }\n\n      val fourthBatch = (111L to 210L).map(n => VisitedElement(s\"pid-$n\", n, 1))\n      daoProbe.reply(fourthBatch)\n      withClue(\n        \"When no more events are missing and the number of events received is equal to batchSize, \" +\n        \"the actor should query again immediately\") {\n        daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(210, batchSize))\n      }\n\n      // Reply to prevent a dead letter warning on the timeout\n      daoProbe.reply(Seq.empty)\n      daoProbe.expectNoMessage(almostImmediately)\n    }\n  }\n\n  it should \"Assume an element missing after the configured amount of maxTries\" in {\n    val batchSize = 100\n    val maxTries = 5\n    val queryDelay = 300.millis\n\n    val slightlyMoreThanQueryDelay = queryDelay + 100.millis\n    val almostImmediately = 50.millis\n\n    val allIds = ((1L to 40L) ++ (43L to 200L)).map(n => VisitedElement(s\"pid-$n\", n, 1))\n\n    withTestProbeDurableStateSequenceActor(batchSize, maxTries, queryDelay) { (daoProbe, actor) =>\n      daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(0, batchSize))\n      daoProbe.reply(allIds.take(100))\n\n      val idsLargerThan40 = allIds.dropWhile(_.offset <= 40)\n      val retryResponse = idsLargerThan40.take(100)\n      for (i <- 1 to 
maxTries) withClue(s\"should retry $maxTries times (attempt $i)\") {\n        daoProbe.expectMsg(slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(40, batchSize))\n        daoProbe.reply(retryResponse)\n      }\n\n      // sanity check\n      retryResponse.last.offset shouldBe 142\n      withClue(\n        \"The elements 41 and 42 should be assumed missing, \" +\n        \"the actor should query again immediately since a full batch has been received\") {\n        daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(142, batchSize))\n        fetchMaxGlobalOffset(actor).futureValue shouldBe 142\n      }\n\n      // Reply to prevent a dead letter warning on the timeout\n      daoProbe.reply(Seq.empty)\n      daoProbe.expectNoMessage(almostImmediately)\n    }\n  }\n\n  it should \"not delay several updates to known pid\" in {\n    val batchSize = 7\n    val maxTries = 5\n    val queryDelay = 300.millis\n    import DurableStateSequenceActor.VisitedElement\n\n    val almostQueryDelay = queryDelay - 50.millis\n    val almostImmediately = 50.millis\n\n    withTestProbeDurableStateSequenceActor(batchSize, maxTries, queryDelay) { (daoProbe, actor) =>\n      daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(0, batchSize))\n\n      val firstBatch = List(VisitedElement(\"p1\", 1, 1), VisitedElement(\"p2\", 2, 1), VisitedElement(\"p3\", 3, 1))\n\n      daoProbe.reply(firstBatch)\n      withClue(s\"when offsets are not missing \") {\n        daoProbe.expectNoMessage(almostQueryDelay)\n        daoProbe.expectMsg(queryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(3, batchSize))\n        fetchMaxGlobalOffset(actor).futureValue shouldBe 3\n      }\n\n      // two updates to p3\n      val secondBatch = List(VisitedElement(\"p1\", 4, 2), VisitedElement(\"p2\", 5, 2), VisitedElement(\"p3\", 7, 3))\n\n      daoProbe.reply(secondBatch)\n      withClue(s\"when several updates to 
known pid \") {\n        daoProbe.expectNoMessage(almostQueryDelay)\n        daoProbe.expectMsg(queryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(7, batchSize))\n        fetchMaxGlobalOffset(actor).futureValue shouldBe 7\n      }\n\n      // five updates to p2 and three to p3\n      val thirdBatch = List(VisitedElement(\"p1\", 8, 3), VisitedElement(\"p2\", 13, 7), VisitedElement(\"p3\", 16, 6))\n\n      daoProbe.reply(thirdBatch)\n      withClue(s\"when several updates to known pid \") {\n        daoProbe.expectNoMessage(almostQueryDelay)\n        daoProbe.expectMsg(queryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(16, batchSize))\n        fetchMaxGlobalOffset(actor).futureValue shouldBe 16\n      }\n\n      // Reply to prevent a dead letter warning on the timeout\n      daoProbe.reply(Seq.empty)\n      daoProbe.expectNoMessage(almostImmediately)\n    }\n  }\n\n  it should \"not delay more complex updates from several pids\" in {\n    val batchSize = 7\n    val maxTries = 5\n    val queryDelay = 300.millis\n    import DurableStateSequenceActor.VisitedElement\n\n    val almostQueryDelay = queryDelay - 50.millis\n    val almostImmediately = 50.millis\n    val slightlyMoreThanQueryDelay = queryDelay + 100.millis\n\n    withTestProbeDurableStateSequenceActor(batchSize, maxTries, queryDelay) { (daoProbe, actor) =>\n      daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(0, batchSize))\n\n      val firstBatch = List(VisitedElement(\"p1\", 1, 1), VisitedElement(\"p2\", 2, 1), VisitedElement(\"p3\", 3, 1))\n\n      daoProbe.reply(firstBatch)\n      withClue(s\"when offsets are not missing \") {\n        daoProbe.expectNoMessage(almostQueryDelay)\n        daoProbe.expectMsg(queryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(3, batchSize))\n        fetchMaxGlobalOffset(actor).futureValue shouldBe 3\n      }\n\n      // updates like this:\n      // p1, 4, 2 <<<\n      // p2, 5, 2\n      // p2, 6, 3\n     
 // p2, 7, 4\n      // p3, 8, 2\n      // p3, 9, 3\n      // p3, 10, 4 <<<\n      // p2, 11, 5 <<<\n      val secondBatch = List(VisitedElement(\"p1\", 4, 2), VisitedElement(\"p3\", 10, 4), VisitedElement(\"p2\", 11, 5))\n\n      daoProbe.reply(secondBatch)\n      daoProbe.expectMsg(slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(11, batchSize))\n      fetchMaxGlobalOffset(actor).futureValue shouldBe 11\n\n      // Reply to prevent a dead letter warning on the timeout\n      daoProbe.reply(Seq.empty)\n      daoProbe.expectNoMessage(almostImmediately)\n    }\n  }\n\n  it should \"re-query for unknown pid\" in {\n    val batchSize = 7\n    val maxTries = 5\n    val queryDelay = 300.millis\n    import DurableStateSequenceActor.VisitedElement\n\n    val almostQueryDelay = queryDelay - 50.millis\n    val almostImmediately = 50.millis\n    val slightlyMoreThanQueryDelay = queryDelay + 100.millis\n\n    withTestProbeDurableStateSequenceActor(batchSize, maxTries, queryDelay) { (daoProbe, actor) =>\n      daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(0, batchSize))\n\n      val firstBatch = List(VisitedElement(\"p1\", 1, 1))\n\n      daoProbe.reply(firstBatch)\n      withClue(s\"when offsets are not missing \") {\n        daoProbe.expectNoMessage(almostQueryDelay)\n        daoProbe.expectMsg(queryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(1, batchSize))\n        fetchMaxGlobalOffset(actor).futureValue shouldBe 1\n      }\n\n      // updates like this:\n      // p1, 2, 2 <<<\n      // p2, 3, 2\n      // p2, 4, 3 <<<\n      // p3, 5, 2\n      // p3, 6, 3\n      // p3, 7, 4 <<<\n      val secondBatch = List(VisitedElement(\"p1\", 2, 2), VisitedElement(\"p2\", 4, 3), VisitedElement(\"p3\", 7, 4))\n\n      daoProbe.reply(secondBatch)\n      for (i <- 1 to maxTries) withClue(s\"when updates to unknown pid, attempt $i \") {\n        val expectedQueryOffset = 2 // p1 offset 2 is ok\n        
daoProbe.expectMsg(\n          slightlyMoreThanQueryDelay,\n          TestProbeDurableStateStoreQuery.StateInfoSequence(expectedQueryOffset, batchSize))\n        daoProbe.reply(secondBatch)\n      }\n\n      daoProbe.expectMsg(slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(7, batchSize))\n      fetchMaxGlobalOffset(actor).futureValue shouldBe 7\n\n      // two updates for p2 and p3 but now they are known and gaps can be filled\n      val thirdBatch = List(VisitedElement(\"p2\", 9, 5), VisitedElement(\"p3\", 11, 6))\n      daoProbe.reply(thirdBatch)\n      daoProbe.expectMsg(slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(11, batchSize))\n      fetchMaxGlobalOffset(actor).futureValue shouldBe 11\n\n      // Reply to prevent a dead letter warning on the timeout\n      daoProbe.reply(Seq.empty)\n      daoProbe.expectNoMessage(almostImmediately)\n    }\n  }\n\n  it should \"evict revision cache when exceeding capacity\" in {\n    val batchSize = 100\n    val maxTries = 5\n    val queryDelay = 300.millis\n    import DurableStateSequenceActor.VisitedElement\n\n    val almostImmediately = 50.millis\n    val slightlyMoreThanQueryDelay = queryDelay + 100.millis\n\n    withTestProbeDurableStateSequenceActor(batchSize, maxTries, queryDelay, revisionCacheCapacity = 5) {\n      (daoProbe, actor) =>\n        daoProbe.expectMsg(almostImmediately, TestProbeDurableStateStoreQuery.StateInfoSequence(0, batchSize))\n\n        val firstBatch = List(\n          VisitedElement(\"p1\", 1, 1),\n          VisitedElement(\"p2\", 2, 1),\n          VisitedElement(\"p3\", 3, 1),\n          VisitedElement(\"p4\", 4, 1),\n          VisitedElement(\"p5\", 5, 1))\n        daoProbe.reply(firstBatch)\n        withClue(s\"when offsets are not missing \") {\n          daoProbe.expectMsg(\n            slightlyMoreThanQueryDelay,\n            TestProbeDurableStateStoreQuery.StateInfoSequence(5, batchSize))\n          
fetchMaxGlobalOffset(actor).futureValue shouldBe 5\n        }\n\n        // pids in cache\n        val secondBatch = List(\n          VisitedElement(\"p1\", 7, 3),\n          VisitedElement(\"p2\", 9, 3),\n          VisitedElement(\"p3\", 11, 3),\n          VisitedElement(\"p4\", 13, 3))\n        daoProbe.reply(secondBatch)\n        withClue(s\"when offsets are not missing \") {\n          daoProbe.expectMsg(\n            slightlyMoreThanQueryDelay,\n            TestProbeDurableStateStoreQuery.StateInfoSequence(13, batchSize))\n          fetchMaxGlobalOffset(actor).futureValue shouldBe 13\n        }\n\n        // exceeding cache capacity of 5, p1, p2, p5 will be evicted because lowest offset\n        val thirdBatch = List(VisitedElement(\"p4\", 15, 5), VisitedElement(\"p6\", 16, 1), VisitedElement(\"p7\", 17, 1))\n        daoProbe.reply(thirdBatch)\n        withClue(s\"when offsets are not missing \") {\n          daoProbe.expectMsg(\n            slightlyMoreThanQueryDelay,\n            TestProbeDurableStateStoreQuery.StateInfoSequence(17, batchSize))\n          fetchMaxGlobalOffset(actor).futureValue shouldBe 17\n        }\n\n        // p1, p2, p5 were evicted because lowest offset\n        val fourthBatch = List(VisitedElement(\"p2\", 19, 5), VisitedElement(\"p1\", 21, 5), VisitedElement(\"p5\", 23, 3))\n        daoProbe.reply(fourthBatch)\n        for (i <- 1 to maxTries) withClue(s\"when updates to unknown pid, attempt $i \") {\n          daoProbe\n            .expectMsg(slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(17, batchSize))\n          daoProbe.reply(fourthBatch)\n        }\n\n        daoProbe.expectMsg(slightlyMoreThanQueryDelay, TestProbeDurableStateStoreQuery.StateInfoSequence(23, batchSize))\n        fetchMaxGlobalOffset(actor).futureValue shouldBe 23\n\n        // Reply to prevent a dead letter warning on the timeout\n        daoProbe.reply(Seq.empty)\n        daoProbe.expectNoMessage(almostImmediately)\n    }\n  
}\n\n  import akka.persistence.jdbc.config.DurableStateTableConfiguration\n  def withTestProbeDurableStateSequenceActor(\n      batchSize: Int,\n      maxTries: Int,\n      queryDelay: FiniteDuration,\n      revisionCacheCapacity: Int = 10000)(f: (TestProbe, ActorRef) => Unit)(implicit system: ActorSystem): Unit = {\n    val testProbe = TestProbe()\n\n    val customConfig = ConfigFactory.parseString(s\"\"\"\n      jdbc-durable-state-store {\n        batchSize = $batchSize\n        refreshInterval = 500.milliseconds\n        durable-state-sequence-retrieval {\n          query-delay = $queryDelay\n          max-tries = $maxTries\n          max-backoff-query-delay = 4.seconds\n          ask-timeout = 100.millis\n          batch-size = $batchSize\n          revision-cache-capacity = $revisionCacheCapacity\n        }\n      }\n    \"\"\")\n\n    lazy val cfg = customConfig\n      .getConfig(\"jdbc-durable-state-store\")\n      .withFallback(system.settings.config.getConfig(\"jdbc-durable-state-store\"))\n      .withFallback(ConfigFactory.load(\"h2-application.conf\").getConfig(\"jdbc-durable-state-store\"))\n\n    val stateTableConfig = new DurableStateTableConfiguration(cfg)\n\n    val mockStore =\n      new TestProbeDurableStateStoreQuery(testProbe, db, slick.jdbc.H2Profile, stateTableConfig, serialization)(\n        system.asInstanceOf[ExtendedActorSystem])\n    val actor = system.actorOf(\n      DurableStateSequenceActor\n        .props(mockStore.asInstanceOf[JdbcDurableStateStore[String]], stateTableConfig.stateSequenceConfig))\n    try f(testProbe, actor)\n    finally system.stop(actor)\n  }\n}\n\nclass H2DurableStateSequenceActorTest\n    extends DurableStateSequenceActorTest(ConfigFactory.load(\"h2-application.conf\"), H2) {\n  implicit lazy val system: ActorSystem =\n    ActorSystem(\"test\", config.withFallback(customSerializers))\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/state/scaladsl/DurableStateStorePluginSpec.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state.scaladsl\n\nimport com.typesafe.config.{ Config, ConfigFactory }\nimport akka.actor._\nimport org.scalatest.matchers.should.Matchers\nimport org.scalatest.wordspec.AnyWordSpecLike\nimport org.scalatest.BeforeAndAfterAll\nimport org.scalatest.concurrent.ScalaFutures\nimport akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore\nimport akka.persistence.state.DurableStateStoreRegistry\nimport slick.jdbc.{ H2Profile, JdbcProfile }\n\nabstract class DurableStateStorePluginSpec(config: Config, profile: JdbcProfile)\n    extends AnyWordSpecLike\n    with BeforeAndAfterAll\n    with Matchers\n    with ScalaFutures {\n\n  implicit lazy val system: ExtendedActorSystem =\n    ActorSystem(\"test\", config).asInstanceOf[ExtendedActorSystem]\n\n  \"A durable state store plugin\" must {\n    \"instantiate a JdbcDurableDataStore successfully\" in {\n      val store = DurableStateStoreRegistry\n        .get(system)\n        .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier)\n\n      store shouldBe a[JdbcDurableStateStore[_]]\n      store.system.settings.config shouldBe system.settings.config\n      store.profile shouldBe profile\n    }\n\n    \"instantiate another JdbcDurableDataStore successfully\" in {\n      val store1 = DurableStateStoreRegistry\n        .get(system)\n        .durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier)\n\n      val store2 = DurableStateStoreRegistry\n        .get(system)\n        .durableStateStoreFor[JdbcDurableStateStore[String]](\"another-jdbc-durable-state-store\")\n\n      store1.configPath shouldBe JdbcDurableStateStore.Identifier\n      store2.configPath shouldBe \"another-jdbc-durable-state-store\"\n    }\n  }\n\n  override def afterAll(): Unit = {\n    
system.terminate().futureValue\n  }\n}\n\nclass H2DurableStateStorePluginSpec\n    extends DurableStateStorePluginSpec(ConfigFactory.load(\"h2-application.conf\"), H2Profile)\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/state/scaladsl/JdbcDurableStateSpec.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state.scaladsl\n\nimport com.typesafe.config.{ Config, ConfigFactory }\nimport akka.actor._\nimport akka.persistence.jdbc.state.{ MyPayload, OffsetSyntax }\nimport OffsetSyntax._\nimport akka.persistence.jdbc.testkit.internal.{ H2, Postgres, SchemaType }\nimport akka.persistence.query.UpdatedDurableState\nimport akka.persistence.query.{ NoOffset, Offset, Sequence }\nimport akka.stream.scaladsl.Sink\nimport org.scalatest.time.Millis\nimport org.scalatest.time.Seconds\nimport org.scalatest.time.Span\n\nimport scala.annotation.nowarn\nimport scala.concurrent.Future\n\n@nowarn(\"msg=deprecated\")\nabstract class JdbcDurableStateSpec(config: Config, schemaType: SchemaType) extends StateSpecBase(config, schemaType) {\n\n  override implicit val defaultPatience: PatienceConfig =\n    PatienceConfig(timeout = Span(60, Seconds), interval = Span(100, Millis))\n\n  \"A durable state store\" must withActorSystem { implicit system =>\n    val stateStoreString =\n      new JdbcDurableStateStore[String](\n        JdbcDurableStateStore.Identifier,\n        db,\n        schemaTypeToProfile(schemaType),\n        durableStateConfig,\n        serialization)\n\n    \"not load a state given an invalid persistenceId\" in {\n      whenReady {\n        stateStoreString.getObject(\"InvalidPersistenceId\")\n      } { v =>\n        v.value shouldBe None\n      }\n    }\n    \"add a valid state successfully\" in {\n      whenReady {\n        stateStoreString.upsertObject(\"p123\", 1, \"a valid string\", \"t123\")\n      } { v =>\n        v shouldBe akka.Done\n      }\n    }\n    \"support composite upsert-fetch-repeat loop\" in {\n      whenReady {\n        for {\n\n          n <- stateStoreString.upsertObject(\"p234\", 1, \"a valid string\", \"t123\")\n          _ = n shouldBe akka.Done\n          g <- 
stateStoreString.getObject(\"p234\")\n          _ = g.value shouldBe Some(\"a valid string\")\n          u <- stateStoreString.upsertObject(\"p234\", 2, \"updated valid string\", \"t123\")\n          _ = u shouldBe akka.Done\n          h <- stateStoreString.getObject(\"p234\")\n\n        } yield h\n      } { v =>\n        v.value shouldBe Some(\"updated valid string\")\n      }\n    }\n    \"fail inserting an already existing sequence number\" in {\n      whenReady {\n        (for {\n\n          n <- stateStoreString.upsertObject(\"p345\", 1, \"a valid string\", \"t123\")\n          _ = n shouldBe akka.Done\n          g <- stateStoreString.getObject(\"p345\")\n          _ = g.value shouldBe Some(\"a valid string\")\n          u <- stateStoreString.upsertObject(\"p345\", 1, \"updated valid string\", \"t123\")\n\n        } yield u).failed\n      } { e =>\n        schemaType match {\n          case H2 =>\n            e shouldBe an[org.h2.jdbc.JdbcSQLIntegrityConstraintViolationException]\n          case Postgres =>\n            e shouldBe an[org.postgresql.util.PSQLException]\n          case _ => ???\n        }\n      }\n    }\n    \"fail inserting incorrect sequence number with 0 rows affected\" in {\n      whenReady {\n        stateStoreString.upsertObject(\"p234\", 1, \"1 valid string\", \"t1\").futureValue\n        stateStoreString.upsertObject(\"p234\", 2, \"2 valid string\", \"t1\").futureValue\n        stateStoreString.upsertObject(\"p234\", 3, \"3 valid string\", \"t1\").futureValue\n        stateStoreString.upsertObject(\"p234\", 5, \"5 valid string\", \"t1\").failed\n      } { e =>\n        e shouldBe an[IllegalStateException]\n        // offset should not change\n        stateStoreString.maxStateStoreOffset().futureValue shouldBe 3\n        // sequence number should not change\n        stateStoreString.getObject(\"p234\").futureValue.revision shouldBe 3\n      }\n    }\n    \"delete an existing state\" in {\n      whenReady {\n        
stateStoreString.deleteObject(\"p123\")\n      } { v =>\n        v shouldBe akka.Done\n        whenReady {\n          stateStoreString.getObject(\"p123\")\n        } { v =>\n          v.value shouldBe None\n        }\n      }\n    }\n  }\n\n  \"A durable state store with payload that needs custom serializer\" must withActorSystem { implicit system =>\n    val stateStorePayload =\n      new JdbcDurableStateStore[MyPayload](\n        JdbcDurableStateStore.Identifier,\n        db,\n        schemaTypeToProfile(schemaType),\n        durableStateConfig,\n        serialization)\n\n    \"not load a state given an invalid persistenceId\" in {\n      whenReady {\n        stateStorePayload.getObject(\"InvalidPersistenceId\")\n      } { v =>\n        v.value shouldBe None\n      }\n    }\n    \"add a valid state successfully\" in {\n      whenReady {\n        stateStorePayload.upsertObject(\"p123\", 1, MyPayload(\"a valid string\"), \"t123\")\n      } { v =>\n        v shouldBe akka.Done\n      }\n    }\n    \"support composite upsert-fetch-repeat loop\" in {\n      whenReady {\n        for {\n\n          n <- stateStorePayload.upsertObject(\"p234\", 1, MyPayload(\"a valid string\"), \"t123\")\n          _ = n shouldBe akka.Done\n          g <- stateStorePayload.getObject(\"p234\")\n          _ = g.value shouldBe Some(MyPayload(\"a valid string\"))\n          u <- stateStorePayload.upsertObject(\"p234\", 2, MyPayload(\"updated valid string\"), \"t123\")\n          _ = u shouldBe akka.Done\n          h <- stateStorePayload.getObject(\"p234\")\n\n        } yield h\n      } { v =>\n        v.value shouldBe Some(MyPayload(\"updated valid string\"))\n      }\n    }\n    \"delete an existing state\" in {\n      whenReady {\n        stateStorePayload.deleteObject(\"p234\")\n      } { v =>\n        v shouldBe akka.Done\n        whenReady {\n          stateStorePayload.getObject(\"p234\")\n        } { v =>\n          v.value shouldBe None\n        }\n      }\n    }\n  }\n\n  \"A JDBC 
durable state store\" must {\n\n    \"find all states by tag either from the beginning or from a specific offset\" in withActorSystem { implicit system =>\n      val stateStoreString =\n        new JdbcDurableStateStore[String](\n          JdbcDurableStateStore.Identifier,\n          db,\n          schemaTypeToProfile(schemaType),\n          durableStateConfig,\n          serialization)\n\n      import stateStoreString._\n\n      // fetch from beginning\n      upsertManyForOnePersistenceId(stateStoreString, \"p1\", \"t1\", 1, 4)\n      val chgs = currentChanges(\"t1\", NoOffset).runWith(Sink.seq).futureValue\n      chgs.size shouldBe 1\n      chgs.map(_.offset.value).max shouldBe 4\n\n      // upsert more and fetch from after the last offset\n      upsertManyForOnePersistenceId(stateStoreString, \"p1\", \"t1\", 5, 7)\n      val moreChgs = currentChanges(\"t1\", chgs.head.offset).runWith(Sink.seq).futureValue\n      moreChgs.size shouldBe 1\n      moreChgs.map(_.offset.value).max shouldBe 11\n\n      // upsert same tag, different persistence id and fetch from after the last offset\n      upsertManyForOnePersistenceId(stateStoreString, \"p2\", \"t1\", 1, 3)\n      val otherChgs = currentChanges(\"t1\", moreChgs.head.offset).runWith(Sink.seq).futureValue\n      otherChgs.size shouldBe 1\n      otherChgs.map(_.offset.value).max shouldBe 14\n\n      // again fetch from the beginning\n      val cs = currentChanges(\"t1\", NoOffset).runWith(Sink.seq).futureValue\n      cs.size shouldBe 2\n      cs.map(_.offset.value).max shouldBe 14\n    }\n\n    \"find the max offset after a series of upserts with multiple persistence ids\" in withActorSystem {\n      implicit system =>\n        val stateStoreString =\n          new JdbcDurableStateStore[String](\n            JdbcDurableStateStore.Identifier,\n            db,\n            schemaTypeToProfile(schemaType),\n            durableStateConfig,\n            serialization)\n\n        import stateStoreString._\n        
upsertRandomlyShuffledPersistenceIds(stateStoreString, List(\"p1\", \"p2\", \"p3\"), \"t1\", 3)\n        val chgs = currentChanges(\"t1\", NoOffset).runWith(Sink.seq).futureValue\n        chgs.size shouldBe 3\n        chgs.map(_.offset.value).max shouldBe 9\n    }\n\n    \"find all states by tags with offsets sorted and proper max and min offsets when starting offset is specified\" in withActorSystem {\n      implicit system =>\n        val stateStoreString =\n          new JdbcDurableStateStore[String](\n            JdbcDurableStateStore.Identifier,\n            db,\n            schemaTypeToProfile(schemaType),\n            durableStateConfig,\n            serialization)\n\n        import stateStoreString._\n        upsertRandomlyShuffledPersistenceIds(stateStoreString, List(\"p1\", \"p2\", \"p3\"), \"t1\", 3)\n        val chgs = stateStoreString.currentChanges(\"t1\", Offset.sequence(7)).runWith(Sink.seq).futureValue\n        chgs.map(_.offset.value) shouldBe sorted\n        chgs.map(_.offset.value).max shouldBe 9\n        chgs.map(_.offset.value).min should be > 7L\n    }\n\n    \"find all states by tags returning a live source with no offset specified\" in withActorSystem { implicit system =>\n      val stateStoreString =\n        new JdbcDurableStateStore[String](\n          JdbcDurableStateStore.Identifier,\n          db,\n          schemaTypeToProfile(schemaType),\n          durableStateConfig,\n          serialization)\n\n      import stateStoreString._\n      upsertRandomlyShuffledPersistenceIds(stateStoreString, List(\"p1\", \"p2\", \"p3\"), \"t1\", 3)\n      val source = stateStoreString.changes(\"t1\", NoOffset)\n      val m = collection.mutable.ListBuffer.empty[(String, Long)]\n\n      // trick to complete the future\n      val f = source\n        .takeWhile { e =>\n          m += ((e.persistenceId, e.offset.value))\n          e.offset.value < 12\n        }\n        .runWith(Sink.seq)\n\n      // more data after some delay\n      Thread.sleep(100)\n    
  upsertObject(\"p3\", 4, \"4 valid string\", \"t2\").futureValue\n      upsertObject(\"p2\", 4, \"4 valid string\", \"t1\").futureValue\n      upsertObject(\"p1\", 4, \"4 valid string\", \"t1\").futureValue\n\n      whenReady(f) { _ =>\n        m.size shouldBe 2\n        m.toList.map(_._2) shouldBe sorted\n        m.toList.map(_._2).max shouldBe 12\n      }\n    }\n\n    \"find all states by tags returning a live source with a starting offset specified\" in withActorSystem {\n      implicit system =>\n        val stateStoreString =\n          new JdbcDurableStateStore[String](\n            JdbcDurableStateStore.Identifier,\n            db,\n            schemaTypeToProfile(schemaType),\n            durableStateConfig,\n            serialization)\n\n        import stateStoreString._\n        upsertRandomlyShuffledPersistenceIds(stateStoreString, List(\"p1\", \"p2\", \"p3\"), \"t1\", 3)\n        val source = stateStoreString.changes(\"t1\", Sequence(4))\n        val m = collection.mutable.ListBuffer.empty[(String, Long)]\n\n        // trick to complete the future\n        val f = source\n          .takeWhile { e =>\n            m += ((e.persistenceId, e.offset.value))\n            e.offset.value < 12\n          }\n          .runWith(Sink.seq)\n\n        // more data after some delay\n        Thread.sleep(100)\n        upsertManyForOnePersistenceId(stateStoreString, \"p3\", \"t1\", 4, 3)\n\n        whenReady(f) { _ =>\n          m.map(_._2) shouldBe sorted\n          m.map(_._2).min should be > 4L\n          m.map(_._2).max shouldBe 12\n        }\n    }\n  }\n\n  \"A JDBC durable state store in the face of parallel upserts\" must {\n\n    \"fetch proper values of offsets with currentChanges()\" in withActorSystem { implicit system =>\n      val stateStoreString =\n        new JdbcDurableStateStore[String](\n          JdbcDurableStateStore.Identifier,\n          db,\n          schemaTypeToProfile(schemaType),\n          durableStateConfig,\n          serialization)\n\n 
     import stateStoreString._\n\n      upsertParallel(stateStoreString, Set(\"p1\", \"p2\", \"p3\"), \"t1\", 1000)(e).futureValue\n      whenReady {\n        currentChanges(\"t1\", NoOffset)\n          .collect { case u: UpdatedDurableState[String] => u }\n          .runWith(Sink.seq): Future[Seq[UpdatedDurableState[String]]]\n      } { chgs =>\n        chgs.map(_.offset.value) shouldBe sorted\n        chgs.map(_.offset.value).max shouldBe 3000\n      }\n\n      whenReady {\n        currentChanges(\"t1\", Sequence(2000))\n          .collect { case u: UpdatedDurableState[String] => u }\n          .runWith(Sink.seq): Future[Seq[UpdatedDurableState[String]]]\n      } { chgs =>\n        chgs.map(_.offset.value) shouldBe sorted\n        chgs.map(_.offset.value).max shouldBe 3000\n      }\n    }\n\n    \"fetch proper values of offsets from beginning with changes() and phased upserts\" in withActorSystem {\n      implicit system =>\n        val stateStoreString =\n          new JdbcDurableStateStore[String](\n            JdbcDurableStateStore.Identifier,\n            db,\n            schemaTypeToProfile(schemaType),\n            durableStateConfig,\n            serialization)\n\n        import stateStoreString._\n\n        upsertParallel(stateStoreString, Set(\"p1\", \"p2\", \"p3\"), \"t1\", 5)(e).futureValue\n        val source = changes(\"t2\", NoOffset)\n        val m = collection.mutable.ListBuffer.empty[(String, Long)]\n        // trick to complete the future\n        val f = source\n          .takeWhile { e =>\n            m += ((e.persistenceId, e.offset.value))\n            e.offset.value < 21\n          }\n          .runWith(Sink.seq)\n\n        // more data after some delay\n        Thread.sleep(1000)\n        upsertManyForOnePersistenceId(stateStoreString, \"p3\", \"t1\", 6, 3)\n        Thread.sleep(1000)\n        upsertManyForOnePersistenceId(stateStoreString, \"p3\", \"t2\", 9, 3)\n\n        whenReady(f) { _ =>\n          m.map(_._2) shouldBe sorted\n        
  m.map(_._2).min should be > 0L\n          m.map(_._2).max shouldBe 21\n        }\n    }\n\n    \"fetch proper values of offsets from beginning for a larger dataset with changes() and phased upserts\" in withActorSystem {\n      implicit system =>\n        val stateStoreString =\n          new JdbcDurableStateStore[String](\n            JdbcDurableStateStore.Identifier,\n            db,\n            schemaTypeToProfile(schemaType),\n            durableStateConfig,\n            serialization)\n\n        import stateStoreString._\n\n        upsertParallel(stateStoreString, Set(\"p1\", \"p2\", \"p3\"), \"t1\", 1000)(e).futureValue\n        val source = changes(\"t2\", NoOffset)\n        val m = collection.mutable.ListBuffer.empty[(String, Long)]\n        // trick to complete the future\n        val f = source\n          .takeWhile { e =>\n            m += ((e.persistenceId, e.offset.value))\n            e.offset.value < 3060\n          }\n          .runWith(Sink.seq)\n\n        // more data after some delay\n        Thread.sleep(1000)\n        upsertManyForOnePersistenceId(stateStoreString, \"p3\", \"t1\", 1001, 30)\n        Thread.sleep(1000)\n        upsertManyForOnePersistenceId(stateStoreString, \"p3\", \"t2\", 1031, 30)\n\n        whenReady(f) { _ =>\n          m.map(_._2) shouldBe sorted\n          m.map(_._2).min should be > 0L\n          m.map(_._2).max shouldBe 3060\n        }\n    }\n  }\n}\n\nclass H2DurableStateSpec extends JdbcDurableStateSpec(ConfigFactory.load(\"h2-application.conf\"), H2) {\n  implicit lazy val system: ActorSystem =\n    ActorSystem(\"test\", config.withFallback(customSerializers))\n}\n\n// In H2's default mode unquoted identifiers are uppercased, so raw-SQL paths in\n// DurableStateQueries must quote identifiers via the profile to match the schema.\nclass H2DefaultModeDurableStateSpec\n    extends JdbcDurableStateSpec(ConfigFactory.load(\"h2-default-mode-application.conf\"), H2) {\n  implicit lazy val system: ActorSystem =\n    
ActorSystem(\"test\", config.withFallback(customSerializers))\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/state/scaladsl/StateSpecBase.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state.scaladsl\n\nimport com.typesafe.config.{ Config, ConfigFactory }\n\nimport scala.concurrent.duration._\nimport scala.concurrent.ExecutionContext\nimport scala.util.{ Failure, Success }\nimport org.scalatest.matchers.should.Matchers\nimport org.scalatest.wordspec.AnyWordSpecLike\nimport org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach }\nimport org.scalatest.concurrent.ScalaFutures\nimport org.scalatest.time._\nimport akka.actor._\nimport akka.persistence.jdbc.db.SlickDatabase\nimport akka.persistence.jdbc.config._\nimport akka.persistence.jdbc.testkit.internal.{ H2, MySQL, Postgres, SchemaType }\nimport akka.persistence.jdbc.util.DropCreate\nimport akka.serialization.SerializationExtension\nimport akka.util.Timeout\n\nabstract class StateSpecBase(val config: Config, schemaType: SchemaType)\n    extends AnyWordSpecLike\n    with BeforeAndAfterAll\n    with BeforeAndAfterEach\n    with Matchers\n    with ScalaFutures\n    with DropCreate\n    with DataGenerationHelper {\n  implicit def system: ActorSystem\n\n  implicit lazy val e: ExecutionContext = system.dispatcher\n\n  private[jdbc] def schemaTypeToProfile(s: SchemaType) = s match {\n    case H2       => slick.jdbc.H2Profile\n    case Postgres => slick.jdbc.PostgresProfile\n    case MySQL    => slick.jdbc.MySQLProfile\n    case _        => ???\n  }\n\n  val customSerializers = ConfigFactory.parseString(\"\"\"\n      akka.actor {\n        serializers {\n          my-payload = \"akka.persistence.jdbc.state.MyPayloadSerializer\"\n        }\n        serialization-bindings {\n          \"akka.persistence.jdbc.state.MyPayload\" = my-payload\n        }\n      }\n    \"\"\")\n\n  val customConfig = ConfigFactory.parseString(s\"\"\"\n    jdbc-durable-state-store {\n      batchSize = 200\n      refreshInterval = 
300.milliseconds\n      durable-state-sequence-retrieval {\n        batch-size = 1000\n        query-delay = 100.milliseconds\n        max-tries = 3\n      }\n    }\n  \"\"\")\n\n  lazy val cfg = customConfig\n    .getConfig(\"jdbc-durable-state-store\")\n    .withFallback(system.settings.config.getConfig(\"jdbc-durable-state-store\"))\n    .withFallback(config.getConfig(\"jdbc-durable-state-store\"))\n    .withFallback(customSerializers.getConfig(\"akka.actor\"))\n\n  lazy val db = if (cfg.hasPath(\"slick.profile\")) {\n    SlickDatabase.database(cfg, new SlickConfiguration(cfg.getConfig(\"slick\")), \"slick.db\")\n  } else {\n    // needed for integration test where we use postgres-shared-db-application.conf\n    SlickDatabase.database(\n      config,\n      new SlickConfiguration(config.getConfig(\"akka-persistence-jdbc.shared-databases.slick\")),\n      \"akka-persistence-jdbc.shared-databases.slick.db\")\n  }\n\n  lazy val durableStateConfig = new DurableStateTableConfiguration(cfg)\n  lazy val serialization = SerializationExtension(system)\n\n  implicit val defaultPatience: PatienceConfig =\n    PatienceConfig(timeout = Span(60, Seconds), interval = Span(100, Millis))\n\n  def withActorSystem(f: ExtendedActorSystem => Unit): Unit = {\n    implicit val system: ExtendedActorSystem =\n      ActorSystem(\"JdbcDurableStateSpec\", config.withFallback(customSerializers)).asInstanceOf[ExtendedActorSystem]\n    implicit val timeout: Timeout = Timeout(1.minute)\n    try {\n      f(system)\n    } finally {\n      system\n        .actorSelection(\n          \"system/\" + s\"${JdbcDurableStateStore.Identifier}.akka-persistence-jdbc-durable-state-sequence-actor\")\n        .resolveOne()\n        .onComplete {\n          case Success(actorRef) => {\n            system.stop(actorRef)\n            Thread.sleep(1000)\n            system.log.debug(s\"Is terminated: ${actorRef.isTerminated}\")\n          }\n          case Failure(_) =>\n            system.log.warning(\"system/\" 
+ \"-persistence-jdbc-durable-state-sequence-actorsomename\" + \" does not exist\")\n        }\n      system.terminate().futureValue\n    }\n  }\n\n  override def beforeAll(): Unit = {\n    dropAndCreate(schemaType)\n    super.beforeAll()\n  }\n\n  override def beforeEach(): Unit = {\n    dropAndCreate(schemaType)\n    super.beforeAll()\n  }\n\n  override def afterAll(): Unit = {\n    db.close()\n    system.terminate().futureValue\n  }\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/state/scaladsl/TestProbeDurableStateStoreQuery.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.state.scaladsl\n\nimport scala.concurrent.Future\nimport scala.concurrent.duration._\nimport akka.NotUsed\nimport akka.actor.ExtendedActorSystem\nimport akka.pattern.ask\nimport akka.persistence.jdbc.config.DurableStateTableConfiguration\nimport akka.persistence.query.DurableStateChange\nimport akka.persistence.query.Offset\nimport akka.persistence.state.scaladsl.GetObjectResult\nimport akka.stream.scaladsl.Source\nimport akka.testkit.TestProbe\nimport akka.util.Timeout\nimport slick.jdbc.{ JdbcBackend, JdbcProfile }\nimport akka.serialization.Serialization\n\nobject TestProbeDurableStateStoreQuery {\n  case class StateInfoSequence(offset: Long, limit: Long)\n}\n\nclass TestProbeDurableStateStoreQuery(\n    val probe: TestProbe,\n    db: JdbcBackend#Database,\n    profile: JdbcProfile,\n    durableStateConfig: DurableStateTableConfiguration,\n    serialization: Serialization)(override implicit val system: ExtendedActorSystem)\n    extends JdbcDurableStateStore[String](\n      JdbcDurableStateStore.Identifier,\n      db,\n      profile,\n      durableStateConfig,\n      serialization)(system) {\n\n  implicit val askTimeout: Timeout = Timeout(100.millis)\n\n  override def getObject(persistenceId: String): Future[GetObjectResult[String]] = ???\n  override def currentChanges(tag: String, offset: Offset): Source[DurableStateChange[String], NotUsed] = ???\n\n  override def changes(tag: String, offset: Offset): Source[DurableStateChange[String], NotUsed] = ???\n\n  override def stateStoreStateInfo(offset: Long, limit: Long): Source[(String, Long, Long), NotUsed] = {\n    val f = probe.ref\n      .ask(TestProbeDurableStateStoreQuery.StateInfoSequence(offset, limit))\n      .mapTo[scala.collection.immutable.Seq[DurableStateSequenceActor.VisitedElement]]\n\n    
Source.future(f).mapConcat(e => e.map(x => (x.pid, x.offset, x.revision)))\n  }\n\n  override def maxStateStoreOffset(): Future[Long] = Future.successful(0)\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/util/ClasspathResources.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.util\n\nimport java.io.InputStream\n\nimport scala.io.{ Source => ScalaIOSource }\n\nobject ClasspathResources extends ClasspathResources\n\ntrait ClasspathResources {\n  def streamToString(is: InputStream): String =\n    ScalaIOSource.fromInputStream(is).mkString\n\n  def fromClasspathAsString(fileName: String): String =\n    streamToString(fromClasspathAsStream(fileName))\n\n  def fromClasspathAsStream(fileName: String): InputStream =\n    getClass.getClassLoader.getResourceAsStream(fileName)\n}\n"
  },
  {
    "path": "core/src/test/scala/akka/persistence/jdbc/util/DropCreate.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.util\n\nimport java.sql.Statement\n\nimport akka.annotation.InternalApi\nimport akka.persistence.jdbc.testkit.internal.SchemaType\nimport akka.persistence.jdbc.testkit.internal.SchemaUtilsImpl\nimport com.typesafe.config.Config\nimport org.slf4j.LoggerFactory\nimport slick.jdbc.JdbcBackend.Database\nimport slick.jdbc.JdbcBackend.Session\n\n/**\n * INTERNAL API\n */\n@InternalApi\nprivate[jdbc] trait DropCreate {\n\n  private val logger = LoggerFactory.getLogger(this.getClass)\n  def db: Database\n  def config: Config\n\n  def newDao: Boolean = !SchemaUtilsImpl.legacy(\"jdbc-journal\", config)\n\n  /**\n   * INTERNAL API\n   */\n  @InternalApi\n  private[jdbc] def dropAndCreate(schemaType: SchemaType): Unit = {\n    // blocking calls, usually done in our before test methods\n    SchemaUtilsImpl.dropWithSlick(schemaType, logger, db, !newDao)\n    SchemaUtilsImpl.createWithSlick(schemaType, logger, db, !newDao)\n  }\n\n  def withSession[A](f: Session => A): A = {\n    withDatabase { db =>\n      val session = db.createSession()\n      try f(session)\n      finally session.close()\n    }\n  }\n\n  def withStatement[A](f: Statement => A): A =\n    withSession(session => session.withStatement()(f))\n\n  /**\n   * INTERNAL API\n   */\n  @InternalApi\n  private[jdbc] def withDatabase[A](f: Database => A): A =\n    f(db)\n}\n"
  },
  {
    "path": "doc/deadlock.md",
    "content": "\n\n# Slick Scheduling Algorithm\nFor the [new scheduling algorithm in #1461](https://github.com/slick/slick/pull/1461) to work correctly without deadlocks\nit is critical that Slick knows the connection pool size. It is set automatically [when you let Slick configure HikariCP](https://github.com/szeiger/slick/commit/353a6e41f389fbe776f1c38166bbe1a3f0a3f2e0)\nby calling `Database.forDatabase(...)` or `Database.forDataSource` where the default maxConnections is set to `1` connection if you don't override it yourself which\nfor most cases is too low, in all other cases you have to do it on your own by using\n\nIntroduction of priority levels:\n\n- __HighPriority__: a DBIO which already has a connection associated (due to running in a transaction or with pinned session)\n- __MediumPriority__: the old highPrio = true case: a continuation of another DBIO action, it should always be able to be enqueued\n- __LowPriority__: any other DBIO, if the queue is full it will not be enqueued\n\nThe queue backing Slick's ThreadPoolExecutor now consists internally of two backing queues:\n\n- A __HighPriority__ queue which contains only the HighPriority runnables\n- The old queue containing the other priority levels.\n\nAsyncExecutor\n\nHighPriority items are always taken first, until this queue is exhausted and only then Low- or MediumPriority items are considered.\n\nWe also do connection counting in the HikariCPJdbcDataSource:\nWhen there are no more connections in the pool, we prevent low/medium priority items from being processed from the queue.\nOnly HighPriority items are able to make progress, because they already have a connection.\n\n## Database thread pool\nEvery Database contains an AsyncExecutor that manages the thread pool for asynchronous execution of Database I/O Actions.\nIts size is the main parameter to tune for the best performance of the Database object. 
It should be set to the value that\nyou would use for the size of the connection pool in a traditional, blocking application.\n\nWhen using Database.forConfig, the thread pool is configured directly in the external configuration file together with\nthe connection parameters. If you use any other factory method to get a Database, you can either use a default configuration\nor specify a custom AsyncExecutor:\n\n```scala\nval db = Database.forURL(\"jdbc:h2:mem:test1;DB_CLOSE_DELAY=-1\", driver=\"org.h2.Driver\",\n  executor = AsyncExecutor(\"test1\", numThreads=10, queueSize=1000))\n```\n\n## Configuration\nThe object `akka.persistence.jdbc.util.SlickDatabase` reads the HikariJdbcDataSource configuration from typesafe\nconfiguration from the object `slick.db`. It expects a `connectionPool` field that has been set to `HikariCP`\nThe class `slick.jdbc.hikaricp.HikariCPJdbcDataSource` reads the configured Hikari Connection Pool configuration.\n\n```scala\ndef forConfig(c: Config, driver: Driver, name: String, classLoader: ClassLoader): HikariCPJdbcDataSource = {\n    val hconf = new HikariConfig()\n\n    // Connection settings\n    if (c.hasPath(\"dataSourceClass\")) {\n      hconf.setDataSourceClassName(c.getString(\"dataSourceClass\"))\n    } else {\n      Option(c.getStringOr(\"driverClassName\", c.getStringOr(\"driver\"))).map(hconf.setDriverClassName _)\n    }\n    hconf.setJdbcUrl(c.getStringOr(\"url\", null))\n    c.getStringOpt(\"user\").foreach(hconf.setUsername)\n    c.getStringOpt(\"password\").foreach(hconf.setPassword)\n    c.getPropertiesOpt(\"properties\").foreach(hconf.setDataSourceProperties)\n\n    // Pool configuration\n    hconf.setConnectionTimeout(c.getMillisecondsOr(\"connectionTimeout\", 1000))\n    hconf.setValidationTimeout(c.getMillisecondsOr(\"validationTimeout\", 1000))\n    hconf.setIdleTimeout(c.getMillisecondsOr(\"idleTimeout\", 600000))\n    hconf.setMaxLifetime(c.getMillisecondsOr(\"maxLifetime\", 1800000))\n    
hconf.setLeakDetectionThreshold(c.getMillisecondsOr(\"leakDetectionThreshold\", 0))\n    hconf.setInitializationFailFast(c.getBooleanOr(\"initializationFailFast\", false))\n    c.getStringOpt(\"connectionTestQuery\").foreach(hconf.setConnectionTestQuery)\n    c.getStringOpt(\"connectionInitSql\").foreach(hconf.setConnectionInitSql)\n    val numThreads = c.getIntOr(\"numThreads\", 20)\n    hconf.setMaximumPoolSize(c.getIntOr(\"maxConnections\", numThreads * 5))\n    hconf.setMinimumIdle(c.getIntOr(\"minConnections\", numThreads))\n    hconf.setPoolName(c.getStringOr(\"poolName\", name))\n    hconf.setRegisterMbeans(c.getBooleanOr(\"registerMbeans\", false))\n\n    // Equivalent of ConnectionPreparer\n    hconf.setReadOnly(c.getBooleanOr(\"readOnly\", false))\n    c.getStringOpt(\"isolation\").map(\"TRANSACTION_\" + _).foreach(hconf.setTransactionIsolation)\n    hconf.setCatalog(c.getStringOr(\"catalog\", null))\n\n    val ds = new HikariDataSource(hconf)\n    new HikariCPJdbcDataSource(ds, hconf)\n  }\n```\n\n## Resources\n- [Transactionally deadlock still there on 3.2.0-M1?](https://github.com/slick/slick/issues/1614)\n- [Slick deadlock #1461](https://github.com/slick/slick/pull/1461)\n- [connection leak detected #1678](https://github.com/slick/slick/issues/1678)\n"
  },
  {
    "path": "docs/LICENSE",
    "content": "﻿LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT\r\n\r\nTHIS LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT (THIS \"AGREEMENT\") IS A LEGAL AGREEMENT BETWEEN YOU (\"USER\") AND LIGHTBEND, INC. (\"LICENSOR\"). \r\nBY CLICKING THE \"I ACCEPT\" BUTTON, OR INSTALLING, COPYING OR OTHERWISE USING LIGHTBEND’S SOFTWARE (THE “SOFTWARE”) AND ITS ASSOCIATED USER DOCUMENTATION, USER ACKNOWLEDGES THAT USER HAS REVIEWED AND ACCEPTS THIS AGREEMENT AND AGREES TO BE BOUND BY ALL OF ITS TERMS. \r\nIF YOU ARE AGREEING TO THIS AGREEMENT AS AN INDIVIDUAL, “USER” REFERS TO YOU INDIVIDUALLY.  IF YOU ARE AGREEING TO THIS AGREEMENT AS A REPRESENTATIVE OF AN ENTITY, YOU REPRESENT THAT YOU HAVE THE AUTHORITY TO BIND THAT ENTITY AND “USER” REFERS TO THAT ENTITY AND ALL THE USERS ACCESSING THE SOFTWARE BY, THROUGH OR ON BEHALF OF THAT ENTITY.  \r\nIF USER DOES NOT AGREE WITH ALL OF THE TERMS OF THIS AGREEMENT, DO NOT INSTALL, COPY OR OTHERWISE USE THE SOFTWARE OR ITS DOCUMENTATION. \r\n\r\n1. DEFINITIONS. \r\n   1. “User System” means User’s website(s), computers, servers and other equipment and software upon and with which the Software is run.\r\n   2. “Documentation” means the user instructions and help files made available by Licensor for use with the Software, as may be updated from time to time by Licensor.\r\n   3. “Intellectual Property Rights” means all intellectual property rights or similar proprietary rights, including \r\n\t(a) patent rights and utility models, \r\n\t(b) copyrights and database rights, \r\n\t(c) trademarks, trade names, domain names and trade dress and the goodwill associated therewith, \r\n\t(d) trade secrets, \r\n\t(e) mask works, and \r\n\t(f) industrial design rights; in each case, including any registrations of, applications to register, and renewals and extensions of, any of the foregoing in any jurisdiction in the world.\r\n   4. 
“Open Source Software” means all software that is available under the GNU Affero General Public License (AGPL), GNU General Public License (GPL), GNU Lesser General Public License (LGPL), Mozilla Public License (MPL), Apache License, BSD licenses, or any other license that is approved by or similar to those approved by the Open Source Initiative (www.opensource.org).\r\n\r\n2. LICENSES AND RESTRICTIONS.  \r\n   1. License.  Subject to User’s compliance with the terms and conditions of this Agreement, Licensor hereby grants to User, during the term of this Agreement, a limited, non-exclusive, non-transferable and non-sublicensable right to \r\n\t(i) install and execute one (1) copy of the Software in accordance with the Documentation, solely in binary form, and not for the benefit of any other person or entity, and \r\n\t(ii) access and use the Documentation, solely for User’s own internally purposes in support of End User’s permitted use of the Software.  \r\n   2. Restrictions.  User shall not, directly or indirectly, or permit any User or third party to: \r\n\t(a) reverse engineer, decompile, disassemble or otherwise attempt to discover the source code or underlying ideas or algorithms of the Software;  \r\n\t(b) modify, translate, or create derivative works based on any element of the Software or any related Documentation (except to the extent applicable laws specifically prohibit such restriction for interoperability purposes, in which case you agree to first contact Licensor and provide Licensor an opportunity to create such changes as are needed for interoperability purposes); \r\n\t(c) use, rent, lease, distribute, sell, resell, assign, or otherwise transfer the Software or any copy thereof; \r\n\t(d) use the Software for timesharing purposes or otherwise for the benefit of any person or entity other than for the benefit of User and Users; \r\n\t(e) remove any proprietary notices from the Software or the Documentation or attempt to defeat any copy protection 
device included with the Software; or \r\n\t(f) use the Software for any purpose other than its intended purpose.\r\n   3. Reservation of Rights.  Nothing in this Agreement shall be construed to give User a right to use or otherwise obtain access to any source code from which the Software is compiled or interpreted.  Except as expressly granted in this Agreement, there are no other licenses granted to User, express, implied or by way of estoppel.  All rights not granted in this Agreement are reserved by Licensor.\r\n   4. Open Source Software.  Notwithstanding the foregoing, certain items of software included with the Software are Open Source Software and remains subject Open Source Software licenses.  Such Open Source Software is not subject to the terms and conditions of this Agreement.  \r\nInstead, each such item of Open Source Software is licensed under the terms of the end user license that accompanies such Open Source Software and nothing in this Agreement limits your rights under, or grants you rights that supersede, the terms and conditions of any applicable end user license for such Open Source Software.  If required by any license for particular Open Source Software, Licensor makes such Open Source Software, and any Licensor modifications to that Open Source Software, available as further described in the Documentation.\r\nUSE OF THE SOFTWARE IN ANY MANNER OTHER THAN AS PROVIDED IN THIS AGREEMENT IS STRICTLY PROHIBITED AND MAY INFRINGE ON THE INTELLECTUAL PROPERTY RIGHTS OF LICENSOR AND/OR ITS LICENSOR(S), SUBJECTING USER TO CIVIL AND CRIMINAL PENALTIES, INCLUDING WITHOUT LIMITATION MONETARY DAMAGES AND IMPRISONMENT FOR COPYRIGHT INFRINGEMENT.\r\n\r\n3. USER OBLIGATIONS.\r\n   1. User System.  
User is responsible for \r\n\t(a) obtaining, deploying and maintaining the User System, and all computer hardware, software, modems, routers and other communications equipment necessary for User and its Users to install and use the Software; and \r\n\t(b) paying all third party fees and access charges incurred in connection with the foregoing.  Licensor shall not be responsible for supplying any hardware, software or other equipment to User under this Agreement.\r\n   2. Compliance with Laws.  User agrees to use the Software in compliance with all applicable laws, including local laws of the country or region in which User resides, and in compliance with all United States export laws and regulations.  User shall not use the Software for any purpose prohibited by applicable law.  \r\n   3. Trademarks and Tradenames.  With regard to all copies of the Software permitted herein, User shall reproduce on such copies all Licensor copyright notices, and other proprietary notices appearing on and in the original copy of the software received from Licensor. Except as set forth in the foregoing sentence, User will not, during the term of this Agreement or thereafter, use any trademark of Licensor, or any word and/or symbol likely to be confused with any Licensor trademark, either alone or in any combination with other words and/or symbols.\r\n\r\n4. SUPPORT AND MAINTENANCE.\r\n   1. Support.  Licensor is not responsible for maintenance or support of the Software, or the equipment on which the Software resides or is used, under this Agreement. By accepting the license granted under this Agreement, User agrees that Licensor will be under no obligation to provide any support, maintenance or service in connection with the Software or such equipment.  \r\n   2. Upgrades and Updates.  
Licensor may from time to time in its sole discretion develop and provide updates for the Software, which may include upgrades, bug fixes, patches, other error corrections, and/or new features (collectively, including related documentation, “Updates”). Updates may also modify or delete in their entirety certain features and functionality. You agree that Licensor has no obligation to provide any Updates or to continue to provide or enable any particular features or functionality. \r\n\r\n5. REPRESENTATIONS AND WARRANTIES; DISCLAIMER.\r\n   1. Mutual Representations and Warranties.  Each party represents, warrants and covenants that: \r\n\t(a) it has the full power and authority to enter into this Agreement and to perform its obligations hereunder, without the need for any consents, approvals or immunities not yet obtained; and \r\n\t(b) its acceptance of and performance under this Agreement shall not breach any oral or written agreement with any third party or any obligation owed by it to any third party to keep any information or materials in confidence or in trust. \r\n   2. Disclaimer.  EXCEPT FOR THE WARRANTIES SET FORTH IN THIS SECTION 5, THE SOFTWARE AND THE DOCUMENTATION ARE PROVIDED ON AN AS-IS BASIS.  USER’S USE OF THE SOFTWARE AND THE DOCUMENTATION IS AT USER’S OWN RISK.  LICENSOR DOES NOT MAKE, AND HEREBY DISCLAIMS, ANY AND ALL OTHER EXPRESS, STATUTORY AND IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT AND TITLE, QUALITY, SUITABILITY, OPERABILITY, CONDITION, SYSTEM INTEGRATION, NON-INTERFERENCE, WORKMANSHIP, TRUTH, ACCURACY (OF DATA OR ANY OTHER INFORMATION OR CONTENT), ABSENCE OF DEFECTS, WHETHER LATENT OR PATENT, AND ANY WARRANTIES ARISING FROM A COURSE OF DEALING, USAGE, OR TRADE PRACTICE.  
LICENSOR ALSO DOES NOT WARRANT THAT THE FUNCTIONS CONTAINED IN, PERFORMED AND/OR PROVIDED BY THE SOFTWARE WILL MEET USER’S REQUIREMENTS, THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR ERROR-FREE, THAT THE SOFTWARE WILL BE COMPATIBLE OR WORK WITH ANY THIRD-PARTY SOFTWARE, APPLICATIONS OR DEVICES, OR THAT DEFECTS IN THE SOFTWARE WILL BE CORRECTED.  USER EXPRESSLY ACKNOWLEDGES AND AGREES THAT, TO THE EXTENT PERMITTED BY APPLICABLE LAW, ITS USE OF THE SOFTWARE IS AT ITS SOLE RISK AND THAT THE ENTIRE RISK AS TO SATISFACTORY QUALITY, PERFORMANCE, ACCURACY AND EFFORT IS WITH USER. USER FURTHER ACKNOWLEDGES AND AGREES THAT THE SOFTWARE IS NOT INTENDED OR SUITABLE FOR USE IN SITUATIONS OR ENVIRONMENTS WHERE THE FAILURE OR TIME DELAYS OF, OR ERRORS OR INACCURACIES IN THE CONTENT, DATA OR INFORMATION PROVIDED BY THE SOFTWARE COULD LEAD TO DEATH, PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE. ANY WARRANTIES MADE BY LICENSOR ARE FOR THE BENEFIT OF USER ONLY AND NOT FOR THE BENEFIT OF ANY THIRD PARTY.  THE SOFTWARE AND THE DOCUMENTATION ARE LICENSED AND NOT SOLD. NO AGENT OF LICENSOR IS AUTHORIZED TO ALTER OR EXPAND THE WARRANTIES OF LICENSOR AS SET FORTH HEREIN.  \r\n\r\n6. INDEMNIFICATION. User shall defend Licensor and its licensors and their respective officers, directors and employees (“Licensor Indemnified Parties”) from and against any and all Third-Party Claims which arise out of or relate to: \r\n\t(a) User’s use or alleged use of the Software other than as permitted under this Agreement; or \r\n\t(b) arising out of or relating to any violation of Section 2.2, or any violation of applicable laws.  User shall pay all damages, costs and expenses, including attorneys’ fees and costs (whether by settlement or award of by a final judicial judgment) incurred by the Licensor Indemnified Parties from any such Third-Party Claim.  In no event shall Licensor settle any claim without User’s prior written approval.  
Licensor may, at its own expense, engage separate counsel to advise Licensor regarding a Third-Party Claim and to participate in the defense of the claim, subject to User’s right to control the defense and settlement. If you are a New Jersey resident, this indemnification clause is to be only as broad and inclusive as is permitted by the law of the state of New Jersey.\r\n\r\n7. CONFIDENTIALITY. \r\n   1. Confidential Information. User acknowledges that the Software contains valuable proprietary information and trade secrets and that unauthorized or improper use of the Software will result in irreparable harm to Licensor for which monetary damages would be inadequate and for which Licensor may be entitled to immediate injunctive relief. Accordingly, you will maintain the confidentiality of the proprietary information and not sell, license, publish, display, distribute, disclose or otherwise make available such proprietary information to any third party, nor use such information except as authorized by this Agreement.\r\n   2. Injunctive Relief.  User agrees that any unauthorized disclosure of confidential information may cause immediate and irreparable injury to Licensor and that, in the event of such breach, Licensor will be entitled, in addition to any other available remedies, to seek immediate injunctive and other equitable relief, without bond and without the necessity of showing actual monetary damages.\r\n\r\n8. PROPRIETARY RIGHTS. \r\n   1. Licensor.  As between Licensor and User, all right, title and interest in the Software, the Documentation, and any other Licensor materials furnished or made available hereunder, and all modifications and enhancements thereof, and all suggestions, ideas and feedback proposed by User regarding the Software, including all copyright rights, patent rights and other Intellectual Property Rights in each of the foregoing, belong to and are retained solely by Licensor or Licensor’s licensors and providers, as applicable.  
User hereby does and will irrevocably assign to Licensor all evaluations, ideas, feedback and suggestions made by User to Licensor regarding the Software or the Documentation (collectively, “Feedback”) and all Intellectual Property Rights in the Feedback.  \r\n\r\n9. LIMITATION OF LIABILITY.\r\n   1. No Consequential Damages.  NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR ANY INDIRECT, INCIDENTAL, SPECIAL, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR ANY DAMAGES FOR LOST DATA, BUSINESS INTERRUPTION, LOST PROFITS, LOST REVENUE OR LOST BUSINESS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT, EVEN IF LICENSOR OR ITS LICENSORS OR USER HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES, INCLUDING WITHOUT LIMITATION, ANY SUCH DAMAGES ARISING OUT OF THE LICENSING, PROVISION OR USE OF THE SOFTWARE OR THE RESULTS OF THE USE OF THE SOFTWARE.  LICENSOR WILL NOT BE LIABLE FOR THE COST OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES.\r\n   2. LIMITS ON LIABILITY.  NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR CUMULATIVE, AGGREGATE DAMAGES GREATER THAN FIVE HUNDRED DOLLARS (US $500).  \r\n   3. ESSENTIAL PURPOSE.  USER ACKNOWLEDGES THAT THE TERMS IN THIS SECTION 9 (LIMITATION OF LIABILITY) SHALL APPLY TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW AND SHALL APPLY EVEN IF AN EXCLUSIVE OR LIMITED REMEDY STATED HEREIN FAILS OF ITS ESSENTIAL PURPOSE. SOME JURISDICTIONS DO NOT ALLOW CERTAIN LIMITATIONS OF LIABILITY, SO SOME OR ALL OF THE ABOVE LIMITATIONS OF LIABILITY MAY NOT APPLY TO YOU.  IF YOU ARE A NEW JERSEY RESIDENT, THIS LIMITATION OF LIABILITY SECTION IS TO BE ONLY AS BROAD AND INCLUSIVE AS IS PERMITTED BY THE LAW OF THE STATE OF NEW JERSEY.\r\n\r\n10. TERM AND TERMINATION.  \r\n   1. Term.  This Agreement and User’s right to use the Software commences on earlier of the date that User: \r\n\t(a) installs the Software, \r\n\t(b) begins using the Software or \r\n\t(c) otherwise demonstrates assent to this Agreement.  
\r\n\tUser’s right to use the Software shall continue until such time as this Agreement is terminated (the “Term”).  \r\n   2. Termination for Cause.  A party may terminate this Agreement, upon written notice to the other party in the event the other party files a petition for bankruptcy or has a petition for bankruptcy filed against it that is not dismissed within sixty (60) calendar days after filing  or admits its inability to pay its debts as they mature, makes an assignment for the benefit of its creditors or ceases to function as a going concern or to conduct its operations in the normal course of business and such termination shall occur immediately upon notice.  Licensor may terminate this Agreement at any time without notice if it ceases to support the Software, which Licensor may do in its sole discretion. In addition, this Agreement will terminate immediately and automatically without any notice if User breaches any of its terms and conditions.\r\n   3. Termination for Convenience.  Either party may terminate this Agreement for convenience on at least thirty (30) calendar days prior written notice to the other party.  User may also terminate this Agreement by ceasing all use of the Software.\r\n   4. Effects of Termination.  Upon expiration or termination of this Agreement, User’s shall cease all use of the Software and the Documentation and shall destroy all copies of the Software in User’s possession or control.\r\n   5. Survival.  This Section and Sections 1, 2.2 (Restrictions), 2.3 (Reservation of Rights), 2.4 (Open Source Software), 5.2 (Disclaimer), 6 (Indemnification), 7 (Confidentiality), 8 (Proprietary Rights), 9 (Limitation of Liability), 10.4 (Effects of Termination) and 11 (Miscellaneous) shall survive any termination or expiration of this Agreement. \r\n\r\n11. MISCELLANEOUS.\r\n   1. Notices.  
Licensor may give notice to User by means of a general notice through electronic mail to User’s e-mail address, or by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service to User’s address on record with Licensor. User may give notice to Licensor by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service addressed to Licensor, Lightbend Inc., 580 California, #1231, San Francisco, CA 94104, Attention: User Support.  Notice shall be deemed to have been given upon receipt or, if earlier, two (2) business days after mailing, as applicable. All communications and notices to be made or given pursuant to this Agreement shall be in the English language.  \r\n   2. Governing Law.  This Agreement and the rights and obligations of the parties to and under this agreement shall be governed by and construed under the laws of the United States and the State of California as applied to agreements entered into and to be performed in such State without giving effect to conflicts of laws rules or principles.  The parties agree that the United Nations Convention on Contracts for the International Sale of Goods is specifically excluded from application to this Agreement and that the application of the Uniform Computer Information Transactions Act (UCITA) is specifically disclaimed.  Any dispute arising out of or in connection with this Agreement, including but not limited to any question regarding its existence, interpretation, validity, performance, or termination, or any dispute between the parties arising from the parties' relationship created by this Agreement, shall be referred to and finally resolved by arbitration administered by the American Arbitration Association under its rules.  The number of arbitrators shall be one (1).  The parties shall endeavor to agree upon the sole arbitrator and jointly nominate the arbitrator.  
If the parties cannot agree upon the sole arbitrator within a time prescribed by AAA, the parties shall request the AAA to propose five (5) arbitrators and each party shall rank the proposed arbitrators.  The AAA shall appoint an arbitrator from the list of five (5), based upon the parties' rankings.  \r\n   3. U.S. Government Users.  If User is a Federal Government entity, Licensor provides the Software and the Documentation, including related software and technology, for ultimate Federal Government end use solely in accordance with the following:  Government technical data rights include only those rights customarily provided to the public with a commercial item or process and Government software rights related to the Software and the Documentation include only those rights customarily provided to the public, as defined in this Agreement.  The technical data rights and customary commercial software license is provided in accordance with FAR 12.211 (Technical Data) and FAR 12.212 (Software) and, for Department of Defense transactions, DFAR 252.227-7015 (Technical Data – Commercial Items) and DFAR 227.7202-3 (Rights in Commercial Computer Software or Computer Software Documentation).  If greater rights are needed, a mutually acceptable written addendum specifically conveying such rights must be included in this Agreement. \r\n   4. Export.  The Software utilizes software and technology that may be subject to United States and foreign export controls. User acknowledges and agrees that the Software shall not be used, and none of the underlying information, software, or technology may be transferred or otherwise exported or re-exported to countries as to which the United States maintains an embargo (collectively, “Embargoed Countries”), or to or by a national or resident thereof, or any person or entity on the U.S. Department of Treasury’s List of Specially Designated Nationals or the U.S. 
Department of Commerce’s Table of Denial Orders (collectively, “Designated Nationals”). The lists of Embargoed Countries and Designated Nationals are subject to change without notice. By using the Software, User represents and warrants that it is not located in, under the control of, or a national or resident of an Embargoed Country or Designated National. The Software may use encryption technology that is subject to licensing requirements under the U.S. Export Administration Regulations, 15 C.F.R. Parts 730-774 and Council Regulation (EC) No. 1334/2000. User agrees to comply strictly with all applicable export laws and assume sole responsibility for obtaining licenses to export or re-export as may be required. Licensor and its licensors make no representation that the Software is appropriate or available for use in other locations. By using the Software, User represents and warrants that it is not located in any such country or on any such list.\r\n   5. General.  User shall not assign its rights hereunder, or delegate the performance of any of its duties or obligations hereunder, whether by merger, acquisition, sale of assets, operation of law, or otherwise, without the prior written consent of Licensor.  Any purported assignment in violation of the preceding sentence is null and void.  Subject to the foregoing, this Agreement shall be binding upon, and inure to the benefit of, the successors and assigns of the parties thereto.  Except as otherwise specified in this Agreement, this Agreement may be amended or supplemented only by a writing that refers explicitly to this Agreement and that is signed on behalf of both parties.  No waiver will be implied from conduct or failure to enforce rights.  No waiver will be effective unless in a writing signed on behalf of the party against whom the waiver is asserted.  
If any of this Agreement is found invalid or unenforceable that term will be enforced to the maximum extent permitted by law and the remainder of this Agreement will remain in full force.  \r\nNothing contained herein shall be construed as creating an agency, partnership, or other form of joint enterprise between the parties.  \r\nThis Agreement constitutes the entire agreement between the parties relating to this subject matter and supersedes all prior or simultaneous understandings, representations, discussions, negotiations, and agreements, whether written or oral.  \r\nNeither party shall be liable to the other party or any third party for failure or delay in performing its obligations under this Agreement when such failure or delay is due to any cause beyond the control of the party concerned, including, without limitation, force majeure, governmental orders or restrictions, fire, or flood, provided that upon cessation of such events such party shall thereupon promptly perform or complete the performance of its obligations hereunder."
  },
  {
    "path": "docs/release-train-issue-template.md",
    "content": "Release Akka Persistence JDBC $VERSION$\n\n<!--\n# Release Train Issue Template for Akka Persistence JDBC\n\n(Liberally copied and adopted from Scala itself https://github.com/scala/scala-dev/blob/b11cd2e4a4431de7867db6b39362bea8fa6650e7/notes/releases/template.md)\n\nFor every release, use the `scripts/create-release-issue.sh` to make a copy of this file named after the release, and expand the variables.\n\nVariables to be expanded in this template:\n- $VERSION$=???\n\nKey links:\n  - akka/akka-persistence-jdbc milestone: https://github.com/akka/akka-peristence-jdbc/milestone/?\n-->\n\n### Cutting the release\n\n- [ ] Check that open PRs and issues assigned to the milestone are reasonable\n- [ ] Update the Change date and version in the LICENSE file.\n- [ ] Create a new milestone for the [next version](https://github.com/akka/akka-persistence-jdbc/milestones)\n- [ ] Close the [$VERSION$ milestone](https://github.com/akka/akka-persistence-jdbc/milestones?direction=asc&sort=due_date)\n- [ ] Make sure all important PRs have been merged\n- [ ] Update the revision in Fossa in the Akka Group for the Akka umbrella version, e.g. `22.10`. Note that the revisions for the release is udpated by Akka Group > Projects > Edit. For recent dependency updates the Fossa validation can be triggered from the GitHub actions \"Dependency License Scanning\".\n- [ ] Wait until [main build finished](https://github.com/akka/akka-persistence-jdbc/actions) after merging the latest PR\n- [ ] Update the [draft release](https://github.com/akka/akka-persistence-jdbc/releases) with the next tag version `v$VERSION$`, title and release description. 
Use the `Publish release` button, which will create the tag.\n- [ ] Check that GitHub Actions release build has executed successfully (GitHub Actions will start a [CI build](https://github.com/akka/akka-persistence-jdbc/actions) for the new tag and publish artifacts to https://repo.akka.io/maven)\n\n### Check availability\n\n- [ ] Check [API](https://doc.akka.io/api/akka-persistence-jdbc/$VERSION$/) documentation\n- [ ] Check [reference](https://doc.akka.io/libraries/akka-persistence-jdbc/$VERSION$/) documentation. Check that the reference docs were deployed and show a version warning (see section below on how to fix the version warning).\n- [ ] Check the release `mvn dependency:get -Dartifact=com.lightbend.akka:akka-persistence-jdbc_2.13:$VERSION$`\n\n### When everything is on https://repo.akka.io/maven\n  - [ ] Log into `gustav.akka.io` as `akkarepo` \n    - [ ] If this updates the `current` version, run `./update-akka-persistence-jdbc-current-version.sh $VERSION$`\n    - [ ] otherwise check changes and commit the new version to the local git repository\n         ```\n         cd ~/www\n         git status\n         git add libraries/akka-persistence-jdbc/current libraries/akka-persistence-jdbc/$VERSION$\n         git add api/akka-persistence-jdbc/current api/akka-persistence-jdbc/$VERSION$\n         git commit -m \"Akka Persistence JDBC $VERSION$\"\n         ```\n\n### Announcements\n\nFor important patch releases, and only if critical issues have been fixed:\n\n- [ ] Send a release notification to [Lightbend discuss](https://discuss.akka.io)\n- [ ] Tweet using the [@akkateam](https://twitter.com/akkateam/) account (or ask someone to) about the new release\n- [ ] Announce internally (with links to Tweet, discuss)\n\nFor minor or major releases:\n\n- [ ] Include noteworthy features and improvements in Akka umbrella release announcement at akka.io. 
Coordinate with PM and marketing.\n\n### Afterwards\n\n- [ ] Update [akka-dependencies bom](https://github.com/lightbend/akka-dependencies) and version for [Akka module versions](https://doc.akka.io/libraries/akka-dependencies/current/) in [akka-dependencies repo](https://github.com/akka/akka-dependencies)\n- [ ] Update [Akka Guide samples](https://github.com/akka/akka-platform-guide)\n- Close this issue\n"
  },
  {
    "path": "docs/src/main/paradox/_template/projectSpecificFooter.st",
    "content": "<script type=\"text/javascript\" src=\"$page.base$assets/js/warnOldVersion.js\"></script>\n<script type=\"text/javascript\">//<![CDATA[\njQuery(function(jq){initOldVersionWarnings(jq, '$page.properties.(\"project.version\")$', '$page.properties.(\"project.url\")$')});\n//]]></script>\n"
  },
  {
    "path": "docs/src/main/paradox/assets/js/warnOldVersion.js",
    "content": "function initOldVersionWarnings($, thisVersion, projectUrl) {\n    if (projectUrl && projectUrl !== \"\") {\n        var schemeLessUrl = projectUrl;\n        if (projectUrl.startsWith(\"http://\")) projectUrl = schemeLessUrl.substring(5);\n        else if (projectUrl.startsWith(\"https://\")) projectUrl = schemeLessUrl.substring(6);\n        const url = schemeLessUrl + (schemeLessUrl.endsWith(\"\\/\") ? \"\" : \"/\") + \"paradox.json\";\n        $.get(url, function (versionData) {\n            const currentVersion = versionData.version;\n            if (thisVersion !== currentVersion) {\n                showVersionWarning(thisVersion, currentVersion, projectUrl);\n            }\n        });\n    }\n}\n\nfunction showVersionWarning(thisVersion, currentVersion, projectUrl) {\n    $('#docs').prepend(\n        '<div class=\"callout warning\" style=\"margin-top: 16px\">' +\n        '<p><span style=\"font-weight: bold\">This documentation regards version ' + thisVersion + ', ' +\n        'however the current version is <a href=\"' + projectUrl + '\">' + currentVersion + '</a>.</span></p>' +\n        '</div>'\n    );\n}"
  },
  {
    "path": "docs/src/main/paradox/configuration.md",
    "content": "# Configuration\n\nThe plugin relies on @extref[Slick](slick:) to do create the SQL dialect for the database in use, therefore the following must be configured in `application.conf`\n\nConfigure `akka-persistence`:\n\n- instruct akka persistence to use the `jdbc-journal` plugin,\n- instruct akka persistence to use the `jdbc-snapshot-store` plugin,\n\nConfigure `slick`:\n\n- The following slick profiles are supported:\n  - `slick.jdbc.PostgresProfile$`\n  - `slick.jdbc.MySQLProfile$`\n  - `slick.jdbc.H2Profile$`\n  - `slick.jdbc.OracleProfile$`\n  - `slick.jdbc.SQLServerProfile$`\n\n## Database Schema\n\n- @extref:[Postgres Schema](github:/core/src/main/resources/schema/postgres/postgres-create-schema.sql)\n- @extref:[MySQL Schema](github:/core/src/main/resources/schema/mysql/mysql-create-schema.sql)\n- @extref:[H2 Schema](github:/core/src/main/resources/schema/h2/h2-create-schema.sql)\n- @extref:[Oracle Schema](github:/core/src/main/resources/schema/oracle/oracle-create-schema.sql)\n- @extref:[SQL Server Schema](github:/core/src/main/resources/schema/sqlserver/sqlserver-create-schema.sql)\n\n@@@ note\n\nPlease note that the H2 database is not recommended to be used as a production database, and support for H2 is primarily for testing purposes.\n\n@@@\n\nFor testing purposes the journal and snapshot tables can be created programmatically using the provided `SchemaUtils`.\n\n\n\nScala\n:  @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/ScaladslSnippets.scala) { #create }\n\nJava\n:  @@snip[snip](/core/src/test/java/akka/persistence/jdbc/JavadslSnippets.java) { #create }\n\nA `dropIfExists` variant is also available.\n\n**Note**: `SchemaUtils` was introduced in version 5.0.0.\n\n\n## Reference Configuration\n\nakka-persistence-jdbc provides the defaults as part of the @extref:[reference.conf](github:/core/src/main/resources/reference.conf). 
This file documents all the values which can be configured.\n\nThere are several possible ways to configure loading your database connections. Options will be explained below.\n\n### One database connection pool per journal type\n\nThere is the possibility to create a separate database connection pool per journal-type (one pool for the write-journal,\none pool for the snapshot-journal, and one pool for the read-journal). This is the default and the following example\nconfiguration shows how this is configured:\n\nPostgres\n: @@snip[Postgres](/core/src/test/resources/postgres-application.conf)\n\nMySQL\n: @@snip[MySQL](/core/src/test/resources/mysql-application.conf)\n\nH2\n: @@snip[H2](/core/src/test/resources/h2-application.conf)\n\nOracle\n: @@snip[Oracle](/core/src/test/resources/oracle-application.conf)\n\nSQL Server\n: @@snip[SQL Server](/core/src/test/resources/sqlserver-application.conf)\n\n### Sharing the database connection pool between the journals\n\nIn order to create only one connection pool which is shared between all journals the following configuration can be used:\n\nPostgres\n: @@snip[Postgres](/core/src/test/resources/postgres-shared-db-application.conf)\n\nMySQL\n: @@snip[MySQL](/core/src/test/resources/mysql-shared-db-application.conf)\n\nH2\n: @@snip[H2](/core/src/test/resources/h2-shared-db-application.conf)\n\nOracle\n: @@snip[Oracle](/core/src/test/resources/oracle-shared-db-application.conf)\n\nSQL Server\n: @@snip[SQL Server](/core/src/test/resources/sqlserver-shared-db-application.conf)\n\n### Customized loading of the db connection\n\nIt is also possible to load a custom database connection. \nIn order to do so a custom implementation of @extref:[SlickDatabaseProvider](github:/core/src/main/scala/akka/persistence/jdbc/db/SlickExtension.scala)\nneeds to be created. 
The methods that need to be implemented supply the Slick `Database` and `Profile` to the journals.\n\nTo enable your custom `SlickDatabaseProvider`, the fully qualified class name of the `SlickDatabaseProvider`\nneeds to be configured in the application.conf. In addition, you might want to consider whether you want\nthe database to be closed automatically:\n\n```hocon\nakka-persistence-jdbc {\n  database-provider-fqcn = \"com.mypackage.CustomSlickDatabaseProvider\"\n}\njdbc-journal {\n  use-shared-db = \"enabled\" // setting this to any non-empty string prevents the journal from closing the database on shutdown\n}\njdbc-snapshot-store {\n  use-shared-db = \"enabled\" // setting this to any non-empty string prevents the snapshot-journal from closing the database on shutdown\n}\n```\n\n### DataSource lookup by JNDI name\n\nThe plugin uses `Slick` as the database access library. Slick @extref[supports jndi](slick:database.html#using-a-jndi-name) for looking up @javadoc[DataSource](javax.sql.DataSource)s.\n\nTo enable the JNDI lookup, you must add the following to your application.conf:\n\n```hocon\njdbc-journal {\n  slick {\n    profile = \"slick.jdbc.PostgresProfile$\"\n    jndiName = \"java:jboss/datasources/PostgresDS\"\n  }\n}\n```\n\nWhen using the `use-shared-db = slick` setting, the following configuration can serve as an example:\n\n```hocon\nakka-persistence-jdbc {\n  shared-databases {\n    slick {\n      profile = \"slick.jdbc.PostgresProfile$\"\n      jndiName = \"java:/jboss/datasources/bla\"\n    }\n  }\n}\n```\n\n## Explicitly shutting down the database connections\n\nThe plugin automatically shuts down the HikariCP connection pool when the ActorSystem is terminated.\nThis is done using @apidoc[ActorSystem.registerOnTermination](ActorSystem).\n\n## Tuning for Lower Latency\n\nThe `jdbc-read-journal.journal-sequence-retrieval.query-delay` configuration controls how often the actor queries for new data. 
The default is `1s`, but this can be set lower for latency-sensitive applications to reduce the time between data retrievals.\n\nSimilarly, `jdbc-read-journal.refresh-interval` dictates how often the system polls for new events when idle, also defaulting to `1s`. In mostly idle applications that still require low latencies, it is important to adjust both `query-delay` and `refresh-interval` to achieve optimal performance. Lowering just one of these values might not be sufficient for reducing latency.\n\nAs with any performance tuning, it’s important to test these settings in your environment to find the right balance. Reducing these intervals will increase the load on your database, as each node in the cluster will be querying the event journal more frequently.\n"
  },
  {
    "path": "docs/src/main/paradox/custom-dao.md",
    "content": "# Custom DAO Implementation\n\nThe plugin supports loading a custom DAO for the journal and snapshot. You should implement a custom Data Access Object (DAO) if you wish to alter the default persistency strategy in\nany way, but wish to reuse all the logic that the plugin already has in place, e.g. the Akka Persistence Query API. For example, the default persistency strategy that the plugin\nsupports serializes journal and snapshot messages using a serializer of your choice and stores them as byte arrays in the database.\n\nBy means of configuration in `application.conf` a DAO can be configured, below the default DAOs are shown:\n\n```hocon\njdbc-journal {\n  dao = \"akka.persistence.jdbc.journal.dao.DefaultJournalDao\"\n}\n\njdbc-snapshot-store {\n  dao = \"akka.persistence.jdbc.snapshot.dao.DefaultSnapshotDao\"\n}\n\njdbc-read-journal {\n  dao = \"akka.persistence.jdbc.query.dao.DefaultReadJournalDao\"\n}\n```\n\nStoring messages as byte arrays in blobs is not the only way to store information in a database. For example, you could store messages with full type information as normal database rows, each event type having its own table.\nFor example, implementing a Journal Log table that stores all persistenceId, sequenceNumber and event type discriminator field, and storing the event data in another table with full typing.\n\nYou only have to implement two interfaces `akka.persistence.jdbc.journal.dao.JournalDao` and/or `akka.persistence.jdbc.snapshot.dao.SnapshotDao`. 
\n\nFor example, take a look at the following two custom DAOs:\n\n```scala\nclass MyCustomJournalDao(db: Database, val profile: JdbcProfile, journalConfig: JournalConfig, serialization: Serialization)(implicit ec: ExecutionContext, mat: Materializer) extends JournalDao {\n    // snip\n}\n\nclass MyCustomSnapshotDao(db: JdbcBackend#Database, val profile: JdbcProfile, snapshotConfig: SnapshotConfig, serialization: Serialization)(implicit ec: ExecutionContext, val mat: Materializer) extends SnapshotDao {\n    // snip\n}\n```\n\nAs you can see, the custom DAOs get a _Slick database_, a _Slick profile_, the journal or snapshot _configuration_, an _akka.serialization.Serialization_, an _ExecutionContext_ and _Materializer_ injected after construction.\nYou should register the Fully Qualified Class Name in `application.conf` so that the custom DAOs will be used.\n\nFor more information please review the two default implementations `akka.persistence.jdbc.dao.bytea.journal.ByteArrayJournalDao` and `akka.persistence.jdbc.dao.bytea.snapshot.ByteArraySnapshotDao` or the demo custom DAO example from the [demo-akka-persistence](https://github.com/dnvriend/demo-akka-persistence-jdbc) site.\n\n@@@warning { title=\"Binary compatibility\" }\n\nThe APIs for custom DAOs are not guaranteed to be binary backwards compatible between major versions of the plugin.\nFor example 4.0.0 is not binary backwards compatible with 3.5.x. There may also be source incompatible changes of\nthe APIs for custom DAOs if new capabilities must be added to the traits.\n\n@@@\n\n"
  },
  {
    "path": "docs/src/main/paradox/durable-state-store.md",
    "content": "# DurableStateStore\n## How to get the DurableStateStore\n\nThe `DurableStateStore` for JDBC plugin is obtained through the `DurableStateStoreRegistry` extension.\n\nScala\n:  @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala) { #jdbc-durable-state-store }\n\nJava\n: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java) { #jdbc-durable-state-store }\n\n## APIs supported by DurableStateStore\n\nThe plugin supports the following APIs:\n\n### getObject\n\n`getObject(persistenceId)` returns `GetObjectResult(value, revision)`, where `value` is an `Option` (`Optional` in Java)\nand is set to the value of the object if it exists with the passed in `persistenceId`. Otherwise `value` is empty.\n\nScala\n:  @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala) { #get-object }\n\nJava\n: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java) { #get-object }\n\n### upsertObject\n\n`upsertObject(persistenceId, revision, value, tag)` inserts the record if the `persistenceId` does not exist in the \ndatabase. Or else it updates the record with the latest revision passed as `revision`. The update succeeds only if the\nincoming `revision` is 1 more than the already existing one. 
This snippet is an example of a sequence of `upsertObject`\nand `getObject`.\n\nScala\n:  @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala) { #upsert-get-object }\n\nJava\n: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java) { #upsert-get-object }\n\n### deleteObject\n\n`deleteObject(persistenceId)` deletes the record with the input `persistenceId`.\n\nScala\n:  @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala) { #delete-object }\n\nJava\n: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java) { #delete-object }\n\n### currentChanges\n\n`currentChanges(tag, offset)` gets a source of the most recent changes made to objects with the given `tag` since \nthe passed in `offset`. This API returns changes that occurred up to when the `Source` returned by this call is materialized.\n\nScala\n:  @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala) { #current-changes }\n\nJava\n: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java) { #current-changes }\n\n### changes\n\n`changes(tag, offset)` gets a source of the most recent changes made to objects with the given `tag` since \nthe passed in `offset`. The returned source will never terminate, it effectively watches for changes to the objects \nand emits changes as they happen.\n\nScala\n:  @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala) { #changes }\n\nJava\n: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/state/JavadslSnippets.java) { #changes }\n\n"
  },
  {
    "path": "docs/src/main/paradox/index.md",
    "content": "# Akka Persistence JDBC\n\nThe Akka Persistence JDBC plugin allows for using JDBC-compliant databases as backend for @extref:[Akka Persistence](akka:persistence.html) and @extref:[Akka Persistence Query](akka:persistence-query.html).\n\n@@toc { depth=2 }\n\n@@@ index\n\n* [Overview](overview.md)\n* [Configuration](configuration.md)\n* [Migration](migration.md)\n* [Query](query.md)\n* [Custom DAO](custom-dao.md)\n* [Snapshots](snapshots.md)\n* [Durable State Store](durable-state-store.md)\n\n@@@\n"
  },
  {
    "path": "docs/src/main/paradox/migration.md",
    "content": "# Migration\n\n## Migrating to version 5.4.0\n\nRelease `5.4.0` changed the schema of the `event_tag` table.\n\nThe previous version was using an auto-increment column as a primary key and foreign key on the `event_tag` table. As a result, the insert of multiple events in batch was not performant.\n\nWhile in `5.4.0`, the primary key and foreign key on the `event_tag` table have been replaced with a primary key from the `event_journal` table. In order to migrate to the new schema, we made a [**migration script**](https://github.com/akka/akka-persistence-jdbc/tree/master/core/src/main/resources/schema) which is capable of creating the new column, migrating the rows and adding the new constraints.\n\nBy default, the plugin will behave as in the previous version. However, it's required to add two new columns to the `event_tag` table. Before upgrading to `5.4.0`, make sure that you apply at least the first step of the [**migration script**](https://github.com/akka/akka-persistence-jdbc/tree/master/core/src/main/resources/schema).\n\nIf you want to use the new `event_tag` keys, you need to run a multiple-phase rollout:\n\n1. apply the first step of the migration script (as mentioned above) and then redeploy your application with the default settings after upgrading to version `5.4.0`.\n2. apply the second step of the migration script that will migrate the rows and adapt the constraints.\n3. 
redeploy the application by disabling the legacy-mode:\n\n```config\njdbc-journal {\n  tables {\n    // ...\n    event_tag {\n      // ...\n      // enable the new tag key\n      legacy-tag-key = false\n    } \n  }\n}\n// or simply configure via the flattened style\njdbc-journal.tables.event_tag.legacy-tag-key = false\n```\n\n\n## Migrating to version 5.2.0\n\n**Release `5.2.0` updates H2 to version 2.1.214 which is not compatible with the previous 1.4.200**\n\nH2 has undergone considerable changes that broke backwards compatibility to make H2 SQL Standard compliant.\nFor migration please refer to the H2 [migration guide](https://www.h2database.com/html/migration-to-v2.html)\n\n\n## Migrating to version 5.0.0\n\n**Release `5.0.0` introduces a new schema and serialization that is not compatible with older versions.** \n\nThe previous version was wrapping the event payload with Akka's `PersistentRepr`, while in 5.0.0 the serialized event payload is persisted directly into the column. In order to migrate to the new schema, a migration tool capable of reading the serialized representation of `PersistentRepr` is required. 
That [tool doesn't exist yet](https://github.com/akka/akka-persistence-jdbc/issues/317), therefore, the new schema can only be used with new applications.\n\nIf you have existing data override the DAO to continue using the old schema:\n\n```hocon\n# Use the DAOs for the legacy (pre 5.0) database schema\n\njdbc-journal {\n  dao = \"akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao\"\n}\n\njdbc-snapshot-store {\n  dao = \"akka.persistence.jdbc.snapshot.dao.legacy.ByteArraySnapshotDao\"\n}\n\njdbc-read-journal {\n  dao = \"akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao\"\n}\n```\n\nIf you have re-configured the `schemaName`, `tableName` and `columnNames` through configuration settings then you will need to move them to a new key.\n\n* key `jdbc-journal.tables.journal` becomes `jdbc-journal.tables.legacy_journal`\n* key `jdbc-snapshot-store.tables.snapshot` becomes `jdbc-snapshot-store.tables.legacy_snapshot`\n* key `jdbc-read-journal.tables.journal` becomes `jdbc-read-journal.tables.legacy_journal`\n"
  },
  {
    "path": "docs/src/main/paradox/overview.md",
    "content": "# Overview\n\nThe Akka Persistence JDBC plugin allows for using JDBC-compliant databases as backend for @extref:[Akka Persistence](akka:persistence.html) and @extref:[Akka Persistence Query](akka:persistence-query.html).\n\nakka-persistence-jdbc writes journal and snapshot entries to a configured JDBC store. It implements the full akka-persistence-query API and is therefore very useful for implementing DDD-style application models using Akka and Scala for creating reactive applications.\n\nAkka Persistence JDBC requires Akka $akka.version$ or later. It uses @extref:[Slick](slick:) $slick.version$ internally to access the database via JDBC, this does not require user code to make use of Slick.\n\n## Version history\n\n| Description                                                                                       | Version                                                                     | Akka version |\n|---------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------|--------------|\n| Required database schema migration, see @ref:[Migration](migration.md#migrating-to-version-5-4-0) | [5.4.0](https://github.com/akka/akka-persistence-jdbc/releases/tag/v5.4.0)  | Akka 2.6.+ |\n| New database schema, see @ref:[Migration](migration.md#migrating-to-version-5-0-0)                | [5.0.0](https://github.com/akka/akka-persistence-jdbc/releases/tag/v5.0.0)  | Akka 2.6.+ |\n| First release within the Akka organization                                                        | [4.0.0](https://github.com/akka/akka-persistence-jdbc/releases/tag/v4.0.0)  | Akka 2.6.+ |\n| Requires Akka 2.5.0                                                                               | [3.5.3+](https://github.com/akka/akka-persistence-jdbc/releases/tag/v3.5.3) | Akka 2.5.23+ or 2.6.x |\n\nSee the full release history at [GitHub 
releases](https://github.com/akka/akka-persistence-jdbc/releases).\n\n## Module info\n\nThe Akka dependencies are available from Akka's library repository. To access them there, you need to configure the URL for this repository.\n\n@@repository [sbt,Maven,Gradle] {\nid=\"akka-repository\"\nname=\"Akka library repository\"\nurl=\"https://repo.akka.io/maven\"\n}\n\nAdditionally, add the dependencies as below.\n\n@@dependency [sbt,Maven,Gradle] {\n  group=com.lightbend.akka\n  artifact=akka-persistence-jdbc_$scala.binary.version$\n  version=$project.version$\n  symbol2=AkkaVersion\n  value2=$akka.version$\n  group2=com.typesafe.akka\n  artifact2=akka-persistence-query_$scala.binary.version$\n  version2=AkkaVersion\n  symbol3=SlickVersion\n  value3=$slick.version$\n  group3=com.typesafe.slick\n  artifact3=slick_$scala.binary.version$\n  version3=SlickVersion\n  group4=com.typesafe.slick\n  artifact4=slick-hikaricp_$scala.binary.version$\n  version4=SlickVersion\n}\n\n@@project-info{ projectId=\"core\" }\n\n## Contribution policy\n\nContributions via GitHub pull requests are gladly accepted from their original author. Along with any pull requests, please state that the contribution is your original work and that you license the work to the project under the project's open source license. Whether or not you state this explicitly, by submitting any copyrighted material via pull request, email, or other means you agree to license the material under the project's open source license and warrant that you have the legal authority to do so.\n\n## Code of Conduct\n\nContributors all agree to follow the [Lightbend Community Code of Conduct](https://www.lightbend.com/conduct).\n\n## License\n\nThis source code is made available under the [Business Source License 1.1](https://raw.githubusercontent.com/akka/akka-persistence-jdbc/master/LICENSE)\n"
  },
  {
    "path": "docs/src/main/paradox/query.md",
    "content": "# Persistence Query\n\n## How to get the ReadJournal\n\nThe `ReadJournal` is retrieved via the `akka.persistence.query.PersistenceQuery` extension:\n\nScala\n:  @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/ScaladslSnippets.scala) { #read-journal }\n\nJava\n: @@snip[snip](/core/src/test/java/akka/persistence/jdbc/JavadslSnippets.java) { #read-journal }\n\n## Persistence Query Plugin\n\nThe plugin supports the following queries:\n\n## AllPersistenceIdsQuery and CurrentPersistenceIdsQuery\n\n`allPersistenceIds` and `currentPersistenceIds` are used for retrieving all persistenceIds of all persistent actors.\n\nScala\n:  @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/ScaladslSnippets.scala) { #persistence-ids }\n\nJava\n:  @@snip[snip](/core/src/test/java/akka/persistence/jdbc/JavadslSnippets.java) { #persistence-ids }\n\nThe returned event stream is unordered and you can expect different order for multiple executions of the query.\n\nWhen using the `persistenceIds` query, the stream is not completed when it reaches the end of the currently used persistenceIds,\nbut it continues to push new persistenceIds when new persistent actors are created.\n\nWhen using the `currentPersistenceIds` query, the stream is completed when the end of the current list of persistenceIds is reached,\nthus it is not a `live` query.\n\nThe stream is completed with failure if there is a failure in executing the query in the backend journal.\n\n## EventsByPersistenceIdQuery and CurrentEventsByPersistenceIdQuery\n\n`eventsByPersistenceId` and `currentEventsByPersistenceId` is used for retrieving events for\na specific PersistentActor identified by persistenceId.\n\nScala\n:  @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/ScaladslSnippets.scala) { #events-by-persistence-id }\n\nJava\n:  @@snip[snip](/core/src/test/java/akka/persistence/jdbc/JavadslSnippets.java) { #events-by-persistence-id }\n\nYou can retrieve a subset of all events by specifying 
`fromSequenceNr` and `toSequenceNr` or use `0L` and `Long.MaxValue` respectively to retrieve all events. Note that the corresponding sequence number of each event is provided in the `EventEnvelope`, which makes it possible to resume the stream at a later point from a given sequence number.\n\nThe returned event stream is ordered by sequence number, i.e. the same order as the PersistentActor persisted the events. The same prefix of stream elements (in same order) are returned for multiple executions of the query, except for when events have been deleted.\n\nThe stream is completed with failure if there is a failure in executing the query in the backend journal.\n\n## EventsByTag and CurrentEventsByTag\n\n`eventsByTag` and `currentEventsByTag` are used for retrieving events that were marked with a given\n`tag`, e.g. all domain events of an Aggregate Root type.\n\nScala\n:  @@snip[snip](/core/src/test/scala/akka/persistence/jdbc/ScaladslSnippets.scala) { #events-by-tag }\n\nJava\n:  @@snip[snip](/core/src/test/java/akka/persistence/jdbc/JavadslSnippets.java) { #events-by-tag }\n\n### Performance\n\nIf you see slow database queries for `eventsByTag`, please consider adding a dedicated index for the `tag` column in the `event_tag` table.\n\nFor postgres, the following index can be used:\n\n```\nCREATE INDEX CONCURRENTLY event_tag_tag_idx ON public.event_tag (tag);\n```"
  },
  {
    "path": "docs/src/main/paradox/snapshots.md",
    "content": "---\nproject.description: Snapshot builds via the Sonatype snapshot repository.\n---\n# Snapshots\n\nSnapshots are published to https://repo.akka.io/snapshots repository after every successful build on master.\nAdd the following to your project build definition to resolve Akka Persistence JDBC's snapshots:\n\n## Configure repository\n\nMaven\n:   ```xml\n    <project>\n    ...\n        <repositories>\n          <repositories>\n            <repository>\n              <id>akka-repository</id>\n              <name>Akka library snapshot repository</name>\n              <url>https://repo.akka.io/snapshots</url>\n            </repository>\n          </repositories>\n        </repositories>\n    ...\n    </project>\n    ```\n\nsbt\n:   ```scala\n    resolvers += \"Akka library snapshot repository\".at(\"https://repo.akka.io/snapshots\")\n    ```\n\nGradle\n:   ```gradle\n    repositories {\n      maven {\n        url  \"https://repo.akka.io/snapshots\"\n      }\n    }\n    ```\n\n## Documentation\n\nThe [snapshot documentation](https://doc.akka.io/libraries/akka-persistence-jdbc/snapshot) is updated with every snapshot build.\n\n"
  },
  {
    "path": "integration/LICENSE",
    "content": "﻿LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT\r\n\r\nTHIS LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT (THIS \"AGREEMENT\") IS A LEGAL AGREEMENT BETWEEN YOU (\"USER\") AND LIGHTBEND, INC. (\"LICENSOR\"). \r\nBY CLICKING THE \"I ACCEPT\" BUTTON, OR INSTALLING, COPYING OR OTHERWISE USING LIGHTBEND’S SOFTWARE (THE “SOFTWARE”) AND ITS ASSOCIATED USER DOCUMENTATION, USER ACKNOWLEDGES THAT USER HAS REVIEWED AND ACCEPTS THIS AGREEMENT AND AGREES TO BE BOUND BY ALL OF ITS TERMS. \r\nIF YOU ARE AGREEING TO THIS AGREEMENT AS AN INDIVIDUAL, “USER” REFERS TO YOU INDIVIDUALLY.  IF YOU ARE AGREEING TO THIS AGREEMENT AS A REPRESENTATIVE OF AN ENTITY, YOU REPRESENT THAT YOU HAVE THE AUTHORITY TO BIND THAT ENTITY AND “USER” REFERS TO THAT ENTITY AND ALL THE USERS ACCESSING THE SOFTWARE BY, THROUGH OR ON BEHALF OF THAT ENTITY.  \r\nIF USER DOES NOT AGREE WITH ALL OF THE TERMS OF THIS AGREEMENT, DO NOT INSTALL, COPY OR OTHERWISE USE THE SOFTWARE OR ITS DOCUMENTATION. \r\n\r\n1. DEFINITIONS. \r\n   1. “User System” means User’s website(s), computers, servers and other equipment and software upon and with which the Software is run.\r\n   2. “Documentation” means the user instructions and help files made available by Licensor for use with the Software, as may be updated from time to time by Licensor.\r\n   3. “Intellectual Property Rights” means all intellectual property rights or similar proprietary rights, including \r\n\t(a) patent rights and utility models, \r\n\t(b) copyrights and database rights, \r\n\t(c) trademarks, trade names, domain names and trade dress and the goodwill associated therewith, \r\n\t(d) trade secrets, \r\n\t(e) mask works, and \r\n\t(f) industrial design rights; in each case, including any registrations of, applications to register, and renewals and extensions of, any of the foregoing in any jurisdiction in the world.\r\n   4. 
“Open Source Software” means all software that is available under the GNU Affero General Public License (AGPL), GNU General Public License (GPL), GNU Lesser General Public License (LGPL), Mozilla Public License (MPL), Apache License, BSD licenses, or any other license that is approved by or similar to those approved by the Open Source Initiative (www.opensource.org).\r\n\r\n2. LICENSES AND RESTRICTIONS.  \r\n   1. License.  Subject to User’s compliance with the terms and conditions of this Agreement, Licensor hereby grants to User, during the term of this Agreement, a limited, non-exclusive, non-transferable and non-sublicensable right to \r\n\t(i) install and execute one (1) copy of the Software in accordance with the Documentation, solely in binary form, and not for the benefit of any other person or entity, and \r\n\t(ii) access and use the Documentation, solely for User’s own internally purposes in support of End User’s permitted use of the Software.  \r\n   2. Restrictions.  User shall not, directly or indirectly, or permit any User or third party to: \r\n\t(a) reverse engineer, decompile, disassemble or otherwise attempt to discover the source code or underlying ideas or algorithms of the Software;  \r\n\t(b) modify, translate, or create derivative works based on any element of the Software or any related Documentation (except to the extent applicable laws specifically prohibit such restriction for interoperability purposes, in which case you agree to first contact Licensor and provide Licensor an opportunity to create such changes as are needed for interoperability purposes); \r\n\t(c) use, rent, lease, distribute, sell, resell, assign, or otherwise transfer the Software or any copy thereof; \r\n\t(d) use the Software for timesharing purposes or otherwise for the benefit of any person or entity other than for the benefit of User and Users; \r\n\t(e) remove any proprietary notices from the Software or the Documentation or attempt to defeat any copy protection 
device included with the Software; or \r\n\t(f) use the Software for any purpose other than its intended purpose.\r\n   3. Reservation of Rights.  Nothing in this Agreement shall be construed to give User a right to use or otherwise obtain access to any source code from which the Software is compiled or interpreted.  Except as expressly granted in this Agreement, there are no other licenses granted to User, express, implied or by way of estoppel.  All rights not granted in this Agreement are reserved by Licensor.\r\n   4. Open Source Software.  Notwithstanding the foregoing, certain items of software included with the Software are Open Source Software and remains subject Open Source Software licenses.  Such Open Source Software is not subject to the terms and conditions of this Agreement.  \r\nInstead, each such item of Open Source Software is licensed under the terms of the end user license that accompanies such Open Source Software and nothing in this Agreement limits your rights under, or grants you rights that supersede, the terms and conditions of any applicable end user license for such Open Source Software.  If required by any license for particular Open Source Software, Licensor makes such Open Source Software, and any Licensor modifications to that Open Source Software, available as further described in the Documentation.\r\nUSE OF THE SOFTWARE IN ANY MANNER OTHER THAN AS PROVIDED IN THIS AGREEMENT IS STRICTLY PROHIBITED AND MAY INFRINGE ON THE INTELLECTUAL PROPERTY RIGHTS OF LICENSOR AND/OR ITS LICENSOR(S), SUBJECTING USER TO CIVIL AND CRIMINAL PENALTIES, INCLUDING WITHOUT LIMITATION MONETARY DAMAGES AND IMPRISONMENT FOR COPYRIGHT INFRINGEMENT.\r\n\r\n3. USER OBLIGATIONS.\r\n   1. User System.  
User is responsible for \r\n\t(a) obtaining, deploying and maintaining the User System, and all computer hardware, software, modems, routers and other communications equipment necessary for User and its Users to install and use the Software; and \r\n\t(b) paying all third party fees and access charges incurred in connection with the foregoing.  Licensor shall not be responsible for supplying any hardware, software or other equipment to User under this Agreement.\r\n   2. Compliance with Laws.  User agrees to use the Software in compliance with all applicable laws, including local laws of the country or region in which User resides, and in compliance with all United States export laws and regulations.  User shall not use the Software for any purpose prohibited by applicable law.  \r\n   3. Trademarks and Tradenames.  With regard to all copies of the Software permitted herein, User shall reproduce on such copies all Licensor copyright notices, and other proprietary notices appearing on and in the original copy of the software received from Licensor. Except as set forth in the foregoing sentence, User will not, during the term of this Agreement or thereafter, use any trademark of Licensor, or any word and/or symbol likely to be confused with any Licensor trademark, either alone or in any combination with other words and/or symbols.\r\n\r\n4. SUPPORT AND MAINTENANCE.\r\n   1. Support.  Licensor is not responsible for maintenance or support of the Software, or the equipment on which the Software resides or is used, under this Agreement. By accepting the license granted under this Agreement, User agrees that Licensor will be under no obligation to provide any support, maintenance or service in connection with the Software or such equipment.  \r\n   2. Upgrades and Updates.  
Licensor may from time to time in its sole discretion develop and provide updates for the Software, which may include upgrades, bug fixes, patches, other error corrections, and/or new features (collectively, including related documentation, “Updates”). Updates may also modify or delete in their entirety certain features and functionality. You agree that Licensor has no obligation to provide any Updates or to continue to provide or enable any particular features or functionality. \r\n\r\n5. REPRESENTATIONS AND WARRANTIES; DISCLAIMER.\r\n   1. Mutual Representations and Warranties.  Each party represents, warrants and covenants that: \r\n\t(a) it has the full power and authority to enter into this Agreement and to perform its obligations hereunder, without the need for any consents, approvals or immunities not yet obtained; and \r\n\t(b) its acceptance of and performance under this Agreement shall not breach any oral or written agreement with any third party or any obligation owed by it to any third party to keep any information or materials in confidence or in trust. \r\n   2. Disclaimer.  EXCEPT FOR THE WARRANTIES SET FORTH IN THIS SECTION 5, THE SOFTWARE AND THE DOCUMENTATION ARE PROVIDED ON AN AS-IS BASIS.  USER’S USE OF THE SOFTWARE AND THE DOCUMENTATION IS AT USER’S OWN RISK.  LICENSOR DOES NOT MAKE, AND HEREBY DISCLAIMS, ANY AND ALL OTHER EXPRESS, STATUTORY AND IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT AND TITLE, QUALITY, SUITABILITY, OPERABILITY, CONDITION, SYSTEM INTEGRATION, NON-INTERFERENCE, WORKMANSHIP, TRUTH, ACCURACY (OF DATA OR ANY OTHER INFORMATION OR CONTENT), ABSENCE OF DEFECTS, WHETHER LATENT OR PATENT, AND ANY WARRANTIES ARISING FROM A COURSE OF DEALING, USAGE, OR TRADE PRACTICE.  
LICENSOR ALSO DOES NOT WARRANT THAT THE FUNCTIONS CONTAINED IN, PERFORMED AND/OR PROVIDED BY THE SOFTWARE WILL MEET USER’S REQUIREMENTS, THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR ERROR-FREE, THAT THE SOFTWARE WILL BE COMPATIBLE OR WORK WITH ANY THIRD-PARTY SOFTWARE, APPLICATIONS OR DEVICES, OR THAT DEFECTS IN THE SOFTWARE WILL BE CORRECTED.  USER EXPRESSLY ACKNOWLEDGES AND AGREES THAT, TO THE EXTENT PERMITTED BY APPLICABLE LAW, ITS USE OF THE SOFTWARE IS AT ITS SOLE RISK AND THAT THE ENTIRE RISK AS TO SATISFACTORY QUALITY, PERFORMANCE, ACCURACY AND EFFORT IS WITH USER. USER FURTHER ACKNOWLEDGES AND AGREES THAT THE SOFTWARE IS NOT INTENDED OR SUITABLE FOR USE IN SITUATIONS OR ENVIRONMENTS WHERE THE FAILURE OR TIME DELAYS OF, OR ERRORS OR INACCURACIES IN THE CONTENT, DATA OR INFORMATION PROVIDED BY THE SOFTWARE COULD LEAD TO DEATH, PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE. ANY WARRANTIES MADE BY LICENSOR ARE FOR THE BENEFIT OF USER ONLY AND NOT FOR THE BENEFIT OF ANY THIRD PARTY.  THE SOFTWARE AND THE DOCUMENTATION ARE LICENSED AND NOT SOLD. NO AGENT OF LICENSOR IS AUTHORIZED TO ALTER OR EXPAND THE WARRANTIES OF LICENSOR AS SET FORTH HEREIN.  \r\n\r\n6. INDEMNIFICATION. User shall defend Licensor and its licensors and their respective officers, directors and employees (“Licensor Indemnified Parties”) from and against any and all Third-Party Claims which arise out of or relate to: \r\n\t(a) User’s use or alleged use of the Software other than as permitted under this Agreement; or \r\n\t(b) arising out of or relating to any violation of Section 2.2, or any violation of applicable laws.  User shall pay all damages, costs and expenses, including attorneys’ fees and costs (whether by settlement or award of by a final judicial judgment) incurred by the Licensor Indemnified Parties from any such Third-Party Claim.  In no event shall Licensor settle any claim without User’s prior written approval.  
Licensor may, at its own expense, engage separate counsel to advise Licensor regarding a Third-Party Claim and to participate in the defense of the claim, subject to User’s right to control the defense and settlement. If you are a New Jersey resident, this indemnification clause is to be only as broad and inclusive as is permitted by the law of the state of New Jersey.\r\n\r\n7. CONFIDENTIALITY. \r\n   1. Confidential Information. User acknowledges that the Software contains valuable proprietary information and trade secrets and that unauthorized or improper use of the Software will result in irreparable harm to Licensor for which monetary damages would be inadequate and for which Licensor may be entitled to immediate injunctive relief. Accordingly, you will maintain the confidentiality of the proprietary information and not sell, license, publish, display, distribute, disclose or otherwise make available such proprietary information to any third party, nor use such information except as authorized by this Agreement.\r\n   2. Injunctive Relief.  User agrees that any unauthorized disclosure of confidential information may cause immediate and irreparable injury to Licensor and that, in the event of such breach, Licensor will be entitled, in addition to any other available remedies, to seek immediate injunctive and other equitable relief, without bond and without the necessity of showing actual monetary damages.\r\n\r\n8. PROPRIETARY RIGHTS. \r\n   1. Licensor.  As between Licensor and User, all right, title and interest in the Software, the Documentation, and any other Licensor materials furnished or made available hereunder, and all modifications and enhancements thereof, and all suggestions, ideas and feedback proposed by User regarding the Software, including all copyright rights, patent rights and other Intellectual Property Rights in each of the foregoing, belong to and are retained solely by Licensor or Licensor’s licensors and providers, as applicable.  
User hereby does and will irrevocably assign to Licensor all evaluations, ideas, feedback and suggestions made by User to Licensor regarding the Software or the Documentation (collectively, “Feedback”) and all Intellectual Property Rights in the Feedback.  \r\n\r\n9. LIMITATION OF LIABILITY.\r\n   1. No Consequential Damages.  NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR ANY INDIRECT, INCIDENTAL, SPECIAL, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR ANY DAMAGES FOR LOST DATA, BUSINESS INTERRUPTION, LOST PROFITS, LOST REVENUE OR LOST BUSINESS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT, EVEN IF LICENSOR OR ITS LICENSORS OR USER HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES, INCLUDING WITHOUT LIMITATION, ANY SUCH DAMAGES ARISING OUT OF THE LICENSING, PROVISION OR USE OF THE SOFTWARE OR THE RESULTS OF THE USE OF THE SOFTWARE.  LICENSOR WILL NOT BE LIABLE FOR THE COST OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES.\r\n   2. LIMITS ON LIABILITY.  NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR CUMULATIVE, AGGREGATE DAMAGES GREATER THAN FIVE HUNDRED DOLLARS (US $500).  \r\n   3. ESSENTIAL PURPOSE.  USER ACKNOWLEDGES THAT THE TERMS IN THIS SECTION 9 (LIMITATION OF LIABILITY) SHALL APPLY TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW AND SHALL APPLY EVEN IF AN EXCLUSIVE OR LIMITED REMEDY STATED HEREIN FAILS OF ITS ESSENTIAL PURPOSE. SOME JURISDICTIONS DO NOT ALLOW CERTAIN LIMITATIONS OF LIABILITY, SO SOME OR ALL OF THE ABOVE LIMITATIONS OF LIABILITY MAY NOT APPLY TO YOU.  IF YOU ARE A NEW JERSEY RESIDENT, THIS LIMITATION OF LIABILITY SECTION IS TO BE ONLY AS BROAD AND INCLUSIVE AS IS PERMITTED BY THE LAW OF THE STATE OF NEW JERSEY.\r\n\r\n10. TERM AND TERMINATION.  \r\n   1. Term.  This Agreement and User’s right to use the Software commences on earlier of the date that User: \r\n\t(a) installs the Software, \r\n\t(b) begins using the Software or \r\n\t(c) otherwise demonstrates assent to this Agreement.  
\r\n\tUser’s right to use the Software shall continue until such time as this Agreement is terminated (the “Term”).  \r\n   2. Termination for Cause.  A party may terminate this Agreement, upon written notice to the other party in the event the other party files a petition for bankruptcy or has a petition for bankruptcy filed against it that is not dismissed within sixty (60) calendar days after filing  or admits its inability to pay its debts as they mature, makes an assignment for the benefit of its creditors or ceases to function as a going concern or to conduct its operations in the normal course of business and such termination shall occur immediately upon notice.  Licensor may terminate this Agreement at any time without notice if it ceases to support the Software, which Licensor may do in its sole discretion. In addition, this Agreement will terminate immediately and automatically without any notice if User breaches any of its terms and conditions.\r\n   3. Termination for Convenience.  Either party may terminate this Agreement for convenience on at least thirty (30) calendar days prior written notice to the other party.  User may also terminate this Agreement by ceasing all use of the Software.\r\n   4. Effects of Termination.  Upon expiration or termination of this Agreement, User’s shall cease all use of the Software and the Documentation and shall destroy all copies of the Software in User’s possession or control.\r\n   5. Survival.  This Section and Sections 1, 2.2 (Restrictions), 2.3 (Reservation of Rights), 2.4 (Open Source Software), 5.2 (Disclaimer), 6 (Indemnification), 7 (Confidentiality), 8 (Proprietary Rights), 9 (Limitation of Liability), 10.4 (Effects of Termination) and 11 (Miscellaneous) shall survive any termination or expiration of this Agreement. \r\n\r\n11. MISCELLANEOUS.\r\n   1. Notices.  
Licensor may give notice to User by means of a general notice through electronic mail to User’s e-mail address, or by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service to User’s address on record with Licensor. User may give notice to Licensor by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service addressed to Licensor, Lightbend Inc., 580 California, #1231, San Francisco, CA 94104, Attention: User Support.  Notice shall be deemed to have been given upon receipt or, if earlier, two (2) business days after mailing, as applicable. All communications and notices to be made or given pursuant to this Agreement shall be in the English language.  \r\n   2. Governing Law.  This Agreement and the rights and obligations of the parties to and under this agreement shall be governed by and construed under the laws of the United States and the State of California as applied to agreements entered into and to be performed in such State without giving effect to conflicts of laws rules or principles.  The parties agree that the United Nations Convention on Contracts for the International Sale of Goods is specifically excluded from application to this Agreement and that the application of the Uniform Computer Information Transactions Act (UCITA) is specifically disclaimed.  Any dispute arising out of or in connection with this Agreement, including but not limited to any question regarding its existence, interpretation, validity, performance, or termination, or any dispute between the parties arising from the parties' relationship created by this Agreement, shall be referred to and finally resolved by arbitration administered by the American Arbitration Association under its rules.  The number of arbitrators shall be one (1).  The parties shall endeavor to agree upon the sole arbitrator and jointly nominate the arbitrator.  
If the parties cannot agree upon the sole arbitrator within a time prescribed by AAA, the parties shall request the AAA to propose five (5) arbitrators and each party shall rank the proposed arbitrators.  The AAA shall appoint an arbitrator from the list of five (5), based upon the parties' rankings.  \r\n   3. U.S. Government Users.  If User is a Federal Government entity, Licensor provides the Software and the Documentation, including related software and technology, for ultimate Federal Government end use solely in accordance with the following:  Government technical data rights include only those rights customarily provided to the public with a commercial item or process and Government software rights related to the Software and the Documentation include only those rights customarily provided to the public, as defined in this Agreement.  The technical data rights and customary commercial software license is provided in accordance with FAR 12.211 (Technical Data) and FAR 12.212 (Software) and, for Department of Defense transactions, DFAR 252.227-7015 (Technical Data – Commercial Items) and DFAR 227.7202-3 (Rights in Commercial Computer Software or Computer Software Documentation).  If greater rights are needed, a mutually acceptable written addendum specifically conveying such rights must be included in this Agreement. \r\n   4. Export.  The Software utilizes software and technology that may be subject to United States and foreign export controls. User acknowledges and agrees that the Software shall not be used, and none of the underlying information, software, or technology may be transferred or otherwise exported or re-exported to countries as to which the United States maintains an embargo (collectively, “Embargoed Countries”), or to or by a national or resident thereof, or any person or entity on the U.S. Department of Treasury’s List of Specially Designated Nationals or the U.S. 
Department of Commerce’s Table of Denial Orders (collectively, “Designated Nationals”). The lists of Embargoed Countries and Designated Nationals are subject to change without notice. By using the Software, User represents and warrants that it is not located in, under the control of, or a national or resident of an Embargoed Country or Designated National. The Software may use encryption technology that is subject to licensing requirements under the U.S. Export Administration Regulations, 15 C.F.R. Parts 730-774 and Council Regulation (EC) No. 1334/2000. User agrees to comply strictly with all applicable export laws and assume sole responsibility for obtaining licenses to export or re-export as may be required. Licensor and its licensors make no representation that the Software is appropriate or available for use in other locations. By using the Software, User represents and warrants that it is not located in any such country or on any such list.\r\n   5. General.  User shall not assign its rights hereunder, or delegate the performance of any of its duties or obligations hereunder, whether by merger, acquisition, sale of assets, operation of law, or otherwise, without the prior written consent of Licensor.  Any purported assignment in violation of the preceding sentence is null and void.  Subject to the foregoing, this Agreement shall be binding upon, and inure to the benefit of, the successors and assigns of the parties thereto.  Except as otherwise specified in this Agreement, this Agreement may be amended or supplemented only by a writing that refers explicitly to this Agreement and that is signed on behalf of both parties.  No waiver will be implied from conduct or failure to enforce rights.  No waiver will be effective unless in a writing signed on behalf of the party against whom the waiver is asserted.  
If any of this Agreement is found invalid or unenforceable that term will be enforced to the maximum extent permitted by law and the remainder of this Agreement will remain in full force.  \r\nNothing contained herein shall be construed as creating an agency, partnership, or other form of joint enterprise between the parties.  \r\nThis Agreement constitutes the entire agreement between the parties relating to this subject matter and supersedes all prior or simultaneous understandings, representations, discussions, negotiations, and agreements, whether written or oral.  \r\nNeither party shall be liable to the other party or any third party for failure or delay in performing its obligations under this Agreement when such failure or delay is due to any cause beyond the control of the party concerned, including, without limitation, force majeure, governmental orders or restrictions, fire, or flood, provided that upon cessation of such events such party shall thereupon promptly perform or complete the performance of its obligations hereunder."
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/AllPersistenceIdsTest.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{\n  AllPersistenceIdsTest,\n  MysqlCleaner,\n  OracleCleaner,\n  PostgresCleaner,\n  SqlServerCleaner\n}\n\nclass PostgresScalaAllPersistenceIdsTest extends AllPersistenceIdsTest(\"postgres-application.conf\") with PostgresCleaner\n\nclass MySQLScalaAllPersistenceIdsTest extends AllPersistenceIdsTest(\"mysql-application.conf\") with MysqlCleaner\n\nclass OracleScalaAllPersistenceIdsTest extends AllPersistenceIdsTest(\"oracle-application.conf\") with OracleCleaner\n\nclass SqlServerScalaAllPersistenceIdsTest\n    extends AllPersistenceIdsTest(\"sqlserver-application.conf\")\n    with SqlServerCleaner\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/CurrentEventsByPersistenceIdTest.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{\n  CurrentEventsByPersistenceIdTest,\n  MysqlCleaner,\n  OracleCleaner,\n  PostgresCleaner,\n  SqlServerCleaner\n}\n\n// Note: these tests use the shared-db configs, the test for all (so not only current) events use the regular db config\n\nclass PostgresScalaCurrentEventsByPersistenceIdTest\n    extends CurrentEventsByPersistenceIdTest(\"postgres-shared-db-application.conf\")\n    with PostgresCleaner\n\nclass MySQLScalaCurrentEventsByPersistenceIdTest\n    extends CurrentEventsByPersistenceIdTest(\"mysql-shared-db-application.conf\")\n    with MysqlCleaner\n\nclass OracleScalaCurrentEventsByPersistenceIdTest\n    extends CurrentEventsByPersistenceIdTest(\"oracle-shared-db-application.conf\")\n    with OracleCleaner\n\nclass SqlServerScalaCurrentEventsByPersistenceIdTest\n    extends CurrentEventsByPersistenceIdTest(\"sqlserver-shared-db-application.conf\")\n    with SqlServerCleaner\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/CurrentEventsByTagTest.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{\n  CurrentEventsByTagTest,\n  MysqlCleaner,\n  OracleCleaner,\n  PostgresCleaner,\n  SqlServerCleaner\n}\n\n// Note: these tests use the shared-db configs, the test for all (so not only current) events use the regular db config\n\nclass PostgresScalaCurrentEventsByTagTest\n    extends CurrentEventsByTagTest(\"postgres-shared-db-application.conf\")\n    with PostgresCleaner\n\nclass MySQLScalaCurrentEventsByTagTest\n    extends CurrentEventsByTagTest(\"mysql-shared-db-application.conf\")\n    with MysqlCleaner\n\nclass OracleScalaCurrentEventsByTagTest\n    extends CurrentEventsByTagTest(\"oracle-shared-db-application.conf\")\n    with OracleCleaner\n\nclass SqlServerScalaCurrentEventsByTagTest\n    extends CurrentEventsByTagTest(\"sqlserver-shared-db-application.conf\")\n    with SqlServerCleaner\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/CurrentPersistenceIdsTest.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{\n  CurrentPersistenceIdsTest,\n  MysqlCleaner,\n  OracleCleaner,\n  PostgresCleaner,\n  SqlServerCleaner\n}\n\n// Note: these tests use the shared-db configs, the test for all persistence ids use the regular db config\nclass PostgresScalaCurrentPersistenceIdsTest\n    extends CurrentPersistenceIdsTest(\"postgres-shared-db-application.conf\")\n    with PostgresCleaner\n\nclass MySQLScalaCurrentPersistenceIdsTest\n    extends CurrentPersistenceIdsTest(\"mysql-shared-db-application.conf\")\n    with MysqlCleaner\n\nclass OracleScalaCurrentPersistenceIdsTest\n    extends CurrentPersistenceIdsTest(\"oracle-shared-db-application.conf\")\n    with OracleCleaner\n\nclass SqlServerScalaCurrentPersistenceIdsTest\n    extends CurrentPersistenceIdsTest(\"sqlserver-application.conf\")\n    with SqlServerCleaner\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/EventAdapterTest.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{ EventAdapterTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }\n\nclass PostgresScalaEventAdapterTest extends EventAdapterTest(\"postgres-application.conf\") with PostgresCleaner\n\nclass MySQLScalaEventAdapterTest extends EventAdapterTest(\"mysql-application.conf\") with MysqlCleaner\n\nclass OracleScalaEventAdapterTest extends EventAdapterTest(\"oracle-application.conf\") with OracleCleaner\n\nclass SqlServerScalaEventAdapterTest extends EventAdapterTest(\"sqlserver-application.conf\") with SqlServerCleaner\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/EventSourcedCleanupTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2024 Lightbend Inc. <https://www.lightbend.com>\n */\n\npackage akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.cleanup.scaladsl.EventSourcedCleanupTest\nimport akka.persistence.jdbc.query.{ MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }\n\n// Note: these tests use the shared-db configs, the test for all (so not only current) events use the regular db config\n\nclass PostgresEventSourcedCleanupTest\n    extends EventSourcedCleanupTest(\"postgres-shared-db-application.conf\")\n    with PostgresCleaner\n\nclass MySQLEventSourcedCleanupTest extends EventSourcedCleanupTest(\"mysql-shared-db-application.conf\") with MysqlCleaner\n\nclass OracleEventSourcedCleanupTest\n    extends EventSourcedCleanupTest(\"oracle-shared-db-application.conf\")\n    with OracleCleaner\n\nclass SqlServerEventSourcedCleanupTest\n    extends EventSourcedCleanupTest(\"sqlserver-shared-db-application.conf\")\n    with SqlServerCleaner\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/EventsByPersistenceIdTest.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{\n  EventsByPersistenceIdTest,\n  MysqlCleaner,\n  OracleCleaner,\n  PostgresCleaner,\n  SqlServerCleaner\n}\n\nclass PostgresScalaEventsByPersistenceIdTest\n    extends EventsByPersistenceIdTest(\"postgres-application.conf\")\n    with PostgresCleaner\n\nclass MySQLScalaEventsByPersistenceIdTest extends EventsByPersistenceIdTest(\"mysql-application.conf\") with MysqlCleaner\n\nclass OracleScalaEventsByPersistenceIdTest\n    extends EventsByPersistenceIdTest(\"oracle-application.conf\")\n    with OracleCleaner\n\nclass SqlServerScalaEventsByPersistenceIdTest\n    extends EventsByPersistenceIdTest(\"sqlserver-application.conf\")\n    with SqlServerCleaner\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/EventsByTagMigrationTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2024 Lightbend Inc. <https://www.lightbend.com>\n */\n\npackage akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{\n  EventsByTagMigrationTest,\n  MysqlCleaner,\n  OracleCleaner,\n  PostgresCleaner,\n  SqlServerCleaner\n}\n\nclass PostgresScalaEventsByTagMigrationTest\n    extends EventsByTagMigrationTest(\"postgres-application.conf\")\n    with PostgresCleaner {}\n\nclass MySQLScalaEventByTagMigrationTest extends EventsByTagMigrationTest(\"mysql-application.conf\") with MysqlCleaner {\n\n  override def dropLegacyFKConstraint(): Unit =\n    dropConstraint(constraintType = \"FOREIGN KEY\", constraintDialect = \"FOREIGN KEY\")\n\n  override def dropLegacyPKConstraint(): Unit =\n    dropConstraint(constraintType = \"PRIMARY KEY\", constraintDialect = \"\", constraintNameDialect = \"KEY\")\n\n  override def addNewPKConstraint(): Unit =\n    addPKConstraint(constraintNameDialect = \"\")\n\n  override def addNewFKConstraint(): Unit =\n    addFKConstraint()\n\n  override def migrateLegacyRows(): Unit =\n    fillNewColumn(\n      joinDialect = joinSQL,\n      pidSetDialect =\n        s\"${tagTableCfg.tableName}.${tagTableCfg.columnNames.persistenceId} = ${journalTableName}.${journalTableCfg.columnNames.persistenceId}\",\n      seqNrSetDialect =\n        s\"${tagTableCfg.tableName}.${tagTableCfg.columnNames.sequenceNumber} = ${journalTableName}.${journalTableCfg.columnNames.sequenceNumber}\")\n}\n\nclass OracleScalaEventByTagMigrationTest\n    extends EventsByTagMigrationTest(\"oracle-application.conf\")\n    with OracleCleaner {\n\n  override def addNewColumn(): Unit = {\n    // mock event_id not null, in order to change it to null later\n    alterColumn(alterDialect = \"MODIFY\", changeToDialect = \"NOT NULL\")\n  }\n\n  override def dropLegacyFKConstraint(): Unit =\n    dropConstraint(constraintTableName = \"USER_CONSTRAINTS\", 
constraintType = \"R\")\n\n  override def dropLegacyPKConstraint(): Unit =\n    dropConstraint(constraintTableName = \"USER_CONSTRAINTS\", constraintType = \"P\")\n\n  override def migrateLegacyRows(): Unit =\n    withStatement { stmt =>\n      stmt.execute(s\"\"\"UPDATE ${tagTableCfg.tableName}\n                       |SET (${tagTableCfg.columnNames.persistenceId}, ${tagTableCfg.columnNames.sequenceNumber}) = (\n                       |    SELECT ${journalTableCfg.columnNames.persistenceId}, ${journalTableCfg.columnNames.sequenceNumber}\n                       |    ${fromSQL}\n                       |)\n                       |WHERE EXISTS (\n                       |    SELECT 1\n                       |    ${fromSQL}\n                       |)\"\"\".stripMargin)\n    }\n}\n\nclass SqlServerScalaEventByTagMigrationTest\n    extends EventsByTagMigrationTest(\"sqlserver-application.conf\")\n    with SqlServerCleaner {\n\n  override def addNewPKConstraint(): Unit = {\n    // Change new column not null\n    alterColumn(columnName = tagTableCfg.columnNames.persistenceId, changeToDialect = \"NVARCHAR(255) NOT NULL\")\n    alterColumn(columnName = tagTableCfg.columnNames.sequenceNumber, changeToDialect = \"NUMERIC(10,0) NOT NULL\")\n    super.addNewPKConstraint()\n  }\n}\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/EventsByTagTest.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{ EventsByTagTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }\n\nclass PostgresScalaEventsByTagTest extends EventsByTagTest(\"postgres-application.conf\") with PostgresCleaner\n\nclass MySQLScalaEventByTagTest extends EventsByTagTest(\"mysql-application.conf\") with MysqlCleaner\n\nclass OracleScalaEventByTagTest extends EventsByTagTest(\"oracle-application.conf\") with OracleCleaner\n\nclass SqlServerScalaEventByTagTest extends EventsByTagTest(\"sqlserver-application.conf\") with SqlServerCleaner\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/HardDeleteQueryTest.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{\n  HardDeleteQueryTest,\n  MysqlCleaner,\n  OracleCleaner,\n  PostgresCleaner,\n  SqlServerCleaner\n}\n\nclass PostgresHardDeleteQueryTest extends HardDeleteQueryTest(\"postgres-application.conf\") with PostgresCleaner\n\nclass MySQLHardDeleteQueryTest extends HardDeleteQueryTest(\"mysql-application.conf\") with MysqlCleaner\n\nclass OracleHardDeleteQueryTest extends HardDeleteQueryTest(\"oracle-application.conf\") with OracleCleaner\n\nclass SqlServerHardDeleteQueryTest extends HardDeleteQueryTest(\"sqlserver-application.conf\") with SqlServerCleaner\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/JdbcJournalPerfSpec.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.journal.JdbcJournalPerfSpec\nimport akka.persistence.jdbc.testkit.internal.MySQL\nimport akka.persistence.jdbc.testkit.internal.Oracle\nimport akka.persistence.jdbc.testkit.internal.Postgres\nimport akka.persistence.jdbc.testkit.internal.SqlServer\nimport com.typesafe.config.ConfigFactory\n\nclass PostgresJournalPerfSpec extends JdbcJournalPerfSpec(ConfigFactory.load(\"postgres-application.conf\"), Postgres) {\n  override def eventsCount: Int = 100\n}\n\nclass PostgresJournalPerfSpecSharedDb\n    extends JdbcJournalPerfSpec(ConfigFactory.load(\"postgres-shared-db-application.conf\"), Postgres) {\n  override def eventsCount: Int = 100\n}\n\nclass MySQLJournalPerfSpec extends JdbcJournalPerfSpec(ConfigFactory.load(\"mysql-application.conf\"), MySQL) {\n  override def eventsCount: Int = 100\n}\n\nclass MySQLJournalPerfSpecSharedDb\n    extends JdbcJournalPerfSpec(ConfigFactory.load(\"mysql-shared-db-application.conf\"), MySQL) {\n  override def eventsCount: Int = 100\n}\n\nclass OracleJournalPerfSpec extends JdbcJournalPerfSpec(ConfigFactory.load(\"oracle-application.conf\"), Oracle) {\n  override def eventsCount: Int = 100\n}\n\nclass OracleJournalPerfSpecSharedDb\n    extends JdbcJournalPerfSpec(ConfigFactory.load(\"oracle-shared-db-application.conf\"), Oracle) {\n  override def eventsCount: Int = 100\n}\n\nclass SqlServerJournalPerfSpec\n    extends JdbcJournalPerfSpec(ConfigFactory.load(\"sqlserver-application.conf\"), SqlServer) {\n  override def eventsCount: Int = 100\n}\n\nclass SqlServerJournalPerfSpecSharedDb\n    extends JdbcJournalPerfSpec(ConfigFactory.load(\"sqlserver-shared-db-application.conf\"), SqlServer) {\n  override def eventsCount: Int = 100\n}\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/JdbcJournalSpec.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.journal.JdbcJournalSpec\nimport akka.persistence.jdbc.testkit.internal.{ MySQL, Oracle, Postgres, SqlServer }\nimport com.typesafe.config.ConfigFactory\n\nclass PostgresJournalSpec extends JdbcJournalSpec(ConfigFactory.load(\"postgres-application.conf\"), Postgres)\nclass PostgresJournalSpecSharedDb\n    extends JdbcJournalSpec(ConfigFactory.load(\"postgres-shared-db-application.conf\"), Postgres)\n\nclass MySQLJournalSpec extends JdbcJournalSpec(ConfigFactory.load(\"mysql-application.conf\"), MySQL)\nclass MySQLJournalSpecSharedDb extends JdbcJournalSpec(ConfigFactory.load(\"mysql-shared-db-application.conf\"), MySQL)\n\nclass OracleJournalSpec extends JdbcJournalSpec(ConfigFactory.load(\"oracle-application.conf\"), Oracle)\nclass OracleJournalSpecSharedDb extends JdbcJournalSpec(ConfigFactory.load(\"oracle-shared-db-application.conf\"), Oracle)\n\nclass SqlServerJournalSpec extends JdbcJournalSpec(ConfigFactory.load(\"sqlserver-application.conf\"), SqlServer)\nclass SqlServerJournalSpecSharedDb\n    extends JdbcJournalSpec(ConfigFactory.load(\"sqlserver-shared-db-application.conf\"), SqlServer)\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/JdbcSnapshotStoreSpec.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.snapshot.JdbcSnapshotStoreSpec\nimport akka.persistence.jdbc.testkit.internal.MySQL\nimport akka.persistence.jdbc.testkit.internal.Oracle\nimport akka.persistence.jdbc.testkit.internal.Postgres\nimport akka.persistence.jdbc.testkit.internal.SqlServer\nimport com.typesafe.config.ConfigFactory\n\nclass PostgresSnapshotStoreSpec extends JdbcSnapshotStoreSpec(ConfigFactory.load(\"postgres-application.conf\"), Postgres)\n\nclass MySQLSnapshotStoreSpec extends JdbcSnapshotStoreSpec(ConfigFactory.load(\"mysql-application.conf\"), MySQL)\n\nclass OracleSnapshotStoreSpec extends JdbcSnapshotStoreSpec(ConfigFactory.load(\"oracle-application.conf\"), Oracle)\n\nclass SqlServerSnapshotStoreSpec\n    extends JdbcSnapshotStoreSpec(ConfigFactory.load(\"sqlserver-application.conf\"), SqlServer)\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/JournalDaoStreamMessagesMemoryTest.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{\n  JournalDaoStreamMessagesMemoryTest,\n  MysqlCleaner,\n  OracleCleaner,\n  PostgresCleaner,\n  SqlServerCleaner\n}\n\nclass PostgresJournalDaoStreamMessagesMemoryTest\n    extends JournalDaoStreamMessagesMemoryTest(\"postgres-application.conf\")\n    with PostgresCleaner\n\nclass MySQLJournalDaoStreamMessagesMemoryTest\n    extends JournalDaoStreamMessagesMemoryTest(\"mysql-application.conf\")\n    with MysqlCleaner\n\nclass OracleJournalDaoStreamMessagesMemoryTest\n    extends JournalDaoStreamMessagesMemoryTest(\"oracle-application.conf\")\n    with OracleCleaner\n\nclass SqlServerJournalDaoStreamMessagesMemoryTest\n    extends JournalDaoStreamMessagesMemoryTest(\"sqlserver-application.conf\")\n    with SqlServerCleaner\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/JournalSequenceActorTest.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.query.{\n  JournalSequenceActorTest,\n  MysqlCleaner,\n  OracleCleaner,\n  PostgresCleaner,\n  SqlServerCleaner\n}\n\nclass PostgresJournalSequenceActorTest\n    extends JournalSequenceActorTest(\"postgres-application.conf\", isOracle = false)\n    with PostgresCleaner\n\nclass MySQLJournalSequenceActorTest\n    extends JournalSequenceActorTest(\"mysql-application.conf\", isOracle = false)\n    with MysqlCleaner\n\nclass OracleJournalSequenceActorTest\n    extends JournalSequenceActorTest(\"oracle-application.conf\", isOracle = true)\n    with OracleCleaner\n\nclass SqlServerJournalSequenceActorTest\n    extends JournalSequenceActorTest(\"sqlserver-application.conf\", isOracle = false)\n    with SqlServerCleaner\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/PostgresDurableStateStorePluginSpec.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport com.typesafe.config.ConfigFactory\nimport slick.jdbc.PostgresProfile\nimport akka.persistence.jdbc.state.scaladsl.DurableStateStorePluginSpec\n\nclass PostgresDurableStateStorePluginSpec\n    extends DurableStateStorePluginSpec(ConfigFactory.load(\"postgres-shared-db-application.conf\"), PostgresProfile) {}\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/PostgresScalaJdbcDurableStateChangesByTagTest.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport com.typesafe.config.ConfigFactory\nimport akka.actor.ActorSystem\nimport akka.persistence.jdbc.state.scaladsl.JdbcDurableStateSpec\nimport akka.persistence.jdbc.testkit.internal.Postgres\n\nclass PostgresScalaJdbcDurableStateStoreQueryTest\n    extends JdbcDurableStateSpec(ConfigFactory.load(\"postgres-shared-db-application.conf\"), Postgres) {\n  implicit lazy val system: ActorSystem =\n    ActorSystem(\"JdbcDurableStateSpec\", config.withFallback(customSerializers))\n}\n"
  },
  {
    "path": "integration/src/test/scala/akka/persistence/jdbc/integration/StoreOnlySerializableMessagesTest.scala",
    "content": "package akka.persistence.jdbc.integration\n\nimport akka.persistence.jdbc.serialization.StoreOnlySerializableMessagesTest\nimport akka.persistence.jdbc.testkit.internal.MySQL\nimport akka.persistence.jdbc.testkit.internal.Oracle\nimport akka.persistence.jdbc.testkit.internal.Postgres\nimport akka.persistence.jdbc.testkit.internal.SqlServer\n\nclass PostgresStoreOnlySerializableMessagesTest\n    extends StoreOnlySerializableMessagesTest(\"postgres-application.conf\", Postgres)\n\nclass MySQLStoreOnlySerializableMessagesTest extends StoreOnlySerializableMessagesTest(\"mysql-application.conf\", MySQL)\n\nclass OracleStoreOnlySerializableMessagesTest\n    extends StoreOnlySerializableMessagesTest(\"oracle-application.conf\", Oracle)\n\nclass SqlServerStoreOnlySerializableMessagesTest\n    extends StoreOnlySerializableMessagesTest(\"sqlserver-application.conf\", SqlServer)\n"
  },
  {
    "path": "migrator/src/main/scala/akka/persistence/jdbc/migrator/JournalMigrator.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.migrator\n\nimport akka.Done\nimport akka.actor.ActorSystem\nimport akka.persistence.PersistentRepr\nimport akka.persistence.jdbc.AkkaSerialization\nimport akka.persistence.jdbc.config.{ JournalConfig, ReadJournalConfig }\nimport akka.persistence.jdbc.db.SlickExtension\nimport akka.persistence.jdbc.journal.dao.JournalQueries\nimport akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalSerializer\nimport akka.persistence.jdbc.journal.dao.JournalTables.{ JournalAkkaSerializationRow, TagRow }\nimport akka.persistence.jdbc.query.dao.legacy.ReadJournalQueries\nimport akka.serialization.{ Serialization, SerializationExtension }\nimport akka.stream.scaladsl.Source\nimport org.slf4j.{ Logger, LoggerFactory }\nimport slick.jdbc._\n\nimport scala.concurrent.{ ExecutionContextExecutor, Future }\nimport scala.util.{ Failure, Success }\n\n/**\n * This will help migrate the legacy journal data onto the new journal schema with the\n * appropriate serialization\n *\n * @param system the actor system\n */\nfinal case class JournalMigrator(profile: JdbcProfile)(implicit system: ActorSystem) {\n  implicit val ec: ExecutionContextExecutor = system.dispatcher\n\n  import profile.api._\n\n  val log: Logger = LoggerFactory.getLogger(getClass)\n\n  // get the various configurations\n  private val journalConfig: JournalConfig = new JournalConfig(\n    system.settings.config.getConfig(JournalMigrator.JournalConfig))\n  private val readJournalConfig: ReadJournalConfig = new ReadJournalConfig(\n    system.settings.config.getConfig(JournalMigrator.ReadJournalConfig))\n\n  // the journal database\n  private val journalDB: JdbcBackend.Database =\n    SlickExtension(system).database(system.settings.config.getConfig(JournalMigrator.ReadJournalConfig)).database\n\n  // get an instance of the new journal 
queries\n  private val newJournalQueries: JournalQueries =\n    new JournalQueries(profile, journalConfig.eventJournalTableConfiguration, journalConfig.eventTagTableConfiguration)\n\n  // let us get the journal reader\n  private val serialization: Serialization = SerializationExtension(system)\n  private val legacyJournalQueries: ReadJournalQueries = new ReadJournalQueries(profile, readJournalConfig)\n  private val serializer: ByteArrayJournalSerializer =\n    new ByteArrayJournalSerializer(serialization, readJournalConfig.pluginConfig.tagSeparator)\n\n  private val bufferSize: Int = journalConfig.daoConfig.bufferSize\n\n  private val query =\n    legacyJournalQueries.JournalTable.result\n      .withStatementParameters(\n        rsType = ResultSetType.ForwardOnly,\n        rsConcurrency = ResultSetConcurrency.ReadOnly,\n        fetchSize = bufferSize)\n      .transactionally\n\n  /**\n   * write all legacy events into the new journal tables applying the proper serialization\n   */\n  def migrate(): Future[Done] = Source\n    .fromPublisher(journalDB.stream(query))\n    .via(serializer.deserializeFlow)\n    .map {\n      case Success((repr, tags, ordering)) => (repr, tags, ordering)\n      case Failure(exception)              => throw exception // blow-up on failure\n    }\n    .map { case (repr, tags, ordering) => serialize(repr, tags, ordering) }\n    // get pages of many records at once\n    .grouped(bufferSize)\n    .mapAsync(1)(records => {\n      val stmt: DBIO[Unit] = records\n        // get all the sql statements for this record as an option\n        .map { case (newRepr, newTags) =>\n          log.debug(s\"migrating event for PersistenceID: ${newRepr.persistenceId} with tags ${newTags.mkString(\",\")}\")\n          writeJournalRowsStatements(newRepr, newTags)\n        }\n        // reduce to 1 statement\n        .foldLeft[DBIO[Unit]](DBIO.successful[Unit] {})((priorStmt, nextStmt) => {\n          priorStmt.andThen(nextStmt)\n        })\n\n      
journalDB.run(stmt)\n    })\n    .run()\n\n  /**\n   * serialize the PersistentRepr and construct a JournalAkkaSerializationRow and set of matching tags\n   *\n   * @param repr the PersistentRepr\n   * @param tags the tags\n   * @param ordering the ordering of the PersistentRepr\n   * @return the tuple of JournalAkkaSerializationRow and set of tags\n   */\n  private def serialize(\n      repr: PersistentRepr,\n      tags: Set[String],\n      ordering: Long): (JournalAkkaSerializationRow, Set[String]) = {\n\n    val serializedPayload: AkkaSerialization.AkkaSerialized =\n      AkkaSerialization.serialize(serialization, repr.payload).get\n\n    val serializedMetadata: Option[AkkaSerialization.AkkaSerialized] =\n      repr.metadata.flatMap(m => AkkaSerialization.serialize(serialization, m).toOption)\n    val row: JournalAkkaSerializationRow = JournalAkkaSerializationRow(\n      ordering,\n      repr.deleted,\n      repr.persistenceId,\n      repr.sequenceNr,\n      repr.writerUuid,\n      repr.timestamp,\n      repr.manifest,\n      serializedPayload.payload,\n      serializedPayload.serId,\n      serializedPayload.serManifest,\n      serializedMetadata.map(_.payload),\n      serializedMetadata.map(_.serId),\n      serializedMetadata.map(_.serManifest))\n\n    (row, tags)\n  }\n\n  private def writeJournalRowsStatements(\n      journalSerializedRow: JournalAkkaSerializationRow,\n      tags: Set[String]): DBIO[Unit] = {\n    val journalInsert: DBIO[Long] = newJournalQueries.JournalTable\n      .returning(newJournalQueries.JournalTable.map(_.ordering))\n      .forceInsert(journalSerializedRow)\n\n    val tagInserts =\n      newJournalQueries.TagTable ++= tags\n        .map(tag =>\n          TagRow(\n            Some(journalSerializedRow.ordering), // legacy tag key enabled by default.\n            Some(journalSerializedRow.persistenceId),\n            Some(journalSerializedRow.sequenceNumber),\n            tag))\n        .toSeq\n\n    journalInsert.flatMap(_ => 
tagInserts.asInstanceOf[DBIO[Unit]])\n  }\n}\n\ncase object JournalMigrator {\n  final val JournalConfig: String = \"jdbc-journal\"\n  final val ReadJournalConfig: String = \"jdbc-read-journal\"\n}\n"
  },
  {
    "path": "migrator/src/main/scala/akka/persistence/jdbc/migrator/SnapshotMigrator.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.migrator\n\nimport akka.actor.ActorSystem\nimport akka.persistence.SnapshotMetadata\nimport akka.persistence.jdbc.config.{ ReadJournalConfig, SnapshotConfig }\nimport akka.persistence.jdbc.db.SlickExtension\nimport akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao\nimport akka.persistence.jdbc.snapshot.dao.DefaultSnapshotDao\nimport akka.persistence.jdbc.snapshot.dao.legacy.{ ByteArraySnapshotSerializer, SnapshotQueries }\nimport akka.persistence.jdbc.snapshot.dao.legacy.SnapshotTables.SnapshotRow\nimport akka.serialization.{ Serialization, SerializationExtension }\nimport akka.stream.scaladsl.{ Sink, Source }\nimport akka.Done\nimport akka.persistence.jdbc.migrator.SnapshotMigrator.{ NoParallelism, SnapshotStoreConfig }\nimport org.slf4j.{ Logger, LoggerFactory }\nimport slick.jdbc\nimport slick.jdbc.{ JdbcBackend, JdbcProfile }\n\nimport scala.concurrent.Future\n\n/**\n * This will help migrate the legacy snapshot data onto the new snapshot schema with the\n * appropriate serialization\n *\n * @param system the actor system\n */\ncase class SnapshotMigrator(profile: JdbcProfile)(implicit system: ActorSystem) {\n  val log: Logger = LoggerFactory.getLogger(getClass)\n\n  import system.dispatcher\n  import profile.api._\n\n  private val snapshotConfig: SnapshotConfig = new SnapshotConfig(system.settings.config.getConfig(SnapshotStoreConfig))\n  private val readJournalConfig: ReadJournalConfig = new ReadJournalConfig(\n    system.settings.config.getConfig(JournalMigrator.ReadJournalConfig))\n\n  private val snapshotDB: jdbc.JdbcBackend.Database =\n    SlickExtension(system).database(system.settings.config.getConfig(SnapshotStoreConfig)).database\n\n  private val journalDB: JdbcBackend.Database =\n    
SlickExtension(system).database(system.settings.config.getConfig(JournalMigrator.ReadJournalConfig)).database\n\n  private val serialization: Serialization = SerializationExtension(system)\n  private val queries: SnapshotQueries = new SnapshotQueries(profile, snapshotConfig.legacySnapshotTableConfiguration)\n  private val serializer: ByteArraySnapshotSerializer = new ByteArraySnapshotSerializer(serialization)\n\n  // get the instance of the default snapshot dao\n  private val defaultSnapshotDao: DefaultSnapshotDao =\n    new DefaultSnapshotDao(snapshotDB, profile, snapshotConfig, serialization)\n\n  // get the instance of the legacy journal DAO\n  private val legacyJournalDao: ByteArrayReadJournalDao =\n    new ByteArrayReadJournalDao(journalDB, profile, readJournalConfig, SerializationExtension(system))\n\n  private def toSnapshotData(row: SnapshotRow): (SnapshotMetadata, Any) = serializer.deserialize(row).get\n\n  /**\n   * migrate the latest snapshot data\n   */\n  def migrateLatest(): Future[Done] = {\n    legacyJournalDao\n      .allPersistenceIdsSource(Long.MaxValue)\n      .mapAsync(NoParallelism) { persistenceId =>\n        // let us fetch the latest snapshot for each persistenceId\n        snapshotDB.run(queries.selectLatestByPersistenceId(persistenceId).result).map { rows =>\n          rows.headOption.map(toSnapshotData).map { case (metadata, value) =>\n            log.debug(s\"migrating snapshot for ${metadata.toString}\")\n            defaultSnapshotDao.save(metadata, value)\n          }\n        }\n      }\n      .runWith(Sink.ignore)\n  }\n\n  /**\n   * migrate all the legacy snapshot schema data into the new snapshot schema\n   */\n  def migrateAll(): Future[Done] = Source\n    .fromPublisher(snapshotDB.stream(queries.SnapshotTable.result))\n    .mapAsync(NoParallelism) { record =>\n      val (metadata, value) = toSnapshotData(record)\n      log.debug(s\"migrating snapshot for ${metadata.toString}\")\n      defaultSnapshotDao.save(metadata, value)\n  
  }\n    .run()\n}\n\ncase object SnapshotMigrator {\n  final val SnapshotStoreConfig: String = \"jdbc-snapshot-store\"\n  final val NoParallelism: Int = 1\n}\n"
  },
  {
    "path": "migrator/src/test/LICENSE",
    "content": "﻿LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT\r\n\r\nTHIS LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT (THIS \"AGREEMENT\") IS A LEGAL AGREEMENT BETWEEN YOU (\"USER\") AND LIGHTBEND, INC. (\"LICENSOR\"). \r\nBY CLICKING THE \"I ACCEPT\" BUTTON, OR INSTALLING, COPYING OR OTHERWISE USING LIGHTBEND’S SOFTWARE (THE “SOFTWARE”) AND ITS ASSOCIATED USER DOCUMENTATION, USER ACKNOWLEDGES THAT USER HAS REVIEWED AND ACCEPTS THIS AGREEMENT AND AGREES TO BE BOUND BY ALL OF ITS TERMS. \r\nIF YOU ARE AGREEING TO THIS AGREEMENT AS AN INDIVIDUAL, “USER” REFERS TO YOU INDIVIDUALLY.  IF YOU ARE AGREEING TO THIS AGREEMENT AS A REPRESENTATIVE OF AN ENTITY, YOU REPRESENT THAT YOU HAVE THE AUTHORITY TO BIND THAT ENTITY AND “USER” REFERS TO THAT ENTITY AND ALL THE USERS ACCESSING THE SOFTWARE BY, THROUGH OR ON BEHALF OF THAT ENTITY.  \r\nIF USER DOES NOT AGREE WITH ALL OF THE TERMS OF THIS AGREEMENT, DO NOT INSTALL, COPY OR OTHERWISE USE THE SOFTWARE OR ITS DOCUMENTATION. \r\n\r\n1. DEFINITIONS. \r\n   1. “User System” means User’s website(s), computers, servers and other equipment and software upon and with which the Software is run.\r\n   2. “Documentation” means the user instructions and help files made available by Licensor for use with the Software, as may be updated from time to time by Licensor.\r\n   3. “Intellectual Property Rights” means all intellectual property rights or similar proprietary rights, including \r\n\t(a) patent rights and utility models, \r\n\t(b) copyrights and database rights, \r\n\t(c) trademarks, trade names, domain names and trade dress and the goodwill associated therewith, \r\n\t(d) trade secrets, \r\n\t(e) mask works, and \r\n\t(f) industrial design rights; in each case, including any registrations of, applications to register, and renewals and extensions of, any of the foregoing in any jurisdiction in the world.\r\n   4. 
“Open Source Software” means all software that is available under the GNU Affero General Public License (AGPL), GNU General Public License (GPL), GNU Lesser General Public License (LGPL), Mozilla Public License (MPL), Apache License, BSD licenses, or any other license that is approved by or similar to those approved by the Open Source Initiative (www.opensource.org).\r\n\r\n2. LICENSES AND RESTRICTIONS.  \r\n   1. License.  Subject to User’s compliance with the terms and conditions of this Agreement, Licensor hereby grants to User, during the term of this Agreement, a limited, non-exclusive, non-transferable and non-sublicensable right to \r\n\t(i) install and execute one (1) copy of the Software in accordance with the Documentation, solely in binary form, and not for the benefit of any other person or entity, and \r\n\t(ii) access and use the Documentation, solely for User’s own internally purposes in support of End User’s permitted use of the Software.  \r\n   2. Restrictions.  User shall not, directly or indirectly, or permit any User or third party to: \r\n\t(a) reverse engineer, decompile, disassemble or otherwise attempt to discover the source code or underlying ideas or algorithms of the Software;  \r\n\t(b) modify, translate, or create derivative works based on any element of the Software or any related Documentation (except to the extent applicable laws specifically prohibit such restriction for interoperability purposes, in which case you agree to first contact Licensor and provide Licensor an opportunity to create such changes as are needed for interoperability purposes); \r\n\t(c) use, rent, lease, distribute, sell, resell, assign, or otherwise transfer the Software or any copy thereof; \r\n\t(d) use the Software for timesharing purposes or otherwise for the benefit of any person or entity other than for the benefit of User and Users; \r\n\t(e) remove any proprietary notices from the Software or the Documentation or attempt to defeat any copy protection 
device included with the Software; or \r\n\t(f) use the Software for any purpose other than its intended purpose.\r\n   3. Reservation of Rights.  Nothing in this Agreement shall be construed to give User a right to use or otherwise obtain access to any source code from which the Software is compiled or interpreted.  Except as expressly granted in this Agreement, there are no other licenses granted to User, express, implied or by way of estoppel.  All rights not granted in this Agreement are reserved by Licensor.\r\n   4. Open Source Software.  Notwithstanding the foregoing, certain items of software included with the Software are Open Source Software and remains subject Open Source Software licenses.  Such Open Source Software is not subject to the terms and conditions of this Agreement.  \r\nInstead, each such item of Open Source Software is licensed under the terms of the end user license that accompanies such Open Source Software and nothing in this Agreement limits your rights under, or grants you rights that supersede, the terms and conditions of any applicable end user license for such Open Source Software.  If required by any license for particular Open Source Software, Licensor makes such Open Source Software, and any Licensor modifications to that Open Source Software, available as further described in the Documentation.\r\nUSE OF THE SOFTWARE IN ANY MANNER OTHER THAN AS PROVIDED IN THIS AGREEMENT IS STRICTLY PROHIBITED AND MAY INFRINGE ON THE INTELLECTUAL PROPERTY RIGHTS OF LICENSOR AND/OR ITS LICENSOR(S), SUBJECTING USER TO CIVIL AND CRIMINAL PENALTIES, INCLUDING WITHOUT LIMITATION MONETARY DAMAGES AND IMPRISONMENT FOR COPYRIGHT INFRINGEMENT.\r\n\r\n3. USER OBLIGATIONS.\r\n   1. User System.  
User is responsible for \r\n\t(a) obtaining, deploying and maintaining the User System, and all computer hardware, software, modems, routers and other communications equipment necessary for User and its Users to install and use the Software; and \r\n\t(b) paying all third party fees and access charges incurred in connection with the foregoing.  Licensor shall not be responsible for supplying any hardware, software or other equipment to User under this Agreement.\r\n   2. Compliance with Laws.  User agrees to use the Software in compliance with all applicable laws, including local laws of the country or region in which User resides, and in compliance with all United States export laws and regulations.  User shall not use the Software for any purpose prohibited by applicable law.  \r\n   3. Trademarks and Tradenames.  With regard to all copies of the Software permitted herein, User shall reproduce on such copies all Licensor copyright notices, and other proprietary notices appearing on and in the original copy of the software received from Licensor. Except as set forth in the foregoing sentence, User will not, during the term of this Agreement or thereafter, use any trademark of Licensor, or any word and/or symbol likely to be confused with any Licensor trademark, either alone or in any combination with other words and/or symbols.\r\n\r\n4. SUPPORT AND MAINTENANCE.\r\n   1. Support.  Licensor is not responsible for maintenance or support of the Software, or the equipment on which the Software resides or is used, under this Agreement. By accepting the license granted under this Agreement, User agrees that Licensor will be under no obligation to provide any support, maintenance or service in connection with the Software or such equipment.  \r\n   2. Upgrades and Updates.  
Licensor may from time to time in its sole discretion develop and provide updates for the Software, which may include upgrades, bug fixes, patches, other error corrections, and/or new features (collectively, including related documentation, “Updates”). Updates may also modify or delete in their entirety certain features and functionality. You agree that Licensor has no obligation to provide any Updates or to continue to provide or enable any particular features or functionality. \r\n\r\n5. REPRESENTATIONS AND WARRANTIES; DISCLAIMER.\r\n   1. Mutual Representations and Warranties.  Each party represents, warrants and covenants that: \r\n\t(a) it has the full power and authority to enter into this Agreement and to perform its obligations hereunder, without the need for any consents, approvals or immunities not yet obtained; and \r\n\t(b) its acceptance of and performance under this Agreement shall not breach any oral or written agreement with any third party or any obligation owed by it to any third party to keep any information or materials in confidence or in trust. \r\n   2. Disclaimer.  EXCEPT FOR THE WARRANTIES SET FORTH IN THIS SECTION 5, THE SOFTWARE AND THE DOCUMENTATION ARE PROVIDED ON AN AS-IS BASIS.  USER’S USE OF THE SOFTWARE AND THE DOCUMENTATION IS AT USER’S OWN RISK.  LICENSOR DOES NOT MAKE, AND HEREBY DISCLAIMS, ANY AND ALL OTHER EXPRESS, STATUTORY AND IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT AND TITLE, QUALITY, SUITABILITY, OPERABILITY, CONDITION, SYSTEM INTEGRATION, NON-INTERFERENCE, WORKMANSHIP, TRUTH, ACCURACY (OF DATA OR ANY OTHER INFORMATION OR CONTENT), ABSENCE OF DEFECTS, WHETHER LATENT OR PATENT, AND ANY WARRANTIES ARISING FROM A COURSE OF DEALING, USAGE, OR TRADE PRACTICE.  
LICENSOR ALSO DOES NOT WARRANT THAT THE FUNCTIONS CONTAINED IN, PERFORMED AND/OR PROVIDED BY THE SOFTWARE WILL MEET USER’S REQUIREMENTS, THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR ERROR-FREE, THAT THE SOFTWARE WILL BE COMPATIBLE OR WORK WITH ANY THIRD-PARTY SOFTWARE, APPLICATIONS OR DEVICES, OR THAT DEFECTS IN THE SOFTWARE WILL BE CORRECTED.  USER EXPRESSLY ACKNOWLEDGES AND AGREES THAT, TO THE EXTENT PERMITTED BY APPLICABLE LAW, ITS USE OF THE SOFTWARE IS AT ITS SOLE RISK AND THAT THE ENTIRE RISK AS TO SATISFACTORY QUALITY, PERFORMANCE, ACCURACY AND EFFORT IS WITH USER. USER FURTHER ACKNOWLEDGES AND AGREES THAT THE SOFTWARE IS NOT INTENDED OR SUITABLE FOR USE IN SITUATIONS OR ENVIRONMENTS WHERE THE FAILURE OR TIME DELAYS OF, OR ERRORS OR INACCURACIES IN THE CONTENT, DATA OR INFORMATION PROVIDED BY THE SOFTWARE COULD LEAD TO DEATH, PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE. ANY WARRANTIES MADE BY LICENSOR ARE FOR THE BENEFIT OF USER ONLY AND NOT FOR THE BENEFIT OF ANY THIRD PARTY.  THE SOFTWARE AND THE DOCUMENTATION ARE LICENSED AND NOT SOLD. NO AGENT OF LICENSOR IS AUTHORIZED TO ALTER OR EXPAND THE WARRANTIES OF LICENSOR AS SET FORTH HEREIN.  \r\n\r\n6. INDEMNIFICATION. User shall defend Licensor and its licensors and their respective officers, directors and employees (“Licensor Indemnified Parties”) from and against any and all Third-Party Claims which arise out of or relate to: \r\n\t(a) User’s use or alleged use of the Software other than as permitted under this Agreement; or \r\n\t(b) arising out of or relating to any violation of Section 2.2, or any violation of applicable laws.  User shall pay all damages, costs and expenses, including attorneys’ fees and costs (whether by settlement or award of by a final judicial judgment) incurred by the Licensor Indemnified Parties from any such Third-Party Claim.  In no event shall Licensor settle any claim without User’s prior written approval.  
Licensor may, at its own expense, engage separate counsel to advise Licensor regarding a Third-Party Claim and to participate in the defense of the claim, subject to User’s right to control the defense and settlement. If you are a New Jersey resident, this indemnification clause is to be only as broad and inclusive as is permitted by the law of the state of New Jersey.\r\n\r\n7. CONFIDENTIALITY. \r\n   1. Confidential Information. User acknowledges that the Software contains valuable proprietary information and trade secrets and that unauthorized or improper use of the Software will result in irreparable harm to Licensor for which monetary damages would be inadequate and for which Licensor may be entitled to immediate injunctive relief. Accordingly, you will maintain the confidentiality of the proprietary information and not sell, license, publish, display, distribute, disclose or otherwise make available such proprietary information to any third party, nor use such information except as authorized by this Agreement.\r\n   2. Injunctive Relief.  User agrees that any unauthorized disclosure of confidential information may cause immediate and irreparable injury to Licensor and that, in the event of such breach, Licensor will be entitled, in addition to any other available remedies, to seek immediate injunctive and other equitable relief, without bond and without the necessity of showing actual monetary damages.\r\n\r\n8. PROPRIETARY RIGHTS. \r\n   1. Licensor.  As between Licensor and User, all right, title and interest in the Software, the Documentation, and any other Licensor materials furnished or made available hereunder, and all modifications and enhancements thereof, and all suggestions, ideas and feedback proposed by User regarding the Software, including all copyright rights, patent rights and other Intellectual Property Rights in each of the foregoing, belong to and are retained solely by Licensor or Licensor’s licensors and providers, as applicable.  
User hereby does and will irrevocably assign to Licensor all evaluations, ideas, feedback and suggestions made by User to Licensor regarding the Software or the Documentation (collectively, “Feedback”) and all Intellectual Property Rights in the Feedback.  \r\n\r\n9. LIMITATION OF LIABILITY.\r\n   1. No Consequential Damages.  NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR ANY INDIRECT, INCIDENTAL, SPECIAL, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR ANY DAMAGES FOR LOST DATA, BUSINESS INTERRUPTION, LOST PROFITS, LOST REVENUE OR LOST BUSINESS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT, EVEN IF LICENSOR OR ITS LICENSORS OR USER HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES, INCLUDING WITHOUT LIMITATION, ANY SUCH DAMAGES ARISING OUT OF THE LICENSING, PROVISION OR USE OF THE SOFTWARE OR THE RESULTS OF THE USE OF THE SOFTWARE.  LICENSOR WILL NOT BE LIABLE FOR THE COST OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES.\r\n   2. LIMITS ON LIABILITY.  NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR CUMULATIVE, AGGREGATE DAMAGES GREATER THAN FIVE HUNDRED DOLLARS (US $500).  \r\n   3. ESSENTIAL PURPOSE.  USER ACKNOWLEDGES THAT THE TERMS IN THIS SECTION 9 (LIMITATION OF LIABILITY) SHALL APPLY TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW AND SHALL APPLY EVEN IF AN EXCLUSIVE OR LIMITED REMEDY STATED HEREIN FAILS OF ITS ESSENTIAL PURPOSE. SOME JURISDICTIONS DO NOT ALLOW CERTAIN LIMITATIONS OF LIABILITY, SO SOME OR ALL OF THE ABOVE LIMITATIONS OF LIABILITY MAY NOT APPLY TO YOU.  IF YOU ARE A NEW JERSEY RESIDENT, THIS LIMITATION OF LIABILITY SECTION IS TO BE ONLY AS BROAD AND INCLUSIVE AS IS PERMITTED BY THE LAW OF THE STATE OF NEW JERSEY.\r\n\r\n10. TERM AND TERMINATION.  \r\n   1. Term.  This Agreement and User’s right to use the Software commences on earlier of the date that User: \r\n\t(a) installs the Software, \r\n\t(b) begins using the Software or \r\n\t(c) otherwise demonstrates assent to this Agreement.  
\r\n\tUser’s right to use the Software shall continue until such time as this Agreement is terminated (the “Term”).  \r\n   2. Termination for Cause.  A party may terminate this Agreement, upon written notice to the other party in the event the other party files a petition for bankruptcy or has a petition for bankruptcy filed against it that is not dismissed within sixty (60) calendar days after filing  or admits its inability to pay its debts as they mature, makes an assignment for the benefit of its creditors or ceases to function as a going concern or to conduct its operations in the normal course of business and such termination shall occur immediately upon notice.  Licensor may terminate this Agreement at any time without notice if it ceases to support the Software, which Licensor may do in its sole discretion. In addition, this Agreement will terminate immediately and automatically without any notice if User breaches any of its terms and conditions.\r\n   3. Termination for Convenience.  Either party may terminate this Agreement for convenience on at least thirty (30) calendar days prior written notice to the other party.  User may also terminate this Agreement by ceasing all use of the Software.\r\n   4. Effects of Termination.  Upon expiration or termination of this Agreement, User’s shall cease all use of the Software and the Documentation and shall destroy all copies of the Software in User’s possession or control.\r\n   5. Survival.  This Section and Sections 1, 2.2 (Restrictions), 2.3 (Reservation of Rights), 2.4 (Open Source Software), 5.2 (Disclaimer), 6 (Indemnification), 7 (Confidentiality), 8 (Proprietary Rights), 9 (Limitation of Liability), 10.4 (Effects of Termination) and 11 (Miscellaneous) shall survive any termination or expiration of this Agreement. \r\n\r\n11. MISCELLANEOUS.\r\n   1. Notices.  
Licensor may give notice to User by means of a general notice through electronic mail to User’s e-mail address, or by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service to User’s address on record with Licensor. User may give notice to Licensor by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service addressed to Licensor, Lightbend Inc., 580 California, #1231, San Francisco, CA 94104, Attention: User Support.  Notice shall be deemed to have been given upon receipt or, if earlier, two (2) business days after mailing, as applicable. All communications and notices to be made or given pursuant to this Agreement shall be in the English language.  \r\n   2. Governing Law.  This Agreement and the rights and obligations of the parties to and under this agreement shall be governed by and construed under the laws of the United States and the State of California as applied to agreements entered into and to be performed in such State without giving effect to conflicts of laws rules or principles.  The parties agree that the United Nations Convention on Contracts for the International Sale of Goods is specifically excluded from application to this Agreement and that the application of the Uniform Computer Information Transactions Act (UCITA) is specifically disclaimed.  Any dispute arising out of or in connection with this Agreement, including but not limited to any question regarding its existence, interpretation, validity, performance, or termination, or any dispute between the parties arising from the parties' relationship created by this Agreement, shall be referred to and finally resolved by arbitration administered by the American Arbitration Association under its rules.  The number of arbitrators shall be one (1).  The parties shall endeavor to agree upon the sole arbitrator and jointly nominate the arbitrator.  
If the parties cannot agree upon the sole arbitrator within a time prescribed by AAA, the parties shall request the AAA to propose five (5) arbitrators and each party shall rank the proposed arbitrators.  The AAA shall appoint an arbitrator from the list of five (5), based upon the parties' rankings.  \r\n   3. U.S. Government Users.  If User is a Federal Government entity, Licensor provides the Software and the Documentation, including related software and technology, for ultimate Federal Government end use solely in accordance with the following:  Government technical data rights include only those rights customarily provided to the public with a commercial item or process and Government software rights related to the Software and the Documentation include only those rights customarily provided to the public, as defined in this Agreement.  The technical data rights and customary commercial software license is provided in accordance with FAR 12.211 (Technical Data) and FAR 12.212 (Software) and, for Department of Defense transactions, DFAR 252.227-7015 (Technical Data – Commercial Items) and DFAR 227.7202-3 (Rights in Commercial Computer Software or Computer Software Documentation).  If greater rights are needed, a mutually acceptable written addendum specifically conveying such rights must be included in this Agreement. \r\n   4. Export.  The Software utilizes software and technology that may be subject to United States and foreign export controls. User acknowledges and agrees that the Software shall not be used, and none of the underlying information, software, or technology may be transferred or otherwise exported or re-exported to countries as to which the United States maintains an embargo (collectively, “Embargoed Countries”), or to or by a national or resident thereof, or any person or entity on the U.S. Department of Treasury’s List of Specially Designated Nationals or the U.S. 
Department of Commerce’s Table of Denial Orders (collectively, “Designated Nationals”). The lists of Embargoed Countries and Designated Nationals are subject to change without notice. By using the Software, User represents and warrants that it is not located in, under the control of, or a national or resident of an Embargoed Country or Designated National. The Software may use encryption technology that is subject to licensing requirements under the U.S. Export Administration Regulations, 15 C.F.R. Parts 730-774 and Council Regulation (EC) No. 1334/2000. User agrees to comply strictly with all applicable export laws and assume sole responsibility for obtaining licenses to export or re-export as may be required. Licensor and its licensors make no representation that the Software is appropriate or available for use in other locations. By using the Software, User represents and warrants that it is not located in any such country or on any such list.\r\n   5. General.  User shall not assign its rights hereunder, or delegate the performance of any of its duties or obligations hereunder, whether by merger, acquisition, sale of assets, operation of law, or otherwise, without the prior written consent of Licensor.  Any purported assignment in violation of the preceding sentence is null and void.  Subject to the foregoing, this Agreement shall be binding upon, and inure to the benefit of, the successors and assigns of the parties thereto.  Except as otherwise specified in this Agreement, this Agreement may be amended or supplemented only by a writing that refers explicitly to this Agreement and that is signed on behalf of both parties.  No waiver will be implied from conduct or failure to enforce rights.  No waiver will be effective unless in a writing signed on behalf of the party against whom the waiver is asserted.  
If any of this Agreement is found invalid or unenforceable that term will be enforced to the maximum extent permitted by law and the remainder of this Agreement will remain in full force.  \r\nNothing contained herein shall be construed as creating an agency, partnership, or other form of joint enterprise between the parties.  \r\nThis Agreement constitutes the entire agreement between the parties relating to this subject matter and supersedes all prior or simultaneous understandings, representations, discussions, negotiations, and agreements, whether written or oral.  \r\nNeither party shall be liable to the other party or any third party for failure or delay in performing its obligations under this Agreement when such failure or delay is due to any cause beyond the control of the party concerned, including, without limitation, force majeure, governmental orders or restrictions, fire, or flood, provided that upon cessation of such events such party shall thereupon promptly perform or complete the performance of its obligations hereunder."
  },
  {
    "path": "migrator/src/test/resources/general.conf",
    "content": "#  Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n#\n\n// This file contains the general settings which are shared in all akka-persistence-jdbc tests\n\nakka {\n  stdout-loglevel = off // defaults to WARNING can be disabled with off. The stdout-loglevel is only in effect during system startup and shutdown\n  log-dead-letters-during-shutdown = on\n  loglevel = debug\n  log-dead-letters = on\n  log-config-on-start = off // Log the complete configuration at INFO level when the actor system is started\n\n  loggers = [\"akka.event.slf4j.Slf4jLogger\"]\n  logging-filter = \"akka.event.slf4j.Slf4jLoggingFilter\"\n\n  actor {\n    // Required until https://github.com/akka/akka/pull/28333 is available\n    allow-java-serialization = on\n    debug {\n      receive = on // log all messages sent to an actor if that actors receive method is a LoggingReceive\n      autoreceive = off // log all special messages like Kill, PoisonPill etc sent to all actors\n      lifecycle = off // log all actor lifecycle events of all actors\n      fsm = off // enable logging of all events, transitions and timers of FSM Actors that extend LoggingFSM\n      event-stream = off // enable logging of subscriptions (subscribe/unsubscribe) on the ActorSystem.eventStream\n    }\n  }\n}\n\ndocker {\n  host = \"localhost\"\n  host = ${?VM_HOST}\n}\n\njdbc-journal {\n  event-adapters {\n    event-adapter = \"akka.persistence.jdbc.migrator.MigratorSpec$AccountEventAdapter\"\n  }\n\n  event-adapter-bindings {\n    \"akka.persistence.jdbc.migrator.MigratorSpec$AccountEvent\" = event-adapter\n  }\n}\n\n// Default configurations of legacy and non-legacy snapshot tables are both set with the same name (tableName = \"snapshot\"); So we have to distinguish them with a different name\njdbc-snapshot-store.tables.legacy_snapshot.tableName = \"legacy_snapshot\"\n\nslick.db.idleTimeout = 10000 // 10 seconds\n"
  },
  {
    "path": "migrator/src/test/resources/h2-application.conf",
    "content": "#  Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// general.conf is included only for shared settings used for the akka-persistence-jdbc tests\ninclude \"general.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n    }\n  }\n}\n\njdbc-journal {\n  slick = ${slick}\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  slick = ${slick}\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  slick = ${slick}\n}\n\nslick {\n  profile = \"slick.jdbc.H2Profile$\"\n  db {\n    url = \"jdbc:h2:mem:test-database;DATABASE_TO_UPPER=false;\"\n    user = \"root\"\n    password = \"root\"\n    driver = \"org.h2.Driver\"\n    numThreads = 5\n    maxConnections = 5\n    minConnections = 1\n  }\n}\n"
  },
  {
    "path": "migrator/src/test/resources/mysql-application.conf",
    "content": "#  Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// general.conf is included only for shared settings used for the akka-persistence-jdbc tests\ninclude \"general.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\njdbc-journal {\n  slick = ${slick}\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  slick = ${slick}\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  slick = ${slick}\n}\n\nslick {\n  profile = \"slick.jdbc.MySQLProfile$\"\n  db {\n    host = ${docker.host}\n    host = ${?DB_HOST}\n    url = \"jdbc:mysql://\"${slick.db.host}\":3306/docker?cachePrepStmts=true&cacheCallableStmts=true&cacheServerConfiguration=true&useLocalSessionState=true&elideSetAutoCommits=true&alwaysSendSetIsolation=false&enableQueryTimeouts=false&connectionAttributes=none&verifyServerCertificate=false&useSSL=false&allowPublicKeyRetrieval=true&useUnicode=true&useLegacyDatetimeCode=false&serverTimezone=UTC&rewriteBatchedStatements=true\"\n    user = \"root\"\n    password = \"root\"\n    driver = \"com.mysql.cj.jdbc.Driver\"\n    numThreads = 5\n    maxConnections = 5\n    minConnections = 1\n  }\n}\n"
  },
  {
    "path": "migrator/src/test/resources/oracle-application.conf",
    "content": "#  Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// general.conf is included only for shared settings used for the akka-persistence-jdbc tests\ninclude \"general.conf\"\ninclude \"oracle-schema-overrides.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\n\njdbc-journal {\n  slick = ${slick}\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  slick = ${slick}\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  slick = ${slick}\n}\n\nslick {\n  profile = \"slick.jdbc.OracleProfile$\"\n  db {\n    host = ${docker.host}\n    host = ${?DB_HOST}\n    url = \"jdbc:oracle:thin:@//\"${slick.db.host}\":1521/FREEPDB1\"\n    user = \"system\"\n    password = \"oracle\"\n    driver = \"oracle.jdbc.OracleDriver\"\n    numThreads = 5\n    maxConnections = 5\n    minConnections = 1\n  }\n}\n"
  },
  {
    "path": "migrator/src/test/resources/postgres-application.conf",
    "content": "#  Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\n// general.conf is included only for shared settings used for the akka-persistence-jdbc tests\ninclude \"general.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\njdbc-journal {\n  slick = ${slick}\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  slick = ${slick}\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  slick = ${slick}\n}\n\nslick {\n  profile = \"slick.jdbc.PostgresProfile$\"\n  db {\n    host = \"localhost\"\n    host = ${?DB_HOST}\n    url = \"jdbc:postgresql://\"${slick.db.host}\":5432/docker?reWriteBatchedInserts=true\"\n    user = \"docker\"\n    password = \"docker\"\n    driver = \"org.postgresql.Driver\"\n    numThreads = 5\n    maxConnections = 5\n    minConnections = 1\n  }\n}\n"
  },
  {
    "path": "migrator/src/test/resources/schema/h2/h2-create-schema-legacy.sql",
    "content": "CREATE TABLE IF NOT EXISTS PUBLIC.\"journal\" (\n  \"ordering\" BIGINT AUTO_INCREMENT,\n  \"persistence_id\" VARCHAR(255) NOT NULL,\n  \"sequence_number\" BIGINT NOT NULL,\n  \"deleted\" BOOLEAN DEFAULT FALSE NOT NULL,\n  \"tags\" VARCHAR(255) DEFAULT NULL,\n  \"message\" BYTEA NOT NULL,\n  PRIMARY KEY(\"persistence_id\", \"sequence_number\")\n);\nCREATE UNIQUE INDEX IF NOT EXISTS  \"journal_ordering_idx\" ON PUBLIC.\"journal\"(\"ordering\");\n\nCREATE TABLE IF NOT EXISTS PUBLIC.\"legacy_snapshot\" (\n  \"persistence_id\" VARCHAR(255) NOT NULL,\n  \"sequence_number\" BIGINT NOT NULL,\n  \"created\" BIGINT NOT NULL,\n  \"snapshot\" BYTEA NOT NULL,\n  PRIMARY KEY(\"persistence_id\", \"sequence_number\")\n);\n\n\nCREATE TABLE IF NOT EXISTS \"durable_state\" (\n    \"global_offset\" BIGINT NOT NULL AUTO_INCREMENT,\n    \"persistence_id\" VARCHAR(255) NOT NULL,\n    \"revision\" BIGINT NOT NULL,\n    \"state_payload\" BLOB NOT NULL,\n    \"state_serial_id\" INTEGER NOT NULL,\n    \"state_serial_manifest\" VARCHAR,\n    \"tag\" VARCHAR,\n    \"state_timestamp\" BIGINT NOT NULL,\n    PRIMARY KEY(\"persistence_id\")\n    );\n\nCREATE INDEX \"state_tag_idx\" on \"durable_state\" (\"tag\");\nCREATE INDEX \"state_global_offset_idx\" on \"durable_state\" (\"global_offset\");\n"
  },
  {
    "path": "migrator/src/test/resources/schema/h2/h2-create-schema.sql",
    "content": "CREATE TABLE IF NOT EXISTS \"event_journal\" (\n    \"ordering\" BIGINT UNIQUE NOT NULL AUTO_INCREMENT,\n    \"deleted\" BOOLEAN DEFAULT false NOT NULL,\n    \"persistence_id\" VARCHAR(255) NOT NULL,\n    \"sequence_number\" BIGINT NOT NULL,\n    \"writer\" VARCHAR NOT NULL,\n    \"write_timestamp\" BIGINT NOT NULL,\n    \"adapter_manifest\" VARCHAR NOT NULL,\n    \"event_payload\" BLOB NOT NULL,\n    \"event_ser_id\" INTEGER NOT NULL,\n    \"event_ser_manifest\" VARCHAR NOT NULL,\n    \"meta_payload\" BLOB,\n    \"meta_ser_id\" INTEGER,\n    \"meta_ser_manifest\" VARCHAR,\n    PRIMARY KEY(\"persistence_id\",\"sequence_number\")\n    );\n\nCREATE UNIQUE INDEX \"event_journal_ordering_idx\" on \"event_journal\" (\"ordering\");\n\nCREATE TABLE IF NOT EXISTS \"event_tag\" (\n    \"event_id\" BIGINT,\n    \"persistence_id\" VARCHAR(255),\n    \"sequence_number\" BIGINT,\n    \"tag\" VARCHAR NOT NULL,\n    PRIMARY KEY(\"persistence_id\", \"sequence_number\", \"tag\"),\n    CONSTRAINT fk_event_journal\n      FOREIGN KEY(\"persistence_id\", \"sequence_number\")\n      REFERENCES \"event_journal\"(\"persistence_id\", \"sequence_number\")\n      ON DELETE CASCADE\n);\n\nCREATE TABLE IF NOT EXISTS \"snapshot\" (\n    \"persistence_id\" VARCHAR(255) NOT NULL,\n    \"sequence_number\" BIGINT NOT NULL,\n    \"created\" BIGINT NOT NULL,\"snapshot_ser_id\" INTEGER NOT NULL,\n    \"snapshot_ser_manifest\" VARCHAR NOT NULL,\n    \"snapshot_payload\" BLOB NOT NULL,\n    \"meta_ser_id\" INTEGER,\n    \"meta_ser_manifest\" VARCHAR,\n    \"meta_payload\" BLOB,\n    PRIMARY KEY(\"persistence_id\",\"sequence_number\")\n    );\n\nCREATE SEQUENCE IF NOT EXISTS \"global_offset_seq\";\n\nCREATE TABLE IF NOT EXISTS \"durable_state\" (\n    \"global_offset\" BIGINT DEFAULT NEXT VALUE FOR \"global_offset_seq\",\n    \"persistence_id\" VARCHAR(255) NOT NULL,\n    \"revision\" BIGINT NOT NULL,\n    \"state_payload\" BLOB NOT NULL,\n    \"state_serial_id\" INTEGER NOT NULL,\n    
\"state_serial_manifest\" VARCHAR,\n    \"tag\" VARCHAR,\n    \"state_timestamp\" BIGINT NOT NULL,\n    PRIMARY KEY(\"persistence_id\")\n    );\nCREATE INDEX IF NOT EXISTS \"state_tag_idx\" on \"durable_state\" (\"tag\");\nCREATE INDEX IF NOT EXISTS \"state_global_offset_idx\" on \"durable_state\" (\"global_offset\");\n"
  },
  {
    "path": "migrator/src/test/resources/schema/h2/h2-drop-schema-legacy.sql",
    "content": "DROP TABLE IF EXISTS PUBLIC.\"journal\";\nDROP TABLE IF EXISTS PUBLIC.\"legacy_snapshot\";\nDROP TABLE IF EXISTS PUBLIC.\"durable_state\";\n"
  },
  {
    "path": "migrator/src/test/resources/schema/h2/h2-drop-schema.sql",
    "content": "DROP TABLE IF EXISTS PUBLIC.\"event_tag\";\nDROP TABLE IF EXISTS PUBLIC.\"event_journal\";\nDROP TABLE IF EXISTS PUBLIC.\"snapshot\";\nDROP TABLE IF EXISTS PUBLIC.\"durable_state\";\nDROP SEQUENCE IF EXISTS PUBLIC.\"global_offset_seq\";\n"
  },
  {
    "path": "migrator/src/test/resources/schema/mysql/mysql-create-schema-legacy.sql",
    "content": "CREATE TABLE IF NOT EXISTS journal (\n  ordering SERIAL,\n  persistence_id VARCHAR(255) NOT NULL,\n  sequence_number BIGINT NOT NULL,\n  deleted BOOLEAN DEFAULT FALSE NOT NULL,\n  tags VARCHAR(255) DEFAULT NULL,\n  message BLOB NOT NULL,\n  PRIMARY KEY(persistence_id, sequence_number)\n);\nCREATE UNIQUE INDEX journal_ordering_idx ON journal(ordering);\n\nCREATE TABLE IF NOT EXISTS legacy_snapshot (\n  persistence_id VARCHAR(255) NOT NULL,\n  sequence_number BIGINT NOT NULL,\n  created BIGINT NOT NULL,\n  snapshot BLOB NOT NULL,\n  PRIMARY KEY (persistence_id, sequence_number)\n);\n"
  },
  {
    "path": "migrator/src/test/resources/schema/mysql/mysql-create-schema.sql",
    "content": "CREATE TABLE IF NOT EXISTS event_journal (\n    ordering SERIAL,\n    deleted BOOLEAN DEFAULT false NOT NULL,\n    persistence_id VARCHAR(255) NOT NULL,\n    sequence_number BIGINT NOT NULL,\n    writer TEXT NOT NULL,\n    write_timestamp BIGINT NOT NULL,\n    adapter_manifest TEXT NOT NULL,\n    event_payload BLOB NOT NULL,\n    event_ser_id INTEGER NOT NULL,\n    event_ser_manifest TEXT NOT NULL,\n    meta_payload BLOB,\n    meta_ser_id INTEGER,meta_ser_manifest TEXT,\n    PRIMARY KEY(persistence_id,sequence_number)\n);\n\nCREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering);\n\nCREATE TABLE IF NOT EXISTS event_tag (\n    event_id BIGINT UNSIGNED,\n    persistence_id VARCHAR(255),\n    sequence_number BIGINT,\n    tag VARCHAR(255) NOT NULL,\n    PRIMARY KEY(persistence_id, sequence_number, tag),\n    FOREIGN KEY (persistence_id, sequence_number)\n        REFERENCES event_journal(persistence_id, sequence_number)\n        ON DELETE CASCADE\n    );\n\nCREATE TABLE IF NOT EXISTS snapshot (\n    persistence_id VARCHAR(255) NOT NULL,\n    sequence_number BIGINT NOT NULL,\n    created BIGINT NOT NULL,\n    snapshot_ser_id INTEGER NOT NULL,\n    snapshot_ser_manifest TEXT NOT NULL,\n    snapshot_payload BLOB NOT NULL,\n    meta_ser_id INTEGER,\n    meta_ser_manifest TEXT,\n    meta_payload BLOB,\n  PRIMARY KEY (persistence_id, sequence_number));\n"
  },
  {
    "path": "migrator/src/test/resources/schema/mysql/mysql-drop-schema-legacy.sql",
    "content": "DROP TABLE IF EXISTS journal;\nDROP TABLE IF EXISTS legacy_snapshot;\n"
  },
  {
    "path": "migrator/src/test/resources/schema/mysql/mysql-drop-schema.sql",
    "content": "DROP TABLE IF EXISTS event_tag;\nDROP TABLE IF EXISTS event_journal;\nDROP TABLE IF EXISTS snapshot;\n"
  },
  {
    "path": "migrator/src/test/resources/schema/oracle/oracle-create-schema-legacy.sql",
    "content": "CREATE SEQUENCE \"ordering_seq\" START WITH 1 INCREMENT BY 1 NOMAXVALUE\n/\n\nCREATE TABLE \"journal\" (\n  \"ordering\" NUMERIC,\n  \"deleted\" char check (\"deleted\" in (0,1)) NOT NULL,\n  \"persistence_id\" VARCHAR(255) NOT NULL,\n  \"sequence_number\" NUMERIC NOT NULL,\n  \"tags\" VARCHAR(255) DEFAULT NULL,\n  \"message\" BLOB NOT NULL,\n  PRIMARY KEY(\"persistence_id\", \"sequence_number\")\n)\n/\n\nCREATE UNIQUE INDEX \"journal_ordering_idx\" ON \"journal\"(\"ordering\")\n/\n\nCREATE OR REPLACE TRIGGER \"ordering_seq_trigger\"\nBEFORE INSERT ON \"journal\"\nFOR EACH ROW\nBEGIN\n  SELECT \"ordering_seq\".NEXTVAL INTO :NEW.\"ordering\" FROM DUAL;\nEND;\n/\n\nCREATE OR REPLACE PROCEDURE \"reset_sequence\"\nIS\n  l_value NUMBER;\nBEGIN\n  EXECUTE IMMEDIATE 'SELECT \"ordering_seq\".nextval FROM dual' INTO l_value;\n  EXECUTE IMMEDIATE 'ALTER SEQUENCE \"ordering_seq\" INCREMENT BY -' || l_value || ' MINVALUE 0';\n  EXECUTE IMMEDIATE 'SELECT \"ordering_seq\".nextval FROM dual' INTO l_value;\n  EXECUTE IMMEDIATE 'ALTER SEQUENCE \"ordering_seq\" INCREMENT BY 1 MINVALUE 0';\nEND;\n/\n\nCREATE TABLE \"legacy_snapshot\" (\n  \"persistence_id\" VARCHAR(255) NOT NULL,\n  \"sequence_number\" NUMERIC NOT NULL,\n  \"created\" NUMERIC NOT NULL,\n  \"snapshot\" BLOB NOT NULL,\n  PRIMARY KEY (\"persistence_id\", \"sequence_number\")\n)\n/"
  },
  {
    "path": "migrator/src/test/resources/schema/oracle/oracle-create-schema.sql",
    "content": "CREATE SEQUENCE EVENT_JOURNAL__ORDERING_SEQ START WITH 1 INCREMENT BY 1 NOMAXVALUE\n/\n\nCREATE TABLE EVENT_JOURNAL (\n    ORDERING NUMERIC UNIQUE,\n    DELETED CHAR(1) DEFAULT 0 NOT NULL check (DELETED in (0, 1)),\n    PERSISTENCE_ID VARCHAR(255) NOT NULL,\n    SEQUENCE_NUMBER NUMERIC NOT NULL,\n    WRITER VARCHAR(255) NOT NULL,\n    WRITE_TIMESTAMP NUMBER(19) NOT NULL,\n    ADAPTER_MANIFEST VARCHAR(255),\n    EVENT_PAYLOAD BLOB NOT NULL,\n    EVENT_SER_ID NUMBER(10) NOT NULL,\n    EVENT_SER_MANIFEST VARCHAR(255),\n    META_PAYLOAD BLOB,\n    META_SER_ID NUMBER(10),\n    META_SER_MANIFEST VARCHAR(255),\n    PRIMARY KEY(PERSISTENCE_ID, SEQUENCE_NUMBER)\n    )\n/\n\nCREATE OR REPLACE TRIGGER EVENT_JOURNAL__ORDERING_TRG before insert on EVENT_JOURNAL REFERENCING NEW AS NEW FOR EACH ROW WHEN (new.ORDERING is null) begin select EVENT_JOURNAL__ORDERING_seq.nextval into :new.ORDERING from sys.dual; end;\n/\n\nCREATE TABLE EVENT_TAG (\n    EVENT_ID NUMERIC,\n    PERSISTENCE_ID VARCHAR(255),\n    SEQUENCE_NUMBER NUMERIC,\n    TAG VARCHAR(255) NOT NULL,\n    PRIMARY KEY(PERSISTENCE_ID, SEQUENCE_NUMBER, TAG),\n    FOREIGN KEY(PERSISTENCE_ID, SEQUENCE_NUMBER) REFERENCES EVENT_JOURNAL(PERSISTENCE_ID, SEQUENCE_NUMBER)\n    ON DELETE CASCADE\n    )\n/\n\nCREATE TABLE SNAPSHOT (\n    PERSISTENCE_ID VARCHAR(255) NOT NULL,\n    SEQUENCE_NUMBER NUMERIC NOT NULL,\n    CREATED NUMERIC NOT NULL,\n    SNAPSHOT_SER_ID NUMBER(10) NOT NULL,\n    SNAPSHOT_SER_MANIFEST VARCHAR(255),\n    SNAPSHOT_PAYLOAD BLOB NOT NULL,\n    META_SER_ID NUMBER(10),\n    META_SER_MANIFEST VARCHAR(255),\n    META_PAYLOAD BLOB,\n    PRIMARY KEY(PERSISTENCE_ID,SEQUENCE_NUMBER)\n    )\n/\n\nCREATE OR REPLACE PROCEDURE \"reset_sequence\"\nIS\n  l_value NUMBER;\nBEGIN\n  EXECUTE IMMEDIATE 'SELECT EVENT_JOURNAL__ORDERING_SEQ.nextval FROM dual' INTO l_value;\n  EXECUTE IMMEDIATE 'ALTER SEQUENCE EVENT_JOURNAL__ORDERING_SEQ INCREMENT BY -' || l_value || ' MINVALUE 0';\n  EXECUTE IMMEDIATE 'SELECT 
EVENT_JOURNAL__ORDERING_SEQ.nextval FROM dual' INTO l_value;\n  EXECUTE IMMEDIATE 'ALTER SEQUENCE EVENT_JOURNAL__ORDERING_SEQ INCREMENT BY 1 MINVALUE 0';\nEND;\n/\n"
  },
  {
    "path": "migrator/src/test/resources/schema/oracle/oracle-drop-schema-legacy.sql",
    "content": "-- (ddl lock timeout in seconds) this allows tests which are still writing to the db to finish gracefully\nALTER SESSION SET ddl_lock_timeout = 150\n/\n\nDROP TABLE \"journal\" CASCADE CONSTRAINT\n/\n\nDROP TABLE \"legacy_snapshot\" CASCADE CONSTRAINT\n/\n\nDROP TABLE \"deleted_to\" CASCADE CONSTRAINT\n/\n\nDROP TRIGGER \"ordering_seq_trigger\"\n/\n\nDROP PROCEDURE \"reset_sequence\"\n/\n\nDROP SEQUENCE \"ordering_seq\"\n/\n"
  },
  {
    "path": "migrator/src/test/resources/schema/oracle/oracle-drop-schema.sql",
    "content": "ALTER SESSION SET ddl_lock_timeout = 15\n/\n\nDROP TABLE EVENT_TAG CASCADE CONSTRAINT\n/\n\nDROP TABLE EVENT_JOURNAL CASCADE CONSTRAINT\n/\n\nDROP TABLE SNAPSHOT CASCADE CONSTRAINT\n/\n\nDROP SEQUENCE EVENT_JOURNAL__ORDERING_SEQ\n/\n\nDROP TRIGGER EVENT_JOURNAL__ORDERING_TRG\n/\n"
  },
  {
    "path": "migrator/src/test/resources/schema/postgres/postgres-create-schema-legacy.sql",
    "content": "CREATE TABLE IF NOT EXISTS public.journal (\n  ordering BIGSERIAL,\n  persistence_id VARCHAR(255) NOT NULL,\n  sequence_number BIGINT NOT NULL,\n  deleted BOOLEAN DEFAULT FALSE NOT NULL,\n  tags VARCHAR(255) DEFAULT NULL,\n  message BYTEA NOT NULL,\n  PRIMARY KEY(persistence_id, sequence_number)\n);\nCREATE UNIQUE INDEX IF NOT EXISTS journal_ordering_idx ON public.journal(ordering);\n\nCREATE TABLE IF NOT EXISTS public.legacy_snapshot (\n  persistence_id VARCHAR(255) NOT NULL,\n  sequence_number BIGINT NOT NULL,\n  created BIGINT NOT NULL,\n  snapshot BYTEA NOT NULL,\n  PRIMARY KEY(persistence_id, sequence_number)\n);\n\nCREATE TABLE IF NOT EXISTS public.durable_state (\n    global_offset BIGSERIAL,\n    persistence_id VARCHAR(255) NOT NULL,\n    revision BIGINT NOT NULL,\n    state_payload BYTEA NOT NULL,\n    state_serial_id INTEGER NOT NULL,\n    state_serial_manifest VARCHAR(255),\n    tag VARCHAR,\n    state_timestamp BIGINT NOT NULL,\n    PRIMARY KEY(persistence_id)\n    );\nCREATE INDEX CONCURRENTLY state_tag_idx on public.durable_state (tag);\nCREATE INDEX CONCURRENTLY state_global_offset_idx on public.durable_state (global_offset);\n"
  },
  {
    "path": "migrator/src/test/resources/schema/postgres/postgres-create-schema.sql",
    "content": "CREATE TABLE IF NOT EXISTS public.event_journal(\n  ordering BIGSERIAL,\n  persistence_id VARCHAR(255) NOT NULL,\n  sequence_number BIGINT NOT NULL,\n  deleted BOOLEAN DEFAULT FALSE NOT NULL,\n\n  writer VARCHAR(255) NOT NULL,\n  write_timestamp BIGINT,\n  adapter_manifest VARCHAR(255),\n\n  event_ser_id INTEGER NOT NULL,\n  event_ser_manifest VARCHAR(255) NOT NULL,\n  event_payload BYTEA NOT NULL,\n\n  meta_ser_id INTEGER,\n  meta_ser_manifest VARCHAR(255),\n  meta_payload BYTEA,\n\n  PRIMARY KEY(persistence_id, sequence_number)\n);\n\nCREATE UNIQUE INDEX event_journal_ordering_idx ON public.event_journal(ordering);\n\nCREATE TABLE IF NOT EXISTS public.event_tag(\n    event_id BIGINT,\n    persistence_id VARCHAR(255),\n    sequence_number BIGINT,\n    tag VARCHAR(256),\n    PRIMARY KEY(persistence_id, sequence_number, tag),\n    CONSTRAINT fk_event_journal\n      FOREIGN KEY(persistence_id, sequence_number)\n      REFERENCES event_journal(persistence_id, sequence_number)\n      ON DELETE CASCADE\n);\n\nCREATE TABLE IF NOT EXISTS public.snapshot (\n  persistence_id VARCHAR(255) NOT NULL,\n  sequence_number BIGINT NOT NULL,\n  created BIGINT NOT NULL,\n\n  snapshot_ser_id INTEGER NOT NULL,\n  snapshot_ser_manifest VARCHAR(255) NOT NULL,\n  snapshot_payload BYTEA NOT NULL,\n\n  meta_ser_id INTEGER,\n  meta_ser_manifest VARCHAR(255),\n  meta_payload BYTEA,\n\n  PRIMARY KEY(persistence_id, sequence_number)\n);\n\nCREATE TABLE IF NOT EXISTS public.durable_state (\n    global_offset BIGSERIAL,\n    persistence_id VARCHAR(255) NOT NULL,\n    revision BIGINT NOT NULL,\n    state_payload BYTEA NOT NULL,\n    state_serial_id INTEGER NOT NULL,\n    state_serial_manifest VARCHAR(255),\n    tag VARCHAR,\n    state_timestamp BIGINT NOT NULL,\n    PRIMARY KEY(persistence_id)\n    );\nCREATE INDEX CONCURRENTLY state_tag_idx on public.durable_state (tag);\nCREATE INDEX CONCURRENTLY state_global_offset_idx on public.durable_state (global_offset);\n"
  },
  {
    "path": "migrator/src/test/resources/schema/postgres/postgres-drop-schema-legacy.sql",
    "content": "DROP TABLE IF EXISTS public.journal;\nDROP TABLE IF EXISTS public.legacy_snapshot;\nDROP TABLE IF EXISTS public.durable_state;\n"
  },
  {
    "path": "migrator/src/test/resources/schema/postgres/postgres-drop-schema.sql",
    "content": "DROP TABLE IF EXISTS public.event_tag;\nDROP TABLE IF EXISTS public.event_journal;\nDROP TABLE IF EXISTS public.snapshot;\nDROP TABLE IF EXISTS public.durable_state;\n\n"
  },
  {
    "path": "migrator/src/test/resources/schema/sqlserver/sqlserver-create-schema-legacy.sql",
    "content": "IF  NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID(N'\"journal\"') AND type in (N'U'))\nbegin\nCREATE TABLE journal (\n  \"ordering\" BIGINT IDENTITY(1,1) NOT NULL,\n  \"deleted\" BIT DEFAULT 0 NOT NULL,\n  \"persistence_id\" VARCHAR(255) NOT NULL,\n  \"sequence_number\" NUMERIC(10,0) NOT NULL,\n  \"tags\" VARCHAR(255) NULL DEFAULT NULL,\n  \"message\" VARBINARY(max) NOT NULL,\n  PRIMARY KEY (\"persistence_id\", \"sequence_number\")\n)\nCREATE UNIQUE INDEX journal_ordering_idx ON journal (ordering)\nend;\n\n\nIF  NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID(N'\"legacy_snapshot\"') AND type in (N'U'))\nbegin\nCREATE TABLE legacy_snapshot (\n  \"persistence_id\" VARCHAR(255) NOT NULL,\n  \"sequence_number\" NUMERIC(10,0) NOT NULL,\n  \"created\" NUMERIC NOT NULL,\n  \"snapshot\" VARBINARY(max) NOT NULL,\n  PRIMARY KEY (\"persistence_id\", \"sequence_number\")\n);\nend;\n"
  },
  {
    "path": "migrator/src/test/resources/schema/sqlserver/sqlserver-create-schema.sql",
    "content": "CREATE TABLE event_journal(\n    \"ordering\" BIGINT IDENTITY(1,1) NOT NULL,\n    \"deleted\" BIT DEFAULT 0 NOT NULL,\n    \"persistence_id\" VARCHAR(255) NOT NULL,\n    \"sequence_number\" NUMERIC(10,0) NOT NULL,\n    \"writer\" VARCHAR(255) NOT NULL,\n    \"write_timestamp\" BIGINT NOT NULL,\n    \"adapter_manifest\" VARCHAR(MAX) NOT NULL,\n    \"event_payload\" VARBINARY(MAX) NOT NULL,\n    \"event_ser_id\" INTEGER NOT NULL,\n    \"event_ser_manifest\" VARCHAR(MAX) NOT NULL,\n    \"meta_payload\" VARBINARY(MAX),\n    \"meta_ser_id\" INTEGER,\n    \"meta_ser_manifest\" VARCHAR(MAX),\n    PRIMARY KEY (\"persistence_id\", \"sequence_number\")\n);\n\nCREATE UNIQUE INDEX event_journal_ordering_idx ON event_journal(ordering);\n\nCREATE TABLE event_tag (\n    \"event_id\" BIGINT,\n    \"persistence_id\" VARCHAR(255),\n    \"sequence_number\" NUMERIC(10,0),\n    \"tag\" VARCHAR(255) NOT NULL,\n    PRIMARY KEY (\"event_id\", \"tag\"),\n    constraint \"fk_event_journal\"\n        foreign key(\"event_id\")\n        references \"dbo\".\"event_journal\"(\"ordering\")\n        on delete CASCADE\n);\n\nCREATE TABLE \"snapshot\" (\n    \"persistence_id\" VARCHAR(255) NOT NULL,\n    \"sequence_number\" NUMERIC(10,0) NOT NULL,\n    \"created\" BIGINT NOT NULL,\n    \"snapshot_ser_id\" INTEGER NOT NULL,\n    \"snapshot_ser_manifest\" VARCHAR(255) NOT NULL,\n    \"snapshot_payload\" VARBINARY(MAX) NOT NULL,\n    \"meta_ser_id\" INTEGER,\n    \"meta_ser_manifest\" VARCHAR(255),\n    \"meta_payload\" VARBINARY(MAX),\n    PRIMARY KEY (\"persistence_id\", \"sequence_number\")\n  )\n\n"
  },
  {
    "path": "migrator/src/test/resources/schema/sqlserver/sqlserver-drop-schema-legacy.sql",
    "content": "DROP TABLE IF EXISTS journal;\nDROP TABLE IF EXISTS legacy_snapshot;\n"
  },
  {
    "path": "migrator/src/test/resources/schema/sqlserver/sqlserver-drop-schema.sql",
    "content": "DROP TABLE IF EXISTS event_tag;\nDROP TABLE IF EXISTS event_journal;\nDROP TABLE IF EXISTS snapshot;\n"
  },
  {
    "path": "migrator/src/test/resources/sqlserver-application.conf",
    "content": "#  Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n\ninclude \"general.conf\"\n\nakka {\n  persistence {\n    journal {\n      plugin = \"jdbc-journal\"\n      // Enable the line below to automatically start the journal when the actorsystem is started\n      // auto-start-journals = [\"jdbc-journal\"]\n    }\n    snapshot-store {\n      plugin = \"jdbc-snapshot-store\"\n      // Enable the line below to automatically start the snapshot-store when the actorsystem is started\n      // auto-start-snapshot-stores = [\"jdbc-snapshot-store\"]\n    }\n  }\n}\n\njdbc-journal {\n  tables {\n    journal {\n      schemaName = \"dbo\"\n    }\n  }\n\n  slick = ${slick}\n}\n\n# the akka-persistence-snapshot-store in use\njdbc-snapshot-store {\n  tables {\n    snapshot {\n      schemaName = \"dbo\"\n    }\n  }\n\n  slick = ${slick}\n}\n\n# the akka-persistence-query provider in use\njdbc-read-journal {\n  tables {\n    journal {\n      schemaName = \"dbo\"\n    }\n  }\n\n  slick = ${slick}\n}\n\nslick {\n  profile = \"slick.jdbc.SQLServerProfile$\"\n  db {\n    host = ${docker.host}\n    host = ${?DB_HOST}\n    url = \"jdbc:sqlserver://\"${slick.db.host}\":1433;databaseName=docker;integratedSecurity=false\"\n    user = \"sa\"\n    password = \"docker123abc#\"\n    driver = \"com.microsoft.sqlserver.jdbc.SQLServerDriver\"\n    numThreads = 5\n    maxConnections = 5\n    minConnections = 1\n  }\n}\n"
  },
  {
    "path": "migrator/src/test/scala/akka/persistence/jdbc/migrator/JournalMigratorTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.migrator\n\nimport akka.Done\nimport akka.pattern.ask\nimport akka.persistence.jdbc.db.SlickDatabase\nimport akka.persistence.jdbc.migrator.MigratorSpec._\n\nabstract class JournalMigratorTest(configName: String) extends MigratorSpec(configName) {\n\n  it should \"migrate the event journal\" in {\n    withLegacyActorSystem { implicit systemLegacy =>\n      withReadJournal { implicit readJournal =>\n        withTestActors() { (actorA1, actorA2, actorA3) =>\n          eventually {\n            countJournal().futureValue shouldBe 0\n            (actorA1 ? CreateAccount(1)).futureValue // balance 1\n            (actorA2 ? CreateAccount(2)).futureValue // balance 2\n            (actorA3 ? CreateAccount(3)).futureValue // balance 3\n            (actorA1 ? Deposit(3)).futureValue // balance 4\n            (actorA2 ? Deposit(2)).futureValue // balance 4\n            (actorA3 ? Deposit(1)).futureValue // balance 4\n            (actorA1 ? Withdraw(3)).futureValue // balance 1\n            (actorA2 ? Withdraw(2)).futureValue // balance 1\n            (actorA3 ? Withdraw(1)).futureValue // balance 1\n            (actorA1 ? State).mapTo[Int].futureValue shouldBe 1\n            (actorA2 ? State).mapTo[Int].futureValue shouldBe 2\n            (actorA3 ? 
State).mapTo[Int].futureValue shouldBe 3\n            countJournal().futureValue shouldBe 9\n          }\n        }\n      }\n    } // legacy persistence\n    withActorSystem { implicit systemNew =>\n      withReadJournal { implicit readJournal =>\n        eventually {\n          countJournal().futureValue shouldBe 0 // before migration\n          JournalMigrator(SlickDatabase.profile(config, \"slick\")).migrate().futureValue shouldBe Done\n          countJournal().futureValue shouldBe 9 // after migration\n        }\n        withTestActors() { (actorB1, actorB2, actorB3) =>\n          eventually {\n            (actorB1 ? State).mapTo[Int].futureValue shouldBe 1\n            (actorB2 ? State).mapTo[Int].futureValue shouldBe 2\n            (actorB3 ? State).mapTo[Int].futureValue shouldBe 3\n          }\n        }\n      }\n    } // new persistence\n  }\n\n  it should \"migrate the event journal preserving the order of events\" in {\n    withLegacyActorSystem { implicit systemLegacy =>\n      withReadJournal { implicit readJournal =>\n        withTestActors() { (actorA1, actorA2, actorA3) =>\n          (actorA1 ? CreateAccount(0)).futureValue\n          (actorA2 ? CreateAccount(0)).futureValue\n          (actorA3 ? CreateAccount(0)).futureValue\n          for (i <- 1 to 999) {\n            (actorA1 ? Deposit(i)).futureValue\n            (actorA2 ? Deposit(i)).futureValue\n            (actorA3 ? 
Deposit(i)).futureValue\n          }\n          eventually {\n            countJournal().futureValue shouldBe 3000\n          }\n        }\n      }\n    } // legacy persistence\n    withActorSystem { implicit systemNew =>\n      withReadJournal { implicit readJournal =>\n        eventually {\n          countJournal().futureValue shouldBe 0 // before migration\n          JournalMigrator(SlickDatabase.profile(config, \"slick\")).migrate().futureValue shouldBe Done\n          countJournal().futureValue shouldBe 3000 // after migration\n          val allEvents: Seq[Seq[AccountEvent]] = events().futureValue\n          allEvents.size shouldBe 3\n          val seq1: Seq[Int] = allEvents.head.map(_.amount)\n          val seq2: Seq[Int] = allEvents(1).map(_.amount)\n          val seq3: Seq[Int] = allEvents(2).map(_.amount)\n          val expectedResult: Seq[Int] = 0 to 999\n          seq1 shouldBe expectedResult\n          seq2 shouldBe expectedResult\n          seq3 shouldBe expectedResult\n        }\n      }\n    } // new persistence\n  }\n\n  it should \"migrate the event journal preserving tags\" in {\n    withLegacyActorSystem { implicit systemLegacy =>\n      withReadJournal { implicit readJournal =>\n        withTestActors() { (actorA1, actorA2, actorA3) =>\n          (actorA1 ? CreateAccount(0)).futureValue\n          (actorA2 ? CreateAccount(0)).futureValue\n          (actorA3 ? CreateAccount(0)).futureValue\n          for (i <- 1 to 999) {\n            (actorA1 ? Deposit(i)).futureValue\n            (actorA2 ? Deposit(i)).futureValue\n            (actorA3 ? 
Deposit(i)).futureValue\n          }\n          eventually {\n            countJournal().futureValue shouldBe 3000\n          }\n        }\n      }\n    } // legacy persistence\n    withActorSystem { implicit systemNew =>\n      withReadJournal { implicit readJournal =>\n        eventually {\n          countJournal().futureValue shouldBe 0 // before migration\n          JournalMigrator(SlickDatabase.profile(config, \"slick\")).migrate().futureValue shouldBe Done\n          countJournal().futureValue shouldBe 3000 // after migration\n          val evenEvents: Seq[AccountEvent] = eventsByTag(MigratorSpec.Even).futureValue\n          evenEvents.size shouldBe 1500\n          evenEvents.forall(e => e.amount % 2 == 0) shouldBe true\n\n          val oddEvents: Seq[AccountEvent] = eventsByTag(MigratorSpec.Odd).futureValue\n          oddEvents.size shouldBe 1500\n          oddEvents.forall(e => e.amount % 2 == 1) shouldBe true\n        }\n      }\n    } // new persistence\n  }\n}\n\nclass H2JournalMigratorTest extends JournalMigratorTest(\"h2-application.conf\") with MigratorSpec.H2Cleaner\n"
  },
  {
    "path": "migrator/src/test/scala/akka/persistence/jdbc/migrator/MigratorSpec.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.migrator\n\nimport akka.actor.{ ActorRef, ActorSystem, Props, Stash }\nimport akka.event.LoggingReceive\nimport akka.pattern.ask\nimport akka.persistence.jdbc.SimpleSpec\nimport akka.persistence.jdbc.config.{ JournalConfig, SlickConfiguration }\nimport akka.persistence.jdbc.db.SlickDatabase\nimport akka.persistence.jdbc.migrator.MigratorSpec._\nimport akka.persistence.jdbc.query.scaladsl.JdbcReadJournal\nimport akka.persistence.jdbc.testkit.internal._\nimport akka.persistence.journal.EventSeq.single\nimport akka.persistence.journal.{ EventAdapter, EventSeq, Tagged }\nimport akka.persistence.query.PersistenceQuery\nimport akka.persistence.{ PersistentActor, SaveSnapshotSuccess, SnapshotMetadata, SnapshotOffer }\nimport akka.stream.Materializer\nimport akka.stream.scaladsl.Sink\nimport akka.util.Timeout\nimport com.typesafe.config.{ Config, ConfigFactory, ConfigValue, ConfigValueFactory }\nimport org.scalatest.BeforeAndAfterEach\nimport org.slf4j.{ Logger, LoggerFactory }\nimport slick.jdbc.JdbcBackend.{ Database, Session }\n\nimport java.sql.Statement\nimport scala.concurrent.duration.DurationInt\nimport scala.concurrent.{ ExecutionContextExecutor, Future }\n\nabstract class MigratorSpec(val config: Config) extends SimpleSpec with BeforeAndAfterEach {\n\n  // The db is initialized in the before and after each bocks\n  var dbOpt: Option[Database] = None\n\n  implicit val pc: PatienceConfig = PatienceConfig(timeout = 10.seconds)\n  implicit val timeout: Timeout = Timeout(1.minute)\n\n  private val logger: Logger = LoggerFactory.getLogger(this.getClass)\n\n  private val cfg: Config = config.getConfig(\"jdbc-journal\")\n  private val journalConfig: JournalConfig = new JournalConfig(cfg)\n\n  protected val newJournalTableName: String = 
journalConfig.eventJournalTableConfiguration.tableName\n  protected val legacyJournalTableName: String = journalConfig.journalTableConfiguration.tableName\n\n  protected val newTables: Seq[String] =\n    List(journalConfig.eventTagTableConfiguration.tableName, journalConfig.eventJournalTableConfiguration.tableName)\n  protected val legacyTables: Seq[String] = List(journalConfig.journalTableConfiguration.tableName)\n  protected val tables: Seq[String] = legacyTables ++ newTables\n\n  def this(config: String = \"postgres-application.conf\", configOverrides: Map[String, ConfigValue] = Map.empty) =\n    this(configOverrides.foldLeft(ConfigFactory.load(config)) { case (conf, (path, configValue)) =>\n      conf.withValue(path, configValue)\n    })\n\n  def db: Database = dbOpt.getOrElse {\n    val db = SlickDatabase.database(cfg, new SlickConfiguration(cfg.getConfig(\"slick\")), \"slick.db\")\n    dbOpt = Some(db)\n    db\n  }\n\n  protected def dropAndCreate(schemaType: SchemaType): Unit = {\n    // blocking calls, usually done in our before test methods\n    // legacy\n    SchemaUtilsImpl.dropWithSlick(schemaType, logger, db, legacy = true)\n    SchemaUtilsImpl.createWithSlick(schemaType, logger, db, legacy = true)\n    // new\n    SchemaUtilsImpl.dropWithSlick(schemaType, logger, db, legacy = false)\n    SchemaUtilsImpl.createWithSlick(schemaType, logger, db, legacy = false)\n  }\n\n  def withSession[A](f: Session => A)(db: Database): A = {\n    val session = db.createSession()\n    try f(session)\n    finally session.close()\n  }\n\n  def withStatement[A](f: Statement => A)(db: Database): A =\n    withSession(session => session.withStatement()(f))(db)\n\n  def closeDb(): Unit = {\n    dbOpt.foreach(_.close())\n    dbOpt = None\n  }\n\n  override protected def afterEach(): Unit = {\n    super.afterEach()\n    closeDb()\n  }\n\n  override protected def afterAll(): Unit = {\n    super.afterAll()\n    closeDb()\n  }\n\n  protected def setupEmpty(persistenceId: 
Int)(implicit system: ActorSystem): ActorRef =\n    system.actorOf(Props(new TestAccountActor(persistenceId)))\n\n  def withTestActors(seq: Int = 1)(f: (ActorRef, ActorRef, ActorRef) => Unit)(implicit system: ActorSystem): Unit = {\n    implicit val ec: ExecutionContextExecutor = system.dispatcher\n    val refs = (seq until seq + 3).map(setupEmpty).toList\n    try {\n      // make sure we notice early if the actors failed to start (because of issues with journal) makes debugging\n      // failing tests easier as we know it is not the actual interaction from the test that is the problem\n      Future.sequence(refs.map(_ ? State)).futureValue\n\n      f(refs.head, refs.drop(1).head, refs.drop(2).head)\n    } finally killActors(refs: _*)\n  }\n\n  def withActorSystem(f: ActorSystem => Unit): Unit = {\n    implicit val system: ActorSystem = ActorSystem(\"migrator-test\", config)\n    f(system)\n    system.terminate().futureValue\n  }\n\n  def withLegacyActorSystem(f: ActorSystem => Unit): Unit = {\n\n    val configOverrides: Map[String, ConfigValue] = Map(\n      \"jdbc-journal.dao\" -> ConfigValueFactory.fromAnyRef(\n        \"akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao\"),\n      \"jdbc-snapshot-store.dao\" -> ConfigValueFactory.fromAnyRef(\n        \"akka.persistence.jdbc.snapshot.dao.legacy.ByteArraySnapshotDao\"),\n      \"jdbc-read-journal.dao\" -> ConfigValueFactory.fromAnyRef(\n        \"akka.persistence.jdbc.query.dao.legacy.ByteArrayReadJournalDao\"))\n\n    val legacyDAOConfig = configOverrides.foldLeft(ConfigFactory.load(config)) { case (conf, (path, configValue)) =>\n      conf.withValue(path, configValue)\n    }\n\n    implicit val system: ActorSystem = ActorSystem(\"migrator-test\", legacyDAOConfig)\n    f(system)\n    system.terminate().futureValue\n  }\n\n  def withReadJournal(f: JdbcReadJournal => Unit)(implicit system: ActorSystem): Unit = {\n    val readJournal: JdbcReadJournal =\n      
PersistenceQuery(system).readJournalFor[JdbcReadJournal](JdbcReadJournal.Identifier)\n    f(readJournal)\n  }\n\n  def countJournal(filterPid: String => Boolean = _ => true)(\n      implicit system: ActorSystem,\n      mat: Materializer,\n      readJournal: JdbcReadJournal): Future[Long] =\n    readJournal\n      .currentPersistenceIds()\n      .filter(filterPid(_))\n      .mapAsync(1) { pid =>\n        readJournal\n          .currentEventsByPersistenceId(pid, 0, Long.MaxValue)\n          .map(_ => 1L)\n          .runWith(Sink.seq)\n          .map(_.sum)(system.dispatcher)\n      }\n      .runWith(Sink.seq)\n      .map(_.sum)(system.dispatcher)\n\n  def eventsByTag(tag: String)(implicit mat: Materializer, readJournal: JdbcReadJournal): Future[Seq[AccountEvent]] =\n    readJournal\n      .currentEventsByTag(tag, offset = 0)\n      .map(_.event)\n      .collect { case e: AccountEvent =>\n        e\n      }\n      .runWith(Sink.seq)\n\n  def events(filterPid: String => Boolean = _ => true)(\n      implicit mat: Materializer,\n      readJournal: JdbcReadJournal): Future[Seq[Seq[AccountEvent]]] =\n    readJournal\n      .currentPersistenceIds()\n      .filter(filterPid(_))\n      .mapAsync(1) { pid =>\n        readJournal\n          .currentEventsByPersistenceId(pid, fromSequenceNr = 0, toSequenceNr = Long.MaxValue)\n          .map(e => e.event)\n          .collect { case e: AccountEvent =>\n            e\n          }\n          .runWith(Sink.seq)\n      }\n      .runWith(Sink.seq)\n\n}\n\nobject MigratorSpec {\n\n  private final val Zero: Int = 0\n\n  private final val SnapshotInterval: Int = 10\n\n  val Even: String = \"EVEN\"\n  val Odd: String = \"ODD\"\n\n  /** Commands */\n  sealed trait AccountCommand extends Serializable\n\n  final case class CreateAccount(amount: Int) extends AccountCommand\n\n  final case class Deposit(amount: Int) extends AccountCommand\n\n  final case class Withdraw(amount: Int) extends AccountCommand\n\n  object State extends 
AccountCommand\n\n  /** Events */\n  sealed trait AccountEvent extends Serializable {\n    val amount: Int\n  }\n\n  final case class AccountCreated(override val amount: Int) extends AccountEvent\n\n  final case class Deposited(override val amount: Int) extends AccountEvent\n\n  final case class Withdrawn(override val amount: Int) extends AccountEvent\n\n  /** Reply */\n  final case class CurrentBalance(balance: Int)\n\n  class AccountEventAdapter extends EventAdapter {\n\n    override def manifest(event: Any): String = event.getClass.getSimpleName\n\n    def fromJournal(event: Any, manifest: String): EventSeq = event match {\n      case event: AccountEvent => single(event)\n      case _                   => sys.error(s\"Unexpected case '${event.getClass.getName}'\")\n    }\n\n    def toJournal(event: Any): Any = event match {\n      case event: AccountEvent =>\n        val tag: String = if (event.amount % 2 == 0) Even else Odd\n        Tagged(event, Set(tag))\n      case _ => sys.error(s\"Unexpected case '${event.getClass.getName}'\")\n    }\n  }\n\n  /** Actor */\n  class TestAccountActor(id: Int) extends PersistentActor with Stash {\n    override val persistenceId: String = s\"test-account-$id\"\n\n    var state: Int = Zero\n\n    private def saveSnapshot(): Unit = {\n      if (state % SnapshotInterval == 0) {\n        saveSnapshot(state)\n      }\n    }\n\n    override def receiveCommand: Receive =\n      LoggingReceive {\n\n        case SaveSnapshotSuccess(_: SnapshotMetadata) => ()\n\n        case CreateAccount(balance) =>\n          persist(AccountCreated(balance)) { (event: AccountCreated) =>\n            updateState(event)\n            saveSnapshot()\n            sender() ! akka.actor.Status.Success(event)\n          }\n        case Deposit(balance) =>\n          persist(Deposited(balance)) { (event: Deposited) =>\n            updateState(event)\n            saveSnapshot()\n            sender() ! 
akka.actor.Status.Success(event)\n          }\n        case Withdraw(balance) =>\n          persist(Withdrawn(balance)) { (event: Withdrawn) =>\n            updateState(event)\n            saveSnapshot()\n            sender() ! akka.actor.Status.Success(event)\n          }\n        case State =>\n          sender() ! akka.actor.Status.Success(state)\n      }\n\n    def updateState(event: AccountEvent): Unit = event match {\n      case AccountCreated(amount) => state = state + amount\n      case Deposited(amount)      => state = state + amount\n      case Withdrawn(amount)      => state = state - amount\n    }\n\n    override def receiveRecover: Receive =\n      LoggingReceive {\n        case SnapshotOffer(_, snapshot: Int) =>\n          state = snapshot\n        case event: AccountEvent => updateState(event)\n      }\n  }\n\n  trait PostgresCleaner extends MigratorSpec {\n\n    def clearPostgres(): Unit = {\n      tables.foreach { name =>\n        withStatement(stmt => stmt.executeUpdate(s\"DELETE FROM $name\"))(db)\n      }\n    }\n\n    override def beforeAll(): Unit = {\n      dropAndCreate(Postgres)\n      super.beforeAll()\n    }\n\n    override def beforeEach(): Unit = {\n      dropAndCreate(Postgres)\n      super.beforeEach()\n    }\n  }\n\n  trait MysqlCleaner extends MigratorSpec {\n\n    def clearMySQL(): Unit = {\n      withStatement { stmt =>\n        stmt.execute(\"SET FOREIGN_KEY_CHECKS = 0\")\n        tables.foreach { name => stmt.executeUpdate(s\"TRUNCATE $name\") }\n        stmt.execute(\"SET FOREIGN_KEY_CHECKS = 1\")\n      }(db)\n    }\n\n    override def beforeAll(): Unit = {\n      dropAndCreate(MySQL)\n      super.beforeAll()\n    }\n\n    override def beforeEach(): Unit = {\n      clearMySQL()\n      super.beforeEach()\n    }\n  }\n\n  trait OracleCleaner extends MigratorSpec {\n\n    def clearOracle(): Unit = {\n      tables.foreach { name =>\n        withStatement(stmt => stmt.executeUpdate(s\"\"\"DELETE FROM \"$name\" \"\"\"))(db)\n      
}\n      withStatement(stmt => stmt.executeUpdate(\"\"\"BEGIN \"reset_sequence\"; END; \"\"\"))(db)\n    }\n\n    override def beforeAll(): Unit = {\n      dropAndCreate(Oracle)\n      super.beforeAll()\n    }\n\n    override def beforeEach(): Unit = {\n      clearOracle()\n      super.beforeEach()\n    }\n  }\n\n  trait SqlServerCleaner extends MigratorSpec {\n\n    var initial = true\n\n    def clearSqlServer(): Unit = {\n      val reset = if (initial) {\n        initial = false\n        1\n      } else {\n        0\n      }\n      withStatement { stmt =>\n        tables.foreach { name => stmt.executeUpdate(s\"DELETE FROM $name\") }\n        stmt.executeUpdate(s\"DBCC CHECKIDENT('$legacyJournalTableName', RESEED, $reset)\")\n        stmt.executeUpdate(s\"DBCC CHECKIDENT('$newJournalTableName', RESEED, $reset)\")\n      }(db)\n    }\n\n    override def beforeAll(): Unit = {\n      dropAndCreate(SqlServer)\n      super.beforeAll()\n    }\n\n    override def afterAll(): Unit = {\n      dropAndCreate(SqlServer)\n      super.afterAll()\n    }\n\n    override def beforeEach(): Unit = {\n      clearSqlServer()\n      super.beforeEach()\n    }\n  }\n\n  trait H2Cleaner extends MigratorSpec {\n\n    def clearH2(): Unit = {\n      tables.foreach { name =>\n        withStatement(stmt => stmt.executeUpdate(s\"DELETE FROM $name\"))(db)\n      }\n    }\n\n    override def beforeEach(): Unit = {\n      dropAndCreate(H2)\n      super.beforeEach()\n    }\n  }\n}\n"
  },
  {
    "path": "migrator/src/test/scala/akka/persistence/jdbc/migrator/SnapshotMigratorTest.scala",
    "content": "/*\n * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n * Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n */\n\npackage akka.persistence.jdbc.migrator\n\nimport akka.Done\nimport akka.pattern.ask\nimport akka.persistence.jdbc.db.SlickDatabase\nimport akka.persistence.jdbc.migrator.MigratorSpec._\n\nabstract class SnapshotMigratorTest(configName: String) extends MigratorSpec(configName) {\n\n  it should \"migrate snapshots\" in {\n    withLegacyActorSystem { implicit systemLegacy =>\n      withReadJournal { implicit readJournal =>\n        withTestActors() { (actorA1, actorA2, actorA3) =>\n          (actorA1 ? CreateAccount(1)).futureValue\n          (actorA2 ? CreateAccount(1)).futureValue\n          (actorA3 ? CreateAccount(1)).futureValue\n          for (_ <- 1 to 99) {\n            (actorA1 ? Deposit(1)).futureValue\n            (actorA2 ? Deposit(1)).futureValue\n            (actorA3 ? Deposit(1)).futureValue\n          }\n          eventually {\n            (actorA1 ? State).mapTo[Int].futureValue shouldBe 100\n            (actorA2 ? State).mapTo[Int].futureValue shouldBe 100\n            (actorA3 ? State).mapTo[Int].futureValue shouldBe 100\n            countJournal().futureValue shouldBe 300\n          }\n        }\n      }\n    } // legacy persistence\n    withActorSystem { implicit systemNew =>\n      withReadJournal { implicit readJournal =>\n        eventually {\n          countJournal().futureValue shouldBe 0 // before migration\n          SnapshotMigrator(SlickDatabase.profile(config, \"slick\")).migrateAll().futureValue shouldBe Done\n          countJournal().futureValue shouldBe 0 // after migration\n        }\n        withTestActors() { (actorB1, actorB2, actorB3) =>\n          eventually {\n            (actorB1 ? State).mapTo[Int].futureValue shouldBe 100\n            (actorB2 ? State).mapTo[Int].futureValue shouldBe 100\n            (actorB3 ? 
State).mapTo[Int].futureValue shouldBe 100\n          }\n        }\n      }\n    } // new persistence\n  }\n}\n\nclass H2SnapshotMigratorTest extends SnapshotMigratorTest(\"h2-application.conf\") with MigratorSpec.H2Cleaner\n"
  },
  {
    "path": "migrator-integration/LICENSE",
    "content": "﻿LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT\r\n\r\nTHIS LIGHTBEND COMMERCIAL SOFTWARE LICENSE AGREEMENT (THIS \"AGREEMENT\") IS A LEGAL AGREEMENT BETWEEN YOU (\"USER\") AND LIGHTBEND, INC. (\"LICENSOR\"). \r\nBY CLICKING THE \"I ACCEPT\" BUTTON, OR INSTALLING, COPYING OR OTHERWISE USING LIGHTBEND’S SOFTWARE (THE “SOFTWARE”) AND ITS ASSOCIATED USER DOCUMENTATION, USER ACKNOWLEDGES THAT USER HAS REVIEWED AND ACCEPTS THIS AGREEMENT AND AGREES TO BE BOUND BY ALL OF ITS TERMS. \r\nIF YOU ARE AGREEING TO THIS AGREEMENT AS AN INDIVIDUAL, “USER” REFERS TO YOU INDIVIDUALLY.  IF YOU ARE AGREEING TO THIS AGREEMENT AS A REPRESENTATIVE OF AN ENTITY, YOU REPRESENT THAT YOU HAVE THE AUTHORITY TO BIND THAT ENTITY AND “USER” REFERS TO THAT ENTITY AND ALL THE USERS ACCESSING THE SOFTWARE BY, THROUGH OR ON BEHALF OF THAT ENTITY.  \r\nIF USER DOES NOT AGREE WITH ALL OF THE TERMS OF THIS AGREEMENT, DO NOT INSTALL, COPY OR OTHERWISE USE THE SOFTWARE OR ITS DOCUMENTATION. \r\n\r\n1. DEFINITIONS. \r\n   1. “User System” means User’s website(s), computers, servers and other equipment and software upon and with which the Software is run.\r\n   2. “Documentation” means the user instructions and help files made available by Licensor for use with the Software, as may be updated from time to time by Licensor.\r\n   3. “Intellectual Property Rights” means all intellectual property rights or similar proprietary rights, including \r\n\t(a) patent rights and utility models, \r\n\t(b) copyrights and database rights, \r\n\t(c) trademarks, trade names, domain names and trade dress and the goodwill associated therewith, \r\n\t(d) trade secrets, \r\n\t(e) mask works, and \r\n\t(f) industrial design rights; in each case, including any registrations of, applications to register, and renewals and extensions of, any of the foregoing in any jurisdiction in the world.\r\n   4. 
“Open Source Software” means all software that is available under the GNU Affero General Public License (AGPL), GNU General Public License (GPL), GNU Lesser General Public License (LGPL), Mozilla Public License (MPL), Apache License, BSD licenses, or any other license that is approved by or similar to those approved by the Open Source Initiative (www.opensource.org).\r\n\r\n2. LICENSES AND RESTRICTIONS.  \r\n   1. License.  Subject to User’s compliance with the terms and conditions of this Agreement, Licensor hereby grants to User, during the term of this Agreement, a limited, non-exclusive, non-transferable and non-sublicensable right to \r\n\t(i) install and execute one (1) copy of the Software in accordance with the Documentation, solely in binary form, and not for the benefit of any other person or entity, and \r\n\t(ii) access and use the Documentation, solely for User’s own internally purposes in support of End User’s permitted use of the Software.  \r\n   2. Restrictions.  User shall not, directly or indirectly, or permit any User or third party to: \r\n\t(a) reverse engineer, decompile, disassemble or otherwise attempt to discover the source code or underlying ideas or algorithms of the Software;  \r\n\t(b) modify, translate, or create derivative works based on any element of the Software or any related Documentation (except to the extent applicable laws specifically prohibit such restriction for interoperability purposes, in which case you agree to first contact Licensor and provide Licensor an opportunity to create such changes as are needed for interoperability purposes); \r\n\t(c) use, rent, lease, distribute, sell, resell, assign, or otherwise transfer the Software or any copy thereof; \r\n\t(d) use the Software for timesharing purposes or otherwise for the benefit of any person or entity other than for the benefit of User and Users; \r\n\t(e) remove any proprietary notices from the Software or the Documentation or attempt to defeat any copy protection 
device included with the Software; or \r\n\t(f) use the Software for any purpose other than its intended purpose.\r\n   3. Reservation of Rights.  Nothing in this Agreement shall be construed to give User a right to use or otherwise obtain access to any source code from which the Software is compiled or interpreted.  Except as expressly granted in this Agreement, there are no other licenses granted to User, express, implied or by way of estoppel.  All rights not granted in this Agreement are reserved by Licensor.\r\n   4. Open Source Software.  Notwithstanding the foregoing, certain items of software included with the Software are Open Source Software and remains subject Open Source Software licenses.  Such Open Source Software is not subject to the terms and conditions of this Agreement.  \r\nInstead, each such item of Open Source Software is licensed under the terms of the end user license that accompanies such Open Source Software and nothing in this Agreement limits your rights under, or grants you rights that supersede, the terms and conditions of any applicable end user license for such Open Source Software.  If required by any license for particular Open Source Software, Licensor makes such Open Source Software, and any Licensor modifications to that Open Source Software, available as further described in the Documentation.\r\nUSE OF THE SOFTWARE IN ANY MANNER OTHER THAN AS PROVIDED IN THIS AGREEMENT IS STRICTLY PROHIBITED AND MAY INFRINGE ON THE INTELLECTUAL PROPERTY RIGHTS OF LICENSOR AND/OR ITS LICENSOR(S), SUBJECTING USER TO CIVIL AND CRIMINAL PENALTIES, INCLUDING WITHOUT LIMITATION MONETARY DAMAGES AND IMPRISONMENT FOR COPYRIGHT INFRINGEMENT.\r\n\r\n3. USER OBLIGATIONS.\r\n   1. User System.  
User is responsible for \r\n\t(a) obtaining, deploying and maintaining the User System, and all computer hardware, software, modems, routers and other communications equipment necessary for User and its Users to install and use the Software; and \r\n\t(b) paying all third party fees and access charges incurred in connection with the foregoing.  Licensor shall not be responsible for supplying any hardware, software or other equipment to User under this Agreement.\r\n   2. Compliance with Laws.  User agrees to use the Software in compliance with all applicable laws, including local laws of the country or region in which User resides, and in compliance with all United States export laws and regulations.  User shall not use the Software for any purpose prohibited by applicable law.  \r\n   3. Trademarks and Tradenames.  With regard to all copies of the Software permitted herein, User shall reproduce on such copies all Licensor copyright notices, and other proprietary notices appearing on and in the original copy of the software received from Licensor. Except as set forth in the foregoing sentence, User will not, during the term of this Agreement or thereafter, use any trademark of Licensor, or any word and/or symbol likely to be confused with any Licensor trademark, either alone or in any combination with other words and/or symbols.\r\n\r\n4. SUPPORT AND MAINTENANCE.\r\n   1. Support.  Licensor is not responsible for maintenance or support of the Software, or the equipment on which the Software resides or is used, under this Agreement. By accepting the license granted under this Agreement, User agrees that Licensor will be under no obligation to provide any support, maintenance or service in connection with the Software or such equipment.  \r\n   2. Upgrades and Updates.  
Licensor may from time to time in its sole discretion develop and provide updates for the Software, which may include upgrades, bug fixes, patches, other error corrections, and/or new features (collectively, including related documentation, “Updates”). Updates may also modify or delete in their entirety certain features and functionality. You agree that Licensor has no obligation to provide any Updates or to continue to provide or enable any particular features or functionality. \r\n\r\n5. REPRESENTATIONS AND WARRANTIES; DISCLAIMER.\r\n   1. Mutual Representations and Warranties.  Each party represents, warrants and covenants that: \r\n\t(a) it has the full power and authority to enter into this Agreement and to perform its obligations hereunder, without the need for any consents, approvals or immunities not yet obtained; and \r\n\t(b) its acceptance of and performance under this Agreement shall not breach any oral or written agreement with any third party or any obligation owed by it to any third party to keep any information or materials in confidence or in trust. \r\n   2. Disclaimer.  EXCEPT FOR THE WARRANTIES SET FORTH IN THIS SECTION 5, THE SOFTWARE AND THE DOCUMENTATION ARE PROVIDED ON AN AS-IS BASIS.  USER’S USE OF THE SOFTWARE AND THE DOCUMENTATION IS AT USER’S OWN RISK.  LICENSOR DOES NOT MAKE, AND HEREBY DISCLAIMS, ANY AND ALL OTHER EXPRESS, STATUTORY AND IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT AND TITLE, QUALITY, SUITABILITY, OPERABILITY, CONDITION, SYSTEM INTEGRATION, NON-INTERFERENCE, WORKMANSHIP, TRUTH, ACCURACY (OF DATA OR ANY OTHER INFORMATION OR CONTENT), ABSENCE OF DEFECTS, WHETHER LATENT OR PATENT, AND ANY WARRANTIES ARISING FROM A COURSE OF DEALING, USAGE, OR TRADE PRACTICE.  
LICENSOR ALSO DOES NOT WARRANT THAT THE FUNCTIONS CONTAINED IN, PERFORMED AND/OR PROVIDED BY THE SOFTWARE WILL MEET USER’S REQUIREMENTS, THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR ERROR-FREE, THAT THE SOFTWARE WILL BE COMPATIBLE OR WORK WITH ANY THIRD-PARTY SOFTWARE, APPLICATIONS OR DEVICES, OR THAT DEFECTS IN THE SOFTWARE WILL BE CORRECTED.  USER EXPRESSLY ACKNOWLEDGES AND AGREES THAT, TO THE EXTENT PERMITTED BY APPLICABLE LAW, ITS USE OF THE SOFTWARE IS AT ITS SOLE RISK AND THAT THE ENTIRE RISK AS TO SATISFACTORY QUALITY, PERFORMANCE, ACCURACY AND EFFORT IS WITH USER. USER FURTHER ACKNOWLEDGES AND AGREES THAT THE SOFTWARE IS NOT INTENDED OR SUITABLE FOR USE IN SITUATIONS OR ENVIRONMENTS WHERE THE FAILURE OR TIME DELAYS OF, OR ERRORS OR INACCURACIES IN THE CONTENT, DATA OR INFORMATION PROVIDED BY THE SOFTWARE COULD LEAD TO DEATH, PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE. ANY WARRANTIES MADE BY LICENSOR ARE FOR THE BENEFIT OF USER ONLY AND NOT FOR THE BENEFIT OF ANY THIRD PARTY.  THE SOFTWARE AND THE DOCUMENTATION ARE LICENSED AND NOT SOLD. NO AGENT OF LICENSOR IS AUTHORIZED TO ALTER OR EXPAND THE WARRANTIES OF LICENSOR AS SET FORTH HEREIN.  \r\n\r\n6. INDEMNIFICATION. User shall defend Licensor and its licensors and their respective officers, directors and employees (“Licensor Indemnified Parties”) from and against any and all Third-Party Claims which arise out of or relate to: \r\n\t(a) User’s use or alleged use of the Software other than as permitted under this Agreement; or \r\n\t(b) arising out of or relating to any violation of Section 2.2, or any violation of applicable laws.  User shall pay all damages, costs and expenses, including attorneys’ fees and costs (whether by settlement or award of by a final judicial judgment) incurred by the Licensor Indemnified Parties from any such Third-Party Claim.  In no event shall Licensor settle any claim without User’s prior written approval.  
Licensor may, at its own expense, engage separate counsel to advise Licensor regarding a Third-Party Claim and to participate in the defense of the claim, subject to User’s right to control the defense and settlement. If you are a New Jersey resident, this indemnification clause is to be only as broad and inclusive as is permitted by the law of the state of New Jersey.\r\n\r\n7. CONFIDENTIALITY. \r\n   1. Confidential Information. User acknowledges that the Software contains valuable proprietary information and trade secrets and that unauthorized or improper use of the Software will result in irreparable harm to Licensor for which monetary damages would be inadequate and for which Licensor may be entitled to immediate injunctive relief. Accordingly, you will maintain the confidentiality of the proprietary information and not sell, license, publish, display, distribute, disclose or otherwise make available such proprietary information to any third party, nor use such information except as authorized by this Agreement.\r\n   2. Injunctive Relief.  User agrees that any unauthorized disclosure of confidential information may cause immediate and irreparable injury to Licensor and that, in the event of such breach, Licensor will be entitled, in addition to any other available remedies, to seek immediate injunctive and other equitable relief, without bond and without the necessity of showing actual monetary damages.\r\n\r\n8. PROPRIETARY RIGHTS. \r\n   1. Licensor.  As between Licensor and User, all right, title and interest in the Software, the Documentation, and any other Licensor materials furnished or made available hereunder, and all modifications and enhancements thereof, and all suggestions, ideas and feedback proposed by User regarding the Software, including all copyright rights, patent rights and other Intellectual Property Rights in each of the foregoing, belong to and are retained solely by Licensor or Licensor’s licensors and providers, as applicable.  
User hereby does and will irrevocably assign to Licensor all evaluations, ideas, feedback and suggestions made by User to Licensor regarding the Software or the Documentation (collectively, “Feedback”) and all Intellectual Property Rights in the Feedback.  \r\n\r\n9. LIMITATION OF LIABILITY.\r\n   1. No Consequential Damages.  NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR ANY INDIRECT, INCIDENTAL, SPECIAL, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR ANY DAMAGES FOR LOST DATA, BUSINESS INTERRUPTION, LOST PROFITS, LOST REVENUE OR LOST BUSINESS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT, EVEN IF LICENSOR OR ITS LICENSORS OR USER HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES, INCLUDING WITHOUT LIMITATION, ANY SUCH DAMAGES ARISING OUT OF THE LICENSING, PROVISION OR USE OF THE SOFTWARE OR THE RESULTS OF THE USE OF THE SOFTWARE.  LICENSOR WILL NOT BE LIABLE FOR THE COST OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES.\r\n   2. LIMITS ON LIABILITY.  NEITHER LICENSOR NOR ITS LICENSORS SHALL BE LIABLE FOR CUMULATIVE, AGGREGATE DAMAGES GREATER THAN FIVE HUNDRED DOLLARS (US $500).  \r\n   3. ESSENTIAL PURPOSE.  USER ACKNOWLEDGES THAT THE TERMS IN THIS SECTION 9 (LIMITATION OF LIABILITY) SHALL APPLY TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW AND SHALL APPLY EVEN IF AN EXCLUSIVE OR LIMITED REMEDY STATED HEREIN FAILS OF ITS ESSENTIAL PURPOSE. SOME JURISDICTIONS DO NOT ALLOW CERTAIN LIMITATIONS OF LIABILITY, SO SOME OR ALL OF THE ABOVE LIMITATIONS OF LIABILITY MAY NOT APPLY TO YOU.  IF YOU ARE A NEW JERSEY RESIDENT, THIS LIMITATION OF LIABILITY SECTION IS TO BE ONLY AS BROAD AND INCLUSIVE AS IS PERMITTED BY THE LAW OF THE STATE OF NEW JERSEY.\r\n\r\n10. TERM AND TERMINATION.  \r\n   1. Term.  This Agreement and User’s right to use the Software commences on earlier of the date that User: \r\n\t(a) installs the Software, \r\n\t(b) begins using the Software or \r\n\t(c) otherwise demonstrates assent to this Agreement.  
\r\n\tUser’s right to use the Software shall continue until such time as this Agreement is terminated (the “Term”).  \r\n   2. Termination for Cause.  A party may terminate this Agreement, upon written notice to the other party in the event the other party files a petition for bankruptcy or has a petition for bankruptcy filed against it that is not dismissed within sixty (60) calendar days after filing  or admits its inability to pay its debts as they mature, makes an assignment for the benefit of its creditors or ceases to function as a going concern or to conduct its operations in the normal course of business and such termination shall occur immediately upon notice.  Licensor may terminate this Agreement at any time without notice if it ceases to support the Software, which Licensor may do in its sole discretion. In addition, this Agreement will terminate immediately and automatically without any notice if User breaches any of its terms and conditions.\r\n   3. Termination for Convenience.  Either party may terminate this Agreement for convenience on at least thirty (30) calendar days prior written notice to the other party.  User may also terminate this Agreement by ceasing all use of the Software.\r\n   4. Effects of Termination.  Upon expiration or termination of this Agreement, User’s shall cease all use of the Software and the Documentation and shall destroy all copies of the Software in User’s possession or control.\r\n   5. Survival.  This Section and Sections 1, 2.2 (Restrictions), 2.3 (Reservation of Rights), 2.4 (Open Source Software), 5.2 (Disclaimer), 6 (Indemnification), 7 (Confidentiality), 8 (Proprietary Rights), 9 (Limitation of Liability), 10.4 (Effects of Termination) and 11 (Miscellaneous) shall survive any termination or expiration of this Agreement. \r\n\r\n11. MISCELLANEOUS.\r\n   1. Notices.  
Licensor may give notice to User by means of a general notice through electronic mail to User’s e-mail address, or by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service to User’s address on record with Licensor. User may give notice to Licensor by written communication sent by first class postage prepaid mail or nationally recognized overnight delivery service addressed to Licensor, Lightbend Inc., 580 California, #1231, San Francisco, CA 94104, Attention: User Support.  Notice shall be deemed to have been given upon receipt or, if earlier, two (2) business days after mailing, as applicable. All communications and notices to be made or given pursuant to this Agreement shall be in the English language.  \r\n   2. Governing Law.  This Agreement and the rights and obligations of the parties to and under this agreement shall be governed by and construed under the laws of the United States and the State of California as applied to agreements entered into and to be performed in such State without giving effect to conflicts of laws rules or principles.  The parties agree that the United Nations Convention on Contracts for the International Sale of Goods is specifically excluded from application to this Agreement and that the application of the Uniform Computer Information Transactions Act (UCITA) is specifically disclaimed.  Any dispute arising out of or in connection with this Agreement, including but not limited to any question regarding its existence, interpretation, validity, performance, or termination, or any dispute between the parties arising from the parties' relationship created by this Agreement, shall be referred to and finally resolved by arbitration administered by the American Arbitration Association under its rules.  The number of arbitrators shall be one (1).  The parties shall endeavor to agree upon the sole arbitrator and jointly nominate the arbitrator.  
If the parties cannot agree upon the sole arbitrator within a time prescribed by AAA, the parties shall request the AAA to propose five (5) arbitrators and each party shall rank the proposed arbitrators.  The AAA shall appoint an arbitrator from the list of five (5), based upon the parties' rankings.  \r\n   3. U.S. Government Users.  If User is a Federal Government entity, Licensor provides the Software and the Documentation, including related software and technology, for ultimate Federal Government end use solely in accordance with the following:  Government technical data rights include only those rights customarily provided to the public with a commercial item or process and Government software rights related to the Software and the Documentation include only those rights customarily provided to the public, as defined in this Agreement.  The technical data rights and customary commercial software license is provided in accordance with FAR 12.211 (Technical Data) and FAR 12.212 (Software) and, for Department of Defense transactions, DFAR 252.227-7015 (Technical Data – Commercial Items) and DFAR 227.7202-3 (Rights in Commercial Computer Software or Computer Software Documentation).  If greater rights are needed, a mutually acceptable written addendum specifically conveying such rights must be included in this Agreement. \r\n   4. Export.  The Software utilizes software and technology that may be subject to United States and foreign export controls. User acknowledges and agrees that the Software shall not be used, and none of the underlying information, software, or technology may be transferred or otherwise exported or re-exported to countries as to which the United States maintains an embargo (collectively, “Embargoed Countries”), or to or by a national or resident thereof, or any person or entity on the U.S. Department of Treasury’s List of Specially Designated Nationals or the U.S. 
Department of Commerce’s Table of Denial Orders (collectively, “Designated Nationals”). The lists of Embargoed Countries and Designated Nationals are subject to change without notice. By using the Software, User represents and warrants that it is not located in, under the control of, or a national or resident of an Embargoed Country or Designated National. The Software may use encryption technology that is subject to licensing requirements under the U.S. Export Administration Regulations, 15 C.F.R. Parts 730-774 and Council Regulation (EC) No. 1334/2000. User agrees to comply strictly with all applicable export laws and assume sole responsibility for obtaining licenses to export or re-export as may be required. Licensor and its licensors make no representation that the Software is appropriate or available for use in other locations. By using the Software, User represents and warrants that it is not located in any such country or on any such list.\r\n   5. General.  User shall not assign its rights hereunder, or delegate the performance of any of its duties or obligations hereunder, whether by merger, acquisition, sale of assets, operation of law, or otherwise, without the prior written consent of Licensor.  Any purported assignment in violation of the preceding sentence is null and void.  Subject to the foregoing, this Agreement shall be binding upon, and inure to the benefit of, the successors and assigns of the parties thereto.  Except as otherwise specified in this Agreement, this Agreement may be amended or supplemented only by a writing that refers explicitly to this Agreement and that is signed on behalf of both parties.  No waiver will be implied from conduct or failure to enforce rights.  No waiver will be effective unless in a writing signed on behalf of the party against whom the waiver is asserted.  
If any of this Agreement is found invalid or unenforceable that term will be enforced to the maximum extent permitted by law and the remainder of this Agreement will remain in full force.  \r\nNothing contained herein shall be construed as creating an agency, partnership, or other form of joint enterprise between the parties.  \r\nThis Agreement constitutes the entire agreement between the parties relating to this subject matter and supersedes all prior or simultaneous understandings, representations, discussions, negotiations, and agreements, whether written or oral.  \r\nNeither party shall be liable to the other party or any third party for failure or delay in performing its obligations under this Agreement when such failure or delay is due to any cause beyond the control of the party concerned, including, without limitation, force majeure, governmental orders or restrictions, fire, or flood, provided that upon cessation of such events such party shall thereupon promptly perform or complete the performance of its obligations hereunder."
  },
  {
    "path": "migrator-integration/src/test/scala/akka/persistence/jdbc/migrator/integration/JournalMigratorTest.scala",
    "content": "package akka.persistence.jdbc.migrator.integration\n\nimport akka.persistence.jdbc.migrator.MigratorSpec._\nimport akka.persistence.jdbc.migrator.JournalMigratorTest\n\nclass PostgresJournalMigratorTest extends JournalMigratorTest(\"postgres-application.conf\") with PostgresCleaner\n\nclass MySQLJournalMigratorTest extends JournalMigratorTest(\"mysql-application.conf\") with MysqlCleaner\n\nclass OracleJournalMigratorTest extends JournalMigratorTest(\"oracle-application.conf\") with OracleCleaner\n\nclass SqlServerJournalMigratorTest extends JournalMigratorTest(\"sqlserver-application.conf\") with SqlServerCleaner\n"
  },
  {
    "path": "migrator-integration/src/test/scala/akka/persistence/jdbc/migrator/integration/SnapshotMigratorTest.scala",
    "content": "package akka.persistence.jdbc.migrator.integration\n\nimport akka.persistence.jdbc.migrator.MigratorSpec._\nimport akka.persistence.jdbc.migrator.SnapshotMigratorTest\n\nclass PostgresSnapshotMigratorTest extends SnapshotMigratorTest(\"postgres-application.conf\") with PostgresCleaner\n\nclass MySQLSnapshotMigratorTest extends SnapshotMigratorTest(\"mysql-application.conf\") with MysqlCleaner\n\nclass OracleSnapshotMigratorTest extends SnapshotMigratorTest(\"oracle-application.conf\") with OracleCleaner\n\nclass SqlServerSnapshotMigratorTest extends SnapshotMigratorTest(\"sqlserver-application.conf\") with SqlServerCleaner\n"
  },
  {
    "path": "project/AutomaticModuleName.scala",
    "content": "/**\n * Copyright (C) 2009-2018 Lightbend Inc. <http://www.lightbend.com>\n */\nimport sbt.Keys._\nimport sbt.{ Def, _ }\n\n/**\n * Helper to set Automatic-Module-Name in projects.\n *\n * !! DO NOT BE TEMPTED INTO AUTOMATICALLY DERIVING THE NAMES FROM PROJECT NAMES !!\n *\n * The names carry a lot of implications and DO NOT have to always align 1:1 with the group ids or package names,\n * though there should be of course a strong relationship between them.\n */\nobject AutomaticModuleName {\n  private val AutomaticModuleName = \"Automatic-Module-Name\"\n\n  def settings(name: String): Seq[Def.Setting[Task[Seq[PackageOption]]]] =\n    Seq(Compile / packageBin / packageOptions += Package.ManifestAttributes(AutomaticModuleName -> name))\n}\n"
  },
  {
    "path": "project/Dependencies.scala",
    "content": "import sbt._\n\nobject Dependencies {\n\n  // Java Platform version for JavaDoc creation\n  lazy val JavaDocLinkVersion = scala.util.Properties.javaSpecVersion\n\n  val Scala213 = \"2.13.17\"\n  val Scala3 = \"3.3.7\"\n\n  val ScalaVersions = Seq(Scala213, Scala3)\n\n  val AkkaVersion = \"2.10.11\"\n  val AkkaBinaryVersion = VersionNumber(AkkaVersion).numbers match { case Seq(major, minor, _*) => s\"$major.$minor\" }\n\n  val SlickVersion = \"3.6.1\"\n  val ScalaTestVersion = \"3.2.19\"\n\n  val JdbcDrivers = Seq(\n    \"org.postgresql\" % \"postgresql\" % \"42.7.7\",\n    \"com.h2database\" % \"h2\" % \"2.3.232\",\n    \"com.mysql\" % \"mysql-connector-j\" % \"9.4.0\",\n    \"com.microsoft.sqlserver\" % \"mssql-jdbc\" % \"7.4.1.jre8\")\n\n  val Libraries: Seq[ModuleID] = Seq(\n    \"com.typesafe.akka\" %% \"akka-persistence-query\" % AkkaVersion,\n    \"com.typesafe.slick\" %% \"slick\" % SlickVersion,\n    \"org.slf4j\" % \"slf4j-api\" % \"2.0.17\",\n    \"com.typesafe.slick\" %% \"slick-hikaricp\" % SlickVersion,\n    \"ch.qos.logback\" % \"logback-classic\" % \"1.5.18\" % Test,\n    \"com.typesafe.akka\" %% \"akka-slf4j\" % AkkaVersion % Test,\n    \"com.typesafe.akka\" %% \"akka-persistence-tck\" % AkkaVersion % Test,\n    \"com.typesafe.akka\" %% \"akka-stream-testkit\" % AkkaVersion % Test,\n    \"com.typesafe.akka\" %% \"akka-testkit\" % AkkaVersion % Test,\n    \"org.scalatest\" %% \"scalatest\" % ScalaTestVersion % Test) ++ JdbcDrivers.map(_ % Test)\n\n  val Migration: Seq[ModuleID] = Seq(\n    \"com.typesafe\" % \"config\" % \"1.4.5\",\n    \"ch.qos.logback\" % \"logback-classic\" % \"1.5.18\",\n    \"org.testcontainers\" % \"postgresql\" % \"1.21.3\" % Test,\n    \"org.scalatest\" %% \"scalatest\" % ScalaTestVersion % Test) ++ JdbcDrivers.map(_ % Provided)\n}\n"
  },
  {
    "path": "project/IntegrationTests.scala",
    "content": "import de.heikoseeberger.sbtheader.HeaderPlugin.autoImport.headerSettings\nimport sbt._\nimport sbt.Keys._\n\nobject IntegrationTests {\n\n  def settings: Seq[Def.Setting[_]] =\n    Seq(publish / skip := true, doc / sources := Seq.empty, Test / fork := true)\n\n}\n"
  },
  {
    "path": "project/ProjectAutoPlugin.scala",
    "content": "import com.geirsson.CiReleasePlugin\nimport de.heikoseeberger.sbtheader.HeaderPlugin\nimport de.heikoseeberger.sbtheader.HeaderPlugin.autoImport.{ headerLicense, HeaderLicense }\nimport sbt.Keys._\nimport sbt._\nimport sbt.plugins.JvmPlugin\nimport sbtdynver.DynVerPlugin.autoImport.dynverSonatypeSnapshots\n\nobject ProjectAutoPlugin extends AutoPlugin {\n  object autoImport {}\n\n  override val requires = JvmPlugin && HeaderPlugin\n\n  override def globalSettings =\n    Seq(\n      organization := \"com.lightbend.akka\",\n      organizationName := \"Lightbend Inc.\",\n      organizationHomepage := Some(url(\"https://akka.io\")),\n      homepage := Some(url(\"https://doc.akka.io/libraries/akka-persistence-jdbc/current/\")),\n      scmInfo := Some(\n        ScmInfo(url(\"https://github.com/akka/akka-persistence-jdbc\"), \"git@github.com:akka/akka-persistence-jdbc.git\")),\n      developers += Developer(\n        \"contributors\",\n        \"Contributors\",\n        \"akka.official@gmail.com\",\n        url(\"https://github.com/akka/akka-persistence-jdbc/graphs/contributors\")),\n      releaseNotesURL := (\n        if ((ThisBuild / isSnapshot).value) None\n        else Some(url(s\"https://github.com/akka/akka-persistence-jdbc/releases/tag/v${version.value}\"))\n      ),\n      licenses := {\n        val tagOrBranch =\n          if (version.value.endsWith(\"SNAPSHOT\")) \"master\"\n          else \"v\" + version.value\n        Seq((\"BUSL-1.1\", url(s\"https://raw.githubusercontent.com/akka/akka-persistence-jdbc/${tagOrBranch}/LICENSE\")))\n      },\n      description := \"A plugin for storing events in an event journal akka-persistence-jdbc\",\n      startYear := Some(2014))\n\n  override val trigger: PluginTrigger = allRequirements\n\n  override val projectSettings: Seq[Setting[_]] = Seq(\n    crossVersion := CrossVersion.binary,\n    crossScalaVersions := Dependencies.ScalaVersions,\n    scalaVersion := Dependencies.Scala213,\n    // append -SNAPSHOT 
to version when isSnapshot\n    ThisBuild / dynverSonatypeSnapshots := true,\n    Test / fork := false,\n    Test / parallelExecution := false,\n    Test / logBuffered := true,\n    javacOptions ++= Seq(\"--release\", \"11\"),\n    scalacOptions ++=\n      (CrossVersion.partialVersion(scalaVersion.value) match {\n        case Some((2, _)) =>\n          Seq(\n            \"-encoding\",\n            \"UTF-8\",\n            \"-unchecked\",\n            \"-Xlog-reflective-calls\",\n            \"-language:higherKinds\",\n            \"-language:implicitConversions\",\n            \"-Ydelambdafy:method\",\n            \"-release\",\n            \"11\")\n        case Some((3, _)) =>\n          Seq(\n            \"-encoding\",\n            \"UTF-8\",\n            \"-unchecked\",\n            \"-language:higherKinds\",\n            \"-language:implicitConversions\",\n            \"-release\",\n            \"11\")\n        case _ => Seq.empty\n      }),\n    Compile / scalacOptions ++= (CrossVersion.partialVersion(scalaVersion.value) match {\n      case Some((2, 13)) => disciplineScalacOptions\n      case _             => Nil\n    }).toSeq,\n    Compile / doc / scalacOptions := scalacOptions.value ++ (CrossVersion.partialVersion(scalaVersion.value) match {\n      case Some((2, _)) =>\n        Seq(\n          \"-doc-title\",\n          \"Akka Persistence JDBC\",\n          \"-doc-version\",\n          version.value,\n          \"-sourcepath\",\n          (ThisBuild / baseDirectory).value.toString,\n          \"-skip-packages\",\n          \"akka.pattern\", // for some reason Scaladoc creates this\n          \"-doc-source-url\", {\n            val branch = if (isSnapshot.value) \"master\" else s\"v${version.value}\"\n            s\"https://github.com/akka/akka-persistence-jdbc/tree/${branch}€{FILE_PATH_EXT}#L€{FILE_LINE}\"\n          },\n          \"-doc-canonical-base-url\",\n          \"https://doc.akka.io/api/akka-persistence-jdbc/current/\",\n          
\"-jdk-api-doc-base\",\n          s\"https://docs.oracle.com/en/java/javase/${Dependencies.JavaDocLinkVersion}/docs/api\")\n\n      case Some((3, _)) =>\n        Seq(\n          \"-doc-title\",\n          \"Akka Persistence JDBC\",\n          \"-doc-version\",\n          version.value,\n          \"-sourcepath\",\n          (ThisBuild / baseDirectory).value.toString,\n          \"-doc-source-url\", {\n            val branch = if (isSnapshot.value) \"master\" else s\"v${version.value}\"\n            s\"https://github.com/akka/akka-persistence-jdbc/tree/${branch}€{FILE_PATH_EXT}#L€{FILE_LINE}\"\n          },\n          \"-doc-canonical-base-url\",\n          \"https://doc.akka.io/api/akka-persistence-jdbc/current/\",\n          s\"-external-mappings:https://docs.oracle.com/en/java/javase/${Dependencies.JavaDocLinkVersion}/docs/api\")\n      case _ => throw new IllegalArgumentException(\"Unsupported Major Scala Version\")\n    }),\n    // show full stack traces and test case durations\n    Test / testOptions += Tests.Argument(\"-oDF\"),\n    headerLicense := Some(HeaderLicense.Custom(\"\"\"|Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend>\n           |Copyright (C) 2019 - 2025 Lightbend Inc. <https://akka.io>\n           |\"\"\".stripMargin)),\n    resolvers += Resolver.jcenterRepo)\n\n  val disciplineScalacOptions = Set(\n//    \"-Xfatal-warnings\",\n    \"-feature\",\n    \"-deprecation\",\n    \"-Xlint\",\n    \"-Ywarn-dead-code\",\n    \"-Ywarn-unused:_\",\n    \"-Ywarn-extra-implicit\")\n\n}\n"
  },
  {
    "path": "project/Publish.scala",
    "content": "/*\n * Copyright (C) 2023 Lightbend Inc. <https://www.lightbend.com>\n */\n\nimport java.util.concurrent.atomic.AtomicBoolean\n\nimport scala.language.postfixOps\n\nimport sbt.{ Def, _ }\nimport Keys._\nimport com.geirsson.CiReleasePlugin\nimport com.jsuereth.sbtpgp.PgpKeys.publishSigned\nimport xerial.sbt.Sonatype.autoImport.sonatypeProfileName\n\n/**\n * For projects that are not published.\n */\nobject NoPublish extends AutoPlugin {\n  override def requires = plugins.JvmPlugin\n\n  override def projectSettings =\n    Seq(publish / skip := true, publishArtifact := false, publish := {}, publishLocal := {})\n}\n\nobject Publish extends AutoPlugin {\n  override def requires = plugins.JvmPlugin && ProjectAutoPlugin\n  override def trigger = AllRequirements\n\n  lazy val beforePublishTask = taskKey[Unit](\"setup before publish\")\n\n  lazy val beforePublishDone = new AtomicBoolean(false)\n\n  def beforePublish(snapshot: Boolean) = {\n    if (beforePublishDone.compareAndSet(false, true)) {\n      CiReleasePlugin.setupGpg()\n      if (!snapshot)\n        cloudsmithCredentials(validate = true)\n    }\n  }\n\n  override def projectSettings: Seq[Def.Setting[_]] = Seq(\n    sonatypeProfileName := \"com.lightbend\",\n    beforePublishTask := beforePublish(isSnapshot.value),\n    publishSigned := publishSigned.dependsOn(beforePublishTask).value,\n    publishTo :=\n      (if (isSnapshot.value)\n         Some(\"Cloudsmith API\".at(\"https://maven.cloudsmith.io/lightbend/akka-snapshots/\"))\n       else\n         Some(\"Cloudsmith API\".at(\"https://maven.cloudsmith.io/lightbend/akka/\"))),\n    credentials ++= cloudsmithCredentials(validate = false))\n\n  def cloudsmithCredentials(validate: Boolean): Seq[Credentials] = {\n    (sys.env.get(\"PUBLISH_USER\"), sys.env.get(\"PUBLISH_PASSWORD\")) match {\n      case (Some(user), Some(password)) =>\n        Seq(Credentials(\"Cloudsmith API\", \"maven.cloudsmith.io\", user, password))\n      case _ =>\n        if 
(validate)\n          throw new Exception(\"Publishing credentials expected in `PUBLISH_USER` and `PUBLISH_PASSWORD`.\")\n        else\n          Nil\n    }\n  }\n}\n"
  },
  {
    "path": "project/build.properties",
    "content": "#\n# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n#\nsbt.version=1.11.7\n"
  },
  {
    "path": "project/plugins.sbt",
    "content": "// compliance\naddSbtPlugin(\"de.heikoseeberger\" % \"sbt-header\" % \"5.10.0\")\naddSbtPlugin(\"org.scalameta\" % \"sbt-scalafmt\" % \"2.4.6\")\naddSbtPlugin(\"com.typesafe\" % \"sbt-mima-plugin\" % \"1.1.4\")\naddSbtPlugin(\"com.lightbend.sbt\" % \"sbt-java-formatter\" % \"0.8.0\")\n\n// for dependency analysis\naddDependencyTreePlugin\n\n// release\naddSbtPlugin(\"com.github.sbt\" % \"sbt-ci-release\" % \"1.9.3\")\naddSbtPlugin(\"com.github.sbt\" % \"sbt-dynver\" % \"5.1.1\")\n\n// docs\naddSbtPlugin(\"io.akka\" % \"sbt-paradox-akka\" % \"25.10.2\")\naddSbtPlugin(\"com.github.sbt\" % \"sbt-site\" % \"1.7.0\")\naddSbtPlugin(\"com.github.sbt\" % \"sbt-site-paradox\" % \"1.7.0\")\naddSbtPlugin(\"com.github.sbt\" % \"sbt-unidoc\" % \"0.6.0\")\naddSbtPlugin(\"com.lightbend.sbt\" % \"sbt-publish-rsync\" % \"0.3\")\n"
  },
  {
    "path": "project/project-info.conf",
    "content": "project-info {\n  version: \"current\"\n  scaladoc: \"https://doc.akka.io/api/akka-persistence-jdbc/\"${project-info.version}\"/akka/persistence/jdbc/\"\n  scala-versions: [\"2.13\", \"3.3\"]\n  shared-info {\n    jdk-versions: [\"Eclipse Temurin JDK 11\", \"Eclipse Temurin JDK 17\", \"Eclipse Temurin JDK 21\"]\n    snapshots: {\n      url: \"snapshots.html\"\n      text: \"Snapshots are available\"\n      new-tab: false\n    }\n    issues: {\n      url: \"https://github.com/akka/akka-persistence-jdbc/issues\"\n      text: \"GitHub issues\"\n    }\n    release-notes: {\n      url: \"https://github.com/akka/akka-persistence-jdbc/releases\"\n      text: \"GitHub releases\"\n    }\n    forums: [\n      {\n        text: \"Lightbend Discuss\"\n        url: \"https://discuss.akka.io/c/akka/\"\n      }\n    ]\n  }\n  core: ${project-info.shared-info} {\n    title: \"Akka Persistence JDBC\"\n    jpms-name: \"akka.persistence.jdbc\"\n    levels: [\n      {\n        readiness: Supported\n        since: \"2022-10-05\"\n        since-version: \"5.1.0\"\n      },\n      {\n        readiness: Supported\n        since: \"2021-01-21\"\n        since-version: \"5.0.0\"\n      },\n      {\n        readiness: Supported\n        since: \"2020-06-09\"\n        since-version: \"4.0.0\"\n      },\n      {\n        readiness: CommunityDriven\n        since: \"2014-07-04\"\n        since-version: \"1.0.0\"\n      }\n    ]\n  }\n}\n"
  },
  {
    "path": "scripts/cat-log.sh",
    "content": "#!/bin/sh\n# ---------- helper script to separate log files in build\nprintf \"\\n\\n\\n\"\nls -lh $1\nprintf \"\\n\\n\"\ncat $1"
  },
  {
    "path": "scripts/create-release-issue.sh",
    "content": "#!/bin/bash\n\nVERSION=$1\nif [ -z $VERSION ]\nthen\n  echo specify the version name to be released, eg. 1.0.0\nelse\n  sed -e 's/\\$VERSION\\$/'$VERSION'/g' docs/release-train-issue-template.md > /tmp/release-$VERSION.md\n  echo Created $(gh issue create -F /tmp/release-$VERSION.md --title \"Release $VERSION\" --milestone $VERSION --web)\nfi\n"
  },
  {
    "path": "scripts/docker-compose.yml",
    "content": "services:\n  postgres:\n    image: postgres:latest\n    container_name: postgres-test\n    environment:\n      - \"TZ=Europe/Amsterdam\"\n      - \"POSTGRES_USER=docker\"\n      - \"POSTGRES_PASSWORD=docker\"\n    ports:\n      - \"5432:5432\"  # credentials (docker:docker)\n\n  mysql:\n    image: mysql:latest\n    container_name: mysql-test\n    environment:\n      - \"TZ=Europe/Amsterdam\"\n      - \"MYSQL_ROOT_PASSWORD=root\"\n      - \"MYSQL_DATABASE=docker\"\n    ports:\n      - \"3306:3306\" # credentials (root:root)\n\n  oracle:\n    image: gvenzl/oracle-free:slim\n    container_name: oracle-test\n    environment:\n      - \"TZ=Europe/Amsterdam\"\n      - \"DBCA_TOTAL_MEMORY=1024\"\n      - \"ORACLE_PASSWORD=oracle\"\n    ports:\n      - \"1521:1521\" # DB_CONN: credentials (system:oracle)\n\n  sqlserver:\n    image: mcr.microsoft.com/mssql/server:2019-latest\n    container_name: sqlserver-test\n    environment:\n      - \"TZ=Europe/Amsterdam\"\n      - \"ACCEPT_EULA=Y\"\n      - \"MSSQL_SA_PASSWORD=docker123abc#\"\n    ports:\n      - \"1433:1433\" # credentials (sa:docker123abc#)\n"
  },
  {
    "path": "scripts/launch-all.sh",
    "content": "#!/bin/bash\n#\n# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n#\nexport VM_HOST=\"${VM_HOST:-localhost}\"\n\n# Wait for a certain service to become available\n# Usage: wait 3306 Mysql\nwait() {\nwhile true; do\n  if ! nc -z $VM_HOST $1\n  then\n    echo \"$2 not available, retrying...\"\n    sleep 1\n  else\n    echo \"$2 is available\"\n    break;\n  fi\ndone;\n}\n\ndocker compose -f scripts/docker-compose.yml kill\ndocker compose -f scripts/docker-compose.yml rm -f\ndocker compose -f scripts/docker-compose.yml up -d\nwait 3306 MySQL\nwait 5432 Postgres\nwait 1521 Oracle\nwait 1433 SqlServer\n"
  },
  {
    "path": "scripts/launch-mysql.sh",
    "content": "#!/bin/bash\n#\n# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n#\nexport VM_HOST=\"${VM_HOST:-localhost}\"\n\n# Wait for a certain service to become available\n# Usage: wait 3306 Mysql\nwait() {\nwhile true; do\n  if ! nc -z $VM_HOST $1\n  then\n    echo \"$2 not available, retrying...\"\n    sleep 1\n  else\n    echo \"$2 is available\"\n    break;\n  fi\ndone;\n}\n\ndocker compose -f scripts/docker-compose.yml kill mysql\ndocker compose -f scripts/docker-compose.yml rm -f mysql\ndocker compose -f scripts/docker-compose.yml up -d mysql\nwait 3306 MySQL\n"
  },
  {
    "path": "scripts/launch-oracle.sh",
    "content": "#!/bin/bash\n#\n# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n#\nexport VM_HOST=\"${VM_HOST:-localhost}\"\n\n# Wait for a certain service to become available\n# Usage: wait 3306 Mysql\nwait() {\nwhile true; do\n  if ! nc -z $VM_HOST $1\n  then\n    echo \"$2 not available, retrying...\"\n    sleep 1\n  else\n    echo \"$2 is available\"\n    break;\n  fi\ndone;\n}\n\ndocker compose -f scripts/docker-compose.yml kill oracle\ndocker compose -f scripts/docker-compose.yml rm -f oracle\ndocker compose -f scripts/docker-compose.yml up -d oracle\nwait 1521 Oracle\n"
  },
  {
    "path": "scripts/launch-postgres.sh",
    "content": "#!/bin/bash\n#\n# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n#\nexport VM_HOST=\"${VM_HOST:-localhost}\"\n\n# Wait for a certain service to become available\n# Usage: wait 3306 Mysql\nwait() {\nwhile true; do\n  if ! nc -z $VM_HOST $1\n  then\n    echo \"$2 not available, retrying...\"\n    sleep 1\n  else\n    echo \"$2 is available\"\n    break;\n  fi\ndone;\n}\n\ndocker compose -f scripts/docker-compose.yml kill postgres\ndocker compose -f scripts/docker-compose.yml rm -f postgres\ndocker compose -f scripts/docker-compose.yml up -d postgres\nwait 5432 Postgres\n"
  },
  {
    "path": "scripts/launch-sqlserver.sh",
    "content": "#!/bin/bash\n#\n# Copyright 2016 Dennis Vriend\n# Copyright (C) 2019 - 2022 Lightbend Inc. <https://www.lightbend.com>\n#\nexport VM_HOST=\"${VM_HOST:-localhost}\"\n\n# Wait for a certain service to become available\n# Usage: wait 1433 SqlServer\nwait() {\nwhile true; do\n  if ! nc -z $VM_HOST $1\n  then\n    echo \"$2 not available, retrying...\"\n    sleep 1\n  else\n    echo \"$2 is available\"\n    break;\n  fi\ndone;\n}\n\ndocker compose -f scripts/docker-compose.yml kill sqlserver\ndocker compose -f scripts/docker-compose.yml rm -f sqlserver\ndocker compose -f scripts/docker-compose.yml up -d sqlserver\nwait 1433 SqlServer\ndocker exec sqlserver-test /opt/mssql-tools18/bin/sqlcmd -N o -S localhost -U sa -P docker123abc# -Q \"create database docker\""
  },
  {
    "path": "scripts/link-validator.conf",
    "content": "// config for https://github.com/ennru/site-link-validator/\nsite-link-validator {\n  root-dir = \"./docs/target/site/\"\n  # relative to `root-dir`\n  start-file = \"libraries/akka-persistence-jdbc/snapshot/index.html\"\n\n  # Resolves URLs with the given prefix as local files instead\n  link-mappings = [\n    {\n      prefix = \"https://doc.akka.io/libraries/akka-persistence-jdbc/snapshot/\"\n      replace = \"/libraries/akka-persistence-jdbc/snapshot/\"\n    }\n    {\n      prefix = \"https://doc.akka.io/api/akka-persistence-jdbc/snapshot/\"\n      replace = \"/api/akka-persistence-jdbc/snapshot/\"\n    }\n  ]\n\n  ignore-missing-local-files-regex = \"\"\n\n  ignore-prefixes = [\n    # GitHub will block with \"429 Too Many Requests\"\n    \"https://github.com/akka/akka-persistence-jdbc/\"\n    # MVN repository forbids access after a few requests\n    \"https://mvnrepository.com/artifact/\",\n    \"https://repo.akka.io/\"\n  ]\n\n  non-https-whitelist = [\n    \"http://logback.qos.ch/\"\n    \"http://www.slf4j.org/\"\n  ]\n}\n"
  },
  {
    "path": "scripts/mysql-cli.sh",
    "content": "#!/bin/bash\necho \"==================   Help for mysql cli  =========================\"\necho \"=================================================================\"\ndocker exec -it mysql-test mysql --user=root --password=root mysql\n"
  },
  {
    "path": "scripts/oracle-cli.sh",
    "content": "#!/bin/bash\necho \"==================  Help for oracle cli  ========================\"\necho \"=================================================================\"\ndocker exec -it oracle-test sqlplus system/oracle\n"
  },
  {
    "path": "scripts/psql-cli.sh",
    "content": "#!/bin/bash\necho \"==================     Help for psql    =========================\"\necho \"\\l or \\list                : shows all databases\"\necho \"\\d                         : shows all tables, views and sequences\"\necho \"\\dn                        : shows all schemas\"\necho \"\\d table_name              : describe table, view, sequence, or index\"\necho \"\\c database_name           : connect to a database\"\necho \"\\q                         : quit\"\necho \"\\?                         : for more commands\"\necho \"====================    Extensions    ===========================\"\necho \"create extension pgcrypto; : installs cryptographic functions\"\necho \"====================    Some SQL    =============================\"\necho \"select gen_random_uuid();  : returns a random uuid (pgcrypto)\"\necho \"select version();          : return the server version\"\necho \"select current_date;       : returns the current date\"\necho \"=================================================================\"\ndocker exec -it postgres-test psql --dbname=docker --username=docker"
  },
  {
    "path": "scripts/sqlserver-cli.sh",
    "content": "#!/bin/bash\necho \"==================  Help for SqlServer cli  ========================\"\necho \"=================================================================\"\n\ndocker exec -it sqlserver-test /opt/mssql-tools18/bin/sqlcmd -N o -S localhost -U sa -P docker123abc# -d docker\n"
  }
]