[
  {
    "path": ".git-blame-ignore-revs",
    "content": "# Commit that replaced interface{} with any across the codebase.\n# Purely mechanical rename with no behavioral change.\n3ab64422067e24efb0b6b30eea0396d0e9395aee\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "version: 2\nupdates:\n  - package-ecosystem: \"uv\"\n    directory: \"/docs\"\n    schedule:\n      interval: \"daily\"\n    allow:\n      - dependency-name: \"sphinx-scylladb-theme\"\n      - dependency-name: \"sphinx-multiversion-scylla\""
  },
  {
    "path": ".github/issue_template.md",
    "content": "Please answer these questions before submitting your issue. Thanks!\n\n### What version of ScyllaDB or Cassandra are you using?\n\n\n### What version of ScyllaDB Gocql driver are you using?\n\n\n### What version of Go are you using?\n\n\n### What did you do?\n\n\n### What did you expect to see?\n\n\n### What did you see instead?\n\n---\n\nIf you are having connectivity related issues please share the following additional information\n\n### Describe your Cassandra cluster\nplease provide the following information\n\n- output of `nodetool status`\n- output of `SELECT peer, rpc_address FROM system.peers`\n- rebuild your application with the `gocql_debug` tag and post the output\n"
  },
  {
    "path": ".github/workflows/bench-tests.yml",
    "content": "name: Run benchmark tests\n\non:\n  push:\n    branches:\n      - master\n  pull_request:\n    types: [opened, synchronize, reopened]\n\njobs:\n  bench-tests:\n    if: contains(github.event.pull_request.labels.*.name, 'run-benchmark-tests')\n    name: Run benchmark tests\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6\n      - uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6\n        with:\n          go-version-file: go.mod\n          cache-dependency-path: |\n            lz4/go.sum\n            tests/bench/go.sum\n            go.sum\n\n\n      - name: Run benchmark tests\n        run: make test-bench\n"
  },
  {
    "path": ".github/workflows/call_jira_sync.yml",
    "content": "name: Sync Jira Based on PR Events\n\non:\n  pull_request_target:\n    types: [opened, edited, ready_for_review, review_requested, labeled, unlabeled, closed]\n\npermissions:\n  contents: read\n  pull-requests: write\n  issues: write\n\njobs:\n  jira-sync:\n    uses: scylladb/github-automation/.github/workflows/main_pr_events_jira_sync.yml@main\n    with:\n      caller_action: ${{ github.event.action }}\n    secrets:\n      caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}\n"
  },
  {
    "path": ".github/workflows/clean_dockerhub_images.yml",
    "content": "name: Docker Cleanup\n\non:\n  schedule:\n    - cron: '0 12 * * 1'  # Runs every Monday at noon (UTC)\n  workflow_dispatch:\n\njobs:\n  cleanup:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Check out the repository\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6\n\n      - name: Set up Python\n        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6\n        with:\n          python-version: \"3.x\"\n\n      - name: Install dependencies\n        run: |\n          python -m pip install --upgrade pip\n          pip install requests\n\n      - name: Run Docker image cleanup\n        run: make clean-old-temporary-docker-images\n        env:\n          DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}\n          DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}\n          DELETE_AFTER_DAYS: 30\n"
  },
  {
    "path": ".github/workflows/docs-pages.yml",
    "content": "name: \"Docs / Publish\"\n# For more information,\n# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows\n\non:\n  push:\n    branches:\n      - master\n      - 'branch-**'\n    paths:\n      - \"docs/**\"\n  workflow_dispatch:\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6\n        with:\n          ref: ${{ github.event.repository.default_branch }}\n          persist-credentials: false\n          fetch-depth: 0\n\n      - uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5\n        # actions/setup-python@v6 poetry cache feature requires poetry to be installed beforehand\n        # which makes use of it extremely awkward.\n        with:\n          path: |\n            /home/runner/.cache/pip\n            /home/runner/.cache/uv\n          key: docs-cache-${{ runner.os }}-${{ hashFiles('docs/pyproject.toml', 'docs/Makefile') }}\n\n      - name: Set up Python\n        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6\n        with:\n          python-version: '3.12'\n\n      - name: Install uv\n        uses: astral-sh/setup-uv@v6\n\n      - name: Set up env\n        run: make -C docs setupenv\n\n      - name: Build docs\n        run: make -C docs multiversion\n\n      - name: Build redirects\n        run: make -C docs redirects\n\n      - name: Tar folder\n        run: |\n          tar \\\n            --dereference --hard-dereference \\\n            --directory docs/_build/dirhtml/ \\\n            -cvf ${{ runner.temp }}/artifact.tar \\\n            .\n\n      - name: Upload artifact\n        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7\n        with:\n          name: github-pages\n          path: ${{ runner.temp }}/artifact.tar\n          retention-days: \"1\"\n\n  release:\n    # Add a dependency to the build job\n    needs: 
build\n\n    # Grant GITHUB_TOKEN the permissions required to make a Pages deployment\n    permissions:\n      pages: write # to deploy to Pages\n      id-token: write # to verify the deployment originates from an appropriate source\n      contents: read # to read private repo\n\n    # Deploy to the github-pages environment\n    environment:\n      name: github-pages\n      url: ${{ steps.deployment.outputs.page_url }}\n\n    # Specify runner + deployment step\n    runs-on: ubuntu-latest\n    steps:\n      - name: Deploy to GitHub Pages\n        id: deployment\n        uses: actions/deploy-pages@cd2ce8fcbc39b97be8ca5fce6e763baed58fa128 # v5\n"
  },
  {
    "path": ".github/workflows/docs-pr.yml",
    "content": "name: \"Docs / Build PR\"\n# For more information,\n# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows\n\non:\n  push:\n    paths:\n      - \"docs/**\"\n      - \".github/workflows/docs-pr.yml\"\n  pull_request:\n    types: [opened, synchronize, reopened]\n    paths:\n      - \"docs/**\"\n      - \".github/workflows/docs-pr.yml\"\n  workflow_dispatch:\n\npermissions:\n  contents: read\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6\n        with:\n          persist-credentials: false\n          fetch-depth: 0\n\n      - uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5\n        # actions/setup-python@v6 poetry cache feature requires poetry to be installed beforehand\n        # which makes use of it extremely awkward.\n        with:\n          path: |\n            /home/runner/.cache/pip\n            /home/runner/.cache/uv\n          key: docs-cache-${{ runner.os }}-${{ hashFiles('docs/pyproject.toml', 'docs/Makefile') }}\n\n      - name: Set up Python\n        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6\n        with:\n          python-version: '3.12'\n\n      - name: Install uv\n        uses: astral-sh/setup-uv@v6\n\n      - name: Set up env\n        run: make -C docs setupenv\n\n      - name: Build docs\n        run: make -C docs test\n"
  },
  {
    "path": ".github/workflows/extended-ci-longevity-large-partitions-with-network-nemesis-1h-test.yml",
    "content": "name: Build scylla-bench docker image with gocql PR\n\non:\n  pull_request_target:\n    types: [labeled]\n\njobs:\n  trigger-longevity-large-partitions-with-network-nemesis-1h-test:\n    if: contains(github.event.pull_request.labels.*.name, 'extended-ci')\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        scylla-version: [ENTERPRISE-RELEASE, OSS-RELEASE]\n    steps:\n      - name: Login to Docker Hub\n        uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n\n      - name: Check out the scylla-bench repository\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6\n        with:\n          repository: scylladb/scylla-bench\n          path: scylla-bench\n\n      - name: Checkout GoCQL PR Repository\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6\n        with:\n          repository: ${{ github.event.pull_request.head.repo.full_name }}\n          ref: ${{ github.event.pull_request.head.sha }}\n          path: gocql\n\n      - name: Build and push Scylla-bench Docker Image with gocql from PR\n        run: |\n          cd scylla-bench\n          GOCQL_REPO=\"github.com/${{ github.event.pull_request.head.repo.full_name }}\" GOCQL_VERSION=\"${{ github.event.pull_request.head.sha }}\" make build-with-custom-gocql-version\n          DOCKER_IMAGE_TAG=\"scylladb/gocql-extended-ci:scylla-bench-${{ github.event.pull_request.head.sha }}\" DOCKER_IMAGE_LABELS=\"com.scylladb.gocql-version=${{ github.event.pull_request.head.sha }}\" make build-sct-docker-image\n          docker push \"scylladb/gocql-extended-ci:scylla-bench-${{ github.event.pull_request.head.sha }}\"\n\n      - name: Install get-version CLI\n        run: |\n          git clone https://github.com/scylladb-actions/get-version.git\n          cd get-version\n          go mod tidy\n          
go build -o get-version\n\n      - name: Get scylla version\n        id: scylla-version\n        run: |\n          cd get-version\n          if [[ \"${{ matrix.scylla-version }}\" == \"ENTERPRISE-RELEASE\" ]]; then\n            echo \"value=$(./get-version --source dockerhub-imagetag --repo scylladb/scylla-enterprise -filters \"^[0-9]{4}$.^[0-9]+$.^[0-9]+$ and LAST.LAST.LAST\" | tr -d '\\\"')\" >> $GITHUB_OUTPUT\n          elif [[ \"${{ matrix.scylla-version }}\" == \"OSS-RELEASE\" ]]; then\n            echo \"value=$(./get-version --source dockerhub-imagetag --repo scylladb/scylla -filters \"^[0-9]$.^[0-9]+$.^[0-9]+$ and LAST.LAST.LAST\" | tr -d '\\\"')\" >> $GITHUB_OUTPUT\n          elif echo \"${{ matrix.scylla-version }}\" | grep -P '^[0-9\\.]+'; then # If you want to run specific version do just that\n            echo \"value=${{ matrix.scylla-version }}\" | tee -a $GITHUB_OUTPUT\n          else\n            echo \"Unknown scylla version name '${{ matrix.scylla-version }}'\"\n            exit 1\n          fi\n\n      - name: Start Jenkins job\n        uses: scylladb-actions/jenkins-client@b947e07e8b588db2a8028313274992d3eda73360 # v0.2.0\n        with:\n          job_name: scylla-drivers/job/gocql/job/extended-ci/job/longevity-large-partitions-with-network-nemesis-1h-test\n          job_parameters: '{\"email_recipients\": \"scylla-drivers@scylladb.com\", \"scylla_version\": \"${{ steps.scylla-version.outputs.value }}\", \"extra_environment_variables\": \"SCT_STRESS_IMAGE.scylla-bench=scylladb/gocql-extended-ci:scylla-bench-${{ github.event.pull_request.head.sha }}\"}'\n          base_url: https://jenkins.scylladb.com\n          user: ${{ secrets.JENKINS_USERNAME }}\n          password: ${{ secrets.JENKINS_TOKEN }}\n          wait_timeout: 3h\n          polling_interval: 1s\n"
  },
  {
    "path": ".github/workflows/main.yml",
    "content": "name: Build\n\non:\n  push:\n    branches:\n      - master\n    paths-ignore:\n      - \"*.md\"\n      - \"docs/**\"\n      - .github/workflows/docs-*\n      - .github/workflows/bench-tests.yml\n      - .github/workflows/extended-ci-longevity-large-partitions-with-network-nemesis-1h-test.yml\n  pull_request:\n    types: [opened, synchronize, reopened]\n    paths-ignore:\n      - \"*.md\"\n      - \"docs/**\"\n      - .github/workflows/docs-*\n      - .github/workflows/bench-tests.yml\n      - .github/workflows/extended-ci-longevity-large-partitions-with-network-nemesis-1h-test.yml\n\njobs:\n  build:\n    name: Build\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6\n\n      - uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5\n        with:\n          path: |\n            /home/runner/.cache/pip\n            /home/runner/.local\n            /home/runner/.ccm/scylla-repository\n            /home/runner/.ccm/repository\n            /home/runner/.sdkman\n            testdata/pki\n            bin/\n          # CCM, scylla, cassandra and java versions are in Makefile\n          key: pr-check-${{ runner.os }}-${{ hashFiles('Makefile') }}\n\n      - uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6\n        with:\n          go-version-file: go.mod\n          cache-dependency-path: |\n            lz4/go.sum\n            tests/bench/go.sum\n            go.sum\n\n      - name: Run linters\n        run: make check\n\n      - name: Run unit tests\n        run: make test-unit\n\n      - run: sudo sh -c \"echo 2097152 >> /proc/sys/fs/aio-max-nr\"\n\n  test-integration-scylla:\n    name: Integration Tests On Scylla\n    runs-on: ubuntu-latest\n    needs: build\n    strategy:\n      matrix:\n        scylla-version: [LATEST, PRIOR, LTS-LATEST, LTS-PRIOR]\n      fail-fast: false\n    env:\n      SCYLLA_VERSION: ${{ matrix.scylla-version }}\n    steps:\n      - uses: 
actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6\n\n      - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6\n        with:\n          python-version: '3.11'\n\n      - uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5\n        with:\n          path: |\n            /home/runner/.cache/pip\n            /home/runner/.local\n            /home/runner/.sdkman\n            testdata/pki\n            bin/\n          # CCM, pip and java versions are in Makefile\n          key: pr-check-${{ runner.os }}-${{ hashFiles('Makefile') }}\n\n      - uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6\n        with:\n          go-version-file: go.mod\n          cache-dependency-path: |\n            lz4/go.sum\n            tests/bench/go.sum\n            go.sum\n\n      - name: Get scylla version\n        id: scylla-version\n        run: make resolve-scylla-version\n\n      - name: Pull CCM image from the cache\n        uses: actions/cache/restore@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5\n        if: steps.scylla-version.outputs.value != 'NOT-FOUND'\n        id: ccm-cache\n        with:\n          path: ~/.ccm/repository\n          key: ccm-scylla-${{ runner.os }}-${{ steps.scylla-version.outputs.value }}\n\n      - name: Download ScyllaDB (${{ steps.scylla-version.outputs.value }}) image\n        if: steps.ccm-cache.outputs.cache-hit != 'true' && steps.scylla-version.outputs.value != 'NOT-FOUND'\n        run: make download-scylla\n\n      - name: Save CCM ScyllaDB image into the cache\n        if: steps.ccm-cache.outputs.cache-hit != 'true' && steps.scylla-version.outputs.value != 'NOT-FOUND'\n        uses: actions/cache/save@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5\n        with:\n          path: ~/.ccm/repository\n          key: ccm-scylla-${{ runner.os }}-${{ steps.scylla-version.outputs.value }}\n\n      - name: Run integration suite with ScyllaDB ${{ matrix.scylla-version }}(${{ 
steps.scylla-version.outputs.value }})\n        if: steps.scylla-version.outputs.value != 'NOT-FOUND'\n        run: make test-integration-scylla\n\n      - name: Run CCM integration suite with ScyllaDB ${{ matrix.scylla-version }}(${{ steps.scylla-version.outputs.value }})\n        if: steps.scylla-version.outputs.value != 'NOT-FOUND'\n        run: TEST_INTEGRATION_TAGS=\"ccm gocql_debug\" make test-integration-scylla\n\n  test-integration-cassandra:\n    name: Integration Tests On Cassandra\n    runs-on: ubuntu-latest\n    needs: build\n    strategy:\n      matrix:\n        cassandra-version: [5-LATEST, 4-LATEST]\n      fail-fast: false\n    env:\n      CASSANDRA_VERSION: ${{ matrix.cassandra-version }}\n\n    steps:\n      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6\n\n      - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6\n        with:\n          python-version: '3.11'\n\n      - uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5\n        with:\n          path: |\n            /home/runner/.cache/pip\n            /home/runner/.local\n            /home/runner/.sdkman\n            testdata/pki\n            bin/\n          # CCM, python and java versions are in Makefile\n          key: pr-check-${{ runner.os }}-${{ hashFiles('Makefile') }}\n\n      - uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6\n        with:\n          go-version-file: go.mod\n          cache-dependency-path: |\n            lz4/go.sum\n            tests/bench/go.sum\n            go.sum\n\n      - name: Get cassandra version\n        id: cassandra-version\n        run: make resolve-cassandra-version\n\n      - name: Pull CCM image from the cache\n        uses: actions/cache/restore@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5\n        if: steps.cassandra-version.outputs.value != 'NOT-FOUND'\n        id: ccm-cache\n        with:\n          path: ~/.ccm/repository\n          key: ccm-cassandra-${{ runner.os 
}}-${{ steps.cassandra-version.outputs.value }}\n\n      - name: Download Cassandra (${{ steps.cassandra-version.outputs.value }}) image\n        if: steps.ccm-cache.outputs.cache-hit != 'true' && steps.cassandra-version.outputs.value != 'NOT-FOUND'\n        run: make download-cassandra\n\n      - name: Save CCM Cassandra image into the cache\n        if: steps.ccm-cache.outputs.cache-hit != 'true' && steps.cassandra-version.outputs.value != 'NOT-FOUND'\n        uses: actions/cache/save@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5\n        with:\n          path: ~/.ccm/repository\n          key: ccm-cassandra-${{ runner.os }}-${{ steps.cassandra-version.outputs.value }}\n\n      - name: Run integration suite with Cassandra ${{ matrix.cassandra-version }}(${{ steps.cassandra-version.outputs.value }})\n        if: steps.cassandra-version.outputs.value != 'NOT-FOUND'\n        run: make test-integration-cassandra\n\n      - name: Run CCM integration suite with Cassandra ${{ matrix.cassandra-version }}(${{ steps.cassandra-version.outputs.value }})\n        if: steps.cassandra-version.outputs.value != 'NOT-FOUND'\n        run: TEST_INTEGRATION_TAGS=\"ccm gocql_debug\" make test-integration-cassandra\n\n"
  },
  {
    "path": ".gitignore",
    "content": "gocql-fuzz\nfuzz-corpus\nfuzz-work\ngocql.test\n.idea\n\nbin/\n\ntestdata/pki/.keystore\ntestdata/pki/.truststore\ntestdata/pki/*.crt\ntestdata/pki/*.key\ntestdata/pki/*.p12\n\ndocs/_build/\ndocs/source/.doctrees\n\nTODO*.md\n\n# Codex - AI assistant metadata\n.codex\n.codex/\n.codex-cache/\n.codex-config.json\n.codex-settings.json\ncodex.log\nAGENTS.md\n\n# Claude - AI assistant metadata\n.anthropic/\n.claude/\nclaude.log\nclaude_history.json\nclaude_config.json\nCLAUDE.md"
  },
  {
    "path": ".golangci.yml",
    "content": "version: \"2\"\n\nformatters:\n  enable:\n    - goimports\n\n  settings:\n    goimports:\n      local-prefixes:\n        - github.com/gocql/gocql\n        - github.com/apache/cassandra-gocql-driver\n        - github.com/apache/cassandra-gocql-driver/v2\n    golines:\n      max-len: 120\n\nlinters:\n  exclusions:\n    rules:\n      - path: '(.+)_test\\.go'\n        text: \"fieldalignment\"\n        linters:\n          - govet\n\n  default: none\n  enable:\n    - nolintlint\n    - govet\n  settings:\n    govet:\n      enable-all: true\n      disable:\n        - shadow\n\n    nolintlint:\n      allow-no-explanation: [ golines ]\n      require-explanation: true\n      require-specific: true\n\nrun:\n  build-tags:\n    - integration\n    - unit"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing to the ScyllaDB GoCQL Driver\n\n**TL;DR** - this manifesto sets out the bare minimum requirements for submitting a patch to gocql.\n\nThis guide outlines the process of landing patches in gocql and the general approach to maintaining the code base.\n\n## Background\n\nThe goal of the gocql project is to provide a stable and robust CQL driver for Go. This is a community driven project that is coordinated by a small team of core developers.\n\n## Minimum Requirement Checklist\n\nThe following is a check list of requirements that need to be satisfied in order for us to merge your patch:\n\n* You should raise a pull request to scylladb/gocql on Github\n* The pull request has a title that clearly summarizes the purpose of the patch\n* The motivation behind the patch is clearly defined in the pull request summary\n* You agree that your contribution is donated to the Apache Software Foundation (appropriate copyright is on all new files)\n* The patch will merge cleanly\n* The test coverage does not fall\n* The merge commit passes the regression test suite on GitHub Actions\n* `go fmt` has been applied to the submitted code\n* A correctly formatted commit message, see below\n* Notable changes (i.e. new features or changed behavior, bugfixes) are appropriately documented in CHANGELOG.md, functional changes also in godoc\n\nIf there are any requirements that can't be reasonably satisfied, please state this either on the pull request or as part of discussion on the mailing list. 
Where appropriate, the core team may apply discretion and make an exception to these requirements.\n\n## Commit Message\n\nThe commit message format should be:\n\n```\n<short description>\n\n<reason why the change is needed>\n\nPatch by <authors>; reviewed by <Reviewers> for #####\n```\n\nShort description should:\n* Be a short sentence.\n* Start with a capital letter.\n* Be written in the present tense.\n* Summarize what is changed, not why it is changed.\n\nShort description should not:\n* End with a period.\n* Use the word Fixes. Most commits fix something.\n\nLong description / Reason:\n* Should describe why the change is needed. What is fixed by the change? Why was it broken before? What use case does the new feature solve?\n* Consider adding details of other options that you considered when implementing the change and why you made the design decisions you made.\n\n## Beyond The Checklist\n\nIn addition to stating the hard requirements, there are a bunch of things that we consider when assessing changes to the library. These soft requirements are helpful pointers of how to get a patch landed quicker and with less fuss.\n\n### General QA Approach\n\nThe Scylla project needs to consider the ongoing maintainability of the library at all times. Patches that look like they will introduce maintenance issues for the team will not be accepted.\n\nYour patch will get merged quicker if you have decent test cases that provide test coverage for the new behavior you wish to introduce.\n\nUnit tests are good, integration tests are even better. An example of a unit test is `marshal_test.go` - this tests the serialization code in isolation. `cassandra_test.go` is an integration test suite that is executed against every version of Cassandra that gocql supports as part of the CI process on GitHub Actions.\n\nThat said, the point of writing tests is to provide a safety net to catch regressions, so there is no need to go overboard with tests. 
Remember that the more tests you write, the more code we will have to maintain. So there's a balance to strike there.\n\n### Sign Off Procedure\n\nGenerally speaking, a pull request can get merged by any one of the project's committers. If your change is minor, chances are that one team member will just go ahead and merge it there and then. As stated earlier, suitable test coverage will increase the likelihood that a single reviewer will assess and merge your change. If your change has no test coverage, or looks like it may have wider implications for the health and stability of the library, the reviewer may elect to refer the change to another team member to achieve consensus before proceeding. Therefore, the tighter and cleaner your patch is, the quicker it will go through the review process.\n\n### Supported Features\n\ngocql is a low level wire driver for Cassandra CQL. By and large, we would like to keep the functional scope of the library as narrow as possible. We think that gocql should be tight and focused, and we will be naturally skeptical of things that could just as easily be implemented in a higher layer. Inevitably you will come across something that could be implemented in a higher layer, save for a minor change to the core API. In this instance, please strike up a conversation in the Cassandra community. Chances are we will understand what you are trying to achieve and will try to accommodate this in a maintainable way.\n"
  },
  {
    "path": "LICENSE",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n\n"
  },
  {
    "path": "Makefile",
    "content": "SHELL := bash\n.ONESHELL:\n.SHELLFLAGS := -eo pipefail -c\n\nMAKEFILE_PATH := $(abspath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))\nKEY_PATH = ${MAKEFILE_PATH}/testdata/pki\nBIN_DIR := \"${MAKEFILE_PATH}/bin\"\n\nCASSANDRA_VERSION ?= LATEST\nSCYLLA_VERSION ?= LATEST\n\nGOLANGCI_VERSION = 2.5.0\n\nTEST_CQL_PROTOCOL ?= 4\nTEST_COMPRESSOR ?= snappy\nTEST_OPTS ?=\nTEST_INTEGRATION_TAGS ?= integration gocql_debug\nJVM_EXTRA_OPTS ?= -Dcassandra.test.fail_writes_ks=test -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler\n\nCCM_CASSANDRA_CLUSTER_NAME = gocql_cassandra_integration_test\nCCM_CASSANDRA_IP_PREFIX = 127.0.1.\nCCM_CASSANDRA_REPO ?= github.com/apache/cassandra-ccm\nCCM_CASSANDRA_VERSION ?= trunk\n\nCCM_SCYLLA_CLUSTER_NAME = gocql_scylla_integration_test\nCCM_SCYLLA_IP_PREFIX = 127.0.2.\nCCM_SCYLLA_REPO ?= github.com/scylladb/scylla-ccm\nCCM_SCYLLA_VERSION ?= master\n\nifeq (${CCM_CONFIG_DIR},)\n\tCCM_CONFIG_DIR = ~/.ccm\nendif\nCCM_CONFIG_DIR := $(shell readlink --canonicalize ${CCM_CONFIG_DIR})\n\nCASSANDRA_CONFIG ?= \"client_encryption_options.enabled: true\" \\\n\"client_encryption_options.keystore: ${KEY_PATH}/.keystore\" \\\n\"client_encryption_options.keystore_password: cassandra\" \\\n\"client_encryption_options.require_client_auth: true\" \\\n\"client_encryption_options.truststore: ${KEY_PATH}/.truststore\" \\\n\"client_encryption_options.truststore_password: cassandra\" \\\n\"concurrent_reads: 2\" \\\n\"concurrent_writes: 2\" \\\n\"write_request_timeout_in_ms: 5000\" \\\n\"read_request_timeout_in_ms: 5000\"\n\nifeq (${CASSANDRA_VERSION},3-LATEST)\n\tCASSANDRA_CONFIG += \"rpc_server_type: sync\" \\\n\"rpc_min_threads: 2\" \\\n\"rpc_max_threads: 2\" \\\n\"enable_user_defined_functions: true\" \\\n\"enable_materialized_views: true\" \\\n\nelse ifeq (${CASSANDRA_VERSION},4-LATEST)\n\tCASSANDRA_CONFIG +=\t\"enable_user_defined_functions: true\" \\\n\"enable_materialized_views: 
true\"\nelse\n\tCASSANDRA_CONFIG += \"user_defined_functions_enabled: true\" \\\n\"materialized_views_enabled: true\"\nendif\n\nSCYLLA_CONFIG = \"native_transport_port_ssl: 9142\" \\\n\"native_transport_port: 9042\" \\\n\"native_shard_aware_transport_port: 19042\" \\\n\"native_shard_aware_transport_port_ssl: 19142\" \\\n\"client_encryption_options.enabled: true\" \\\n\"client_encryption_options.certificate: ${KEY_PATH}/cassandra.crt\" \\\n\"client_encryption_options.keyfile: ${KEY_PATH}/cassandra.key\" \\\n\"client_encryption_options.truststore: ${KEY_PATH}/ca.crt\" \\\n\"client_encryption_options.require_client_auth: true\" \\\n\"maintenance_socket: workdir\" \\\n\"enable_tablets: true\" \\\n\"enable_user_defined_functions: true\" \\\n\"experimental_features: [udf]\"\n\nexport JVM_EXTRA_OPTS\nexport JAVA11_HOME=${JAVA_HOME_11_X64}\nexport JAVA17_HOME=${JAVA_HOME_17_X64}\nexport JAVA_HOME=${JAVA_HOME_11_X64}\nexport PATH := $(MAKEFILE_PATH)/bin:~/.sdkman/bin:$(PATH)\n\nprint-config:\n\techo ${CASSANDRA_CONFIG}\n\n.prepare-bin:\n\t@[[ -d \"$(MAKEFILE_PATH)/bin\" ]] || mkdir \"$(MAKEFILE_PATH)/bin\"\n\n.prepare-get-version: .prepare-bin\n\t@if [[ ! 
-f \"$(MAKEFILE_PATH)/bin/get-version\" ]]; then\n\t\techo \"bin/get-version is not found, installing it\"\n\t\tcurl -sSLo /tmp/get-version.zip https://github.com/scylladb-actions/get-version/releases/download/v0.4.5/get-version_0.4.5_linux_amd64v3.zip\n\t\tunzip /tmp/get-version.zip get-version -d \"$(MAKEFILE_PATH)/bin\" >/dev/null\n\tfi\n\n.prepare-environment-update-aio-max-nr:\n\t@if (( $$(< /proc/sys/fs/aio-max-nr) < 2097152 )); then\n\t\techo 2097152 | sudo tee /proc/sys/fs/aio-max-nr >/dev/null\n\tfi\n\nclean-old-temporary-docker-images:\n\t@echo \"Running Docker Hub image cleanup script...\"\n\tpython ci/clean-old-temporary-docker-images.py\n\nCASSANDRA_VERSION_FILE=/tmp/cassandra-version-${CASSANDRA_VERSION}.resolved\nresolve-cassandra-version: .prepare-get-version\n\t@find \"${CASSANDRA_VERSION_FILE}\" -mtime +0 -delete 2>/dev/null 1>&2 || true\n\tif [[ -f \"${CASSANDRA_VERSION_FILE}\" ]]; then\n\t\techo \"Resolved Cassandra ${CASSANDRA_VERSION} to $$(cat ${CASSANDRA_VERSION_FILE})\"\n\t\texit 0\n\tfi\n\n\tif [[ \"${CASSANDRA_VERSION}\" == \"LATEST\" ]]; then\n\t\tCASSANDRA_VERSION_RESOLVED=`get-version -source github-tag -repo apache/cassandra -prefix \"cassandra-\" -out-no-prefix -filters \"^[0-9]+$$.^[0-9]+$$.^[0-9]+$$ and LAST.LAST.LAST\" | tr -d '\\\"'`\n\telif [[ \"${CASSANDRA_VERSION}\" == \"5-LATEST\" ]]; then\n\t\tCASSANDRA_VERSION_RESOLVED=`get-version -source github-tag -repo apache/cassandra -prefix \"cassandra-\" -out-no-prefix -filters \"^[0-9]+$$.^[0-9]+$$.^[0-9]+$$ and 5.LAST.LAST\" | tr -d '\\\"'`\n\telif [[ \"${CASSANDRA_VERSION}\" == \"4-LATEST\" ]]; then\n\t\tCASSANDRA_VERSION_RESOLVED=`get-version -source github-tag -repo apache/cassandra -prefix \"cassandra-\" -out-no-prefix -filters \"^[0-9]+$$.^[0-9]+$$.^[0-9]+$$ and 4.LAST.LAST\" | tr -d '\\\"'`\n\telif [[ \"${CASSANDRA_VERSION}\" == \"3-LATEST\" ]]; then\n\t\tCASSANDRA_VERSION_RESOLVED=`get-version -source github-tag -repo apache/cassandra -prefix \"cassandra-\" -out-no-prefix 
-filters \"^[0-9]+$$.^[0-9]+$$.^[0-9]+$$ and 3.LAST.LAST\" | tr -d '\\\"'`\n\telif echo \"${CASSANDRA_VERSION}\" | grep -P '^[0-9\\.]+'; then\n\t\tCASSANDRA_VERSION_RESOLVED=${CASSANDRA_VERSION}\n\telse\n\t\techo \"Unknown Cassandra version name '${CASSANDRA_VERSION}'\"\n\t\texit 1\n\tfi\n\n\tif [[ -z \"$${CASSANDRA_VERSION_RESOLVED}\" ]]; then\n\t\techo \"There is no ${CASSANDRA_VERSION} Cassandra version\"\n\t\tif [[ -n \"$${GITHUB_ENV}\" ]]; then\n\t\t\techo \"value=NOT-FOUND\" >>$${GITHUB_OUTPUT}\n\t\t\techo \"CASSANDRA_VERSION_RESOLVED=NOT-FOUND\" >>$${GITHUB_ENV}\n\t\t\texit 0\n\t\tfi\n\t\texit 2\n\tfi\n\n\techo \"Resolved Cassandra ${CASSANDRA_VERSION} to $${CASSANDRA_VERSION_RESOLVED}\"\n\tif [[ -n \"$${GITHUB_OUTPUT}\" ]]; then\n\t\techo \"value=$${CASSANDRA_VERSION_RESOLVED}\" >>$${GITHUB_OUTPUT}\n\tfi\n\tif [[ -n \"$${GITHUB_ENV}\" ]]; then\n\t\techo \"CASSANDRA_VERSION_RESOLVED=$${CASSANDRA_VERSION_RESOLVED}\" >>$${GITHUB_ENV}\n\tfi\n\techo \"$${CASSANDRA_VERSION_RESOLVED}\" >${CASSANDRA_VERSION_FILE}\n\nSCYLLA_VERSION_FILE=/tmp/scylla-version-${SCYLLA_VERSION}.resolved\nresolve-scylla-version: .prepare-get-version\n\t@find \"${SCYLLA_VERSION_FILE}\" -mtime +0 -delete 2>/dev/null 1>&2 || true\n\tif [[ -f \"${SCYLLA_VERSION_FILE}\" ]]; then\n\t\techo \"Resolved ScyllaDB ${SCYLLA_VERSION} to $$(cat ${SCYLLA_VERSION_FILE})\"\n\t\texit 0\n\tfi\n\n\tif [[ \"${SCYLLA_VERSION}\" == \"LTS-LATEST\" ]]; then\n\t\tSCYLLA_VERSION_RESOLVED=`get-version --source dockerhub-imagetag --repo scylladb/scylla -filters \"^[0-9]{4}$$.^[0-9]+$$.^[0-9]+$$ and LAST.1.LAST\" | tr -d '\\\"'`\n\telif [[ \"${SCYLLA_VERSION}\" == \"LTS-PRIOR\" ]]; then\n\t\tSCYLLA_VERSION_RESOLVED=`get-version --source dockerhub-imagetag --repo scylladb/scylla -filters \"^[0-9]{4}$$.^[0-9]+$$.^[0-9]+$$ and LAST-1.1.LAST\" | tr -d '\\\"'`\n\t\tif [[ -z \"$${SCYLLA_VERSION_RESOLVED}\" ]]; then\n\t\t\tSCYLLA_VERSION_RESOLVED=`get-version --source dockerhub-imagetag --repo scylladb/scylla-enterprise 
-filters \"^[0-9]{4}$$.^[0-9]+$$.^[0-9]+$$ and LAST-1.1.LAST\" | tr -d '\\\"'`\n\t\tfi\n\telif [[ \"${SCYLLA_VERSION}\" == \"LATEST\" ]]; then\n\t\tSCYLLA_VERSION_RESOLVED=`get-version --source dockerhub-imagetag --repo scylladb/scylla -filters \"^[0-9]{4}$$.^[0-9]+$$.^[0-9]+$$ and LAST.LAST.LAST\" | tr -d '\\\"'`\n\telif [[ \"${SCYLLA_VERSION}\" == \"PRIOR\" ]]; then\n\t\tSCYLLA_VERSION_RESOLVED=`get-version --source dockerhub-imagetag --repo scylladb/scylla -filters \"^[0-9]{4}$$.^[0-9]+$$.^[0-9]+$$ and LAST.LAST.LAST-1\" | tr -d '\\\"'`\n\telif echo \"${SCYLLA_VERSION}\" | grep -P '^[0-9\\.]+'; then\n\t\tSCYLLA_VERSION_RESOLVED=${SCYLLA_VERSION}\n\telse\n\t\techo \"Unknown ScyllaDB version name '${SCYLLA_VERSION}'\"\n\t\texit 1\n\tfi\n\n\tif [[ -z \"$${SCYLLA_VERSION_RESOLVED}\" ]]; then\n\t\techo \"There is no ${SCYLLA_VERSION} ScyllaDB version\"\n\t\tif [[ -n \"$${GITHUB_ENV}\" ]]; then\n\t\t\techo \"value=NOT-FOUND\" >>$${GITHUB_OUTPUT}\n\t\t\techo \"SCYLLA_VERSION_RESOLVED=NOT-FOUND\" >>$${GITHUB_ENV}\n\t\t\texit 0\n\t\tfi\n\t\texit 2\n\tfi\n\n\techo \"Resolved ScyllaDB ${SCYLLA_VERSION} to $${SCYLLA_VERSION_RESOLVED}\"\n\tif [[ -n \"$${GITHUB_OUTPUT}\" ]]; then\n\t\techo \"value=$${SCYLLA_VERSION_RESOLVED}\" >>$${GITHUB_OUTPUT}\n\tfi\n\tif [[ -n \"$${GITHUB_ENV}\" ]]; then\n\t\techo \"SCYLLA_VERSION_RESOLVED=$${SCYLLA_VERSION_RESOLVED}\" >>$${GITHUB_ENV}\n\tfi\n\techo \"$${SCYLLA_VERSION_RESOLVED}\" >${SCYLLA_VERSION_FILE}\n\ncassandra-start: .prepare-pki .prepare-cassandra-ccm .prepare-java resolve-cassandra-version\n\t@if [ -d ${CCM_CONFIG_DIR}/${CCM_CASSANDRA_CLUSTER_NAME} ] && ccm switch ${CCM_CASSANDRA_CLUSTER_NAME} 2>/dev/null 1>&2 && ccm status | grep UP 2>/dev/null 1>&2; then\n\t\techo \"Cassandra cluster is already started\"\n\t\texit 0\n\tfi\n\tif [[ -z \"$${CASSANDRA_VERSION_RESOLVED}\" ]]; then\n\t\tCASSANDRA_VERSION_RESOLVED=$$(cat '${CASSANDRA_VERSION_FILE}')\n\tfi\n\tif [[ -z \"$${CASSANDRA_VERSION_RESOLVED}\" ]]; then\n\t\techo \"Cassandra 
version ${CASSANDRA_VERSION} was not resolved\"\n\t\texit 1\n\tfi\n\tsource ~/.sdkman/bin/sdkman-init.sh;\n\techo \"Start Cassandra ${CASSANDRA_VERSION}($${CASSANDRA_VERSION_RESOLVED}) cluster\"\n\tccm stop ${CCM_CASSANDRA_CLUSTER_NAME} 2>/dev/null 1>&2 || true\n\tccm remove ${CCM_CASSANDRA_CLUSTER_NAME} 2>/dev/null 1>&2 || true\n\tccm create ${CCM_CASSANDRA_CLUSTER_NAME} -i ${CCM_CASSANDRA_IP_PREFIX} -v \"$${CASSANDRA_VERSION_RESOLVED}\" -n3 -d --vnodes --jvm_arg=\"-Xmx256m -XX:NewSize=100m\"\n\tccm updateconf ${CASSANDRA_CONFIG}\n\tfor conf_dir in ${CCM_CONFIG_DIR}/${CCM_CASSANDRA_CLUSTER_NAME}/node*/conf; do \\\n\t\tsed -i 's/^#MAX_HEAP_SIZE=.*/MAX_HEAP_SIZE=\"256M\"/' \"$$conf_dir/cassandra-env.sh\"; \\\n\tdone\n\tccm start --wait-for-binary-proto --wait-other-notice --verbose\n\tccm status\n\tccm node1 nodetool status\n\nscylla-start: .prepare-pki .prepare-scylla-ccm .prepare-environment-update-aio-max-nr resolve-scylla-version\n\t@if [ -d ${CCM_CONFIG_DIR}/${CCM_SCYLLA_CLUSTER_NAME} ] && ccm switch ${CCM_SCYLLA_CLUSTER_NAME} 2>/dev/null 1>&2 && ccm status | grep UP 2>/dev/null 1>&2; then\n\t\techo \"Scylla cluster is already started\";\n\t\texit 0;\n\tfi\n\tif [[ -z \"$${SCYLLA_VERSION_RESOLVED}\" ]]; then\n\t\tSCYLLA_VERSION_RESOLVED=$$(cat '${SCYLLA_VERSION_FILE}')\n\tfi\n\tif [[ -z \"$${SCYLLA_VERSION_RESOLVED}\" ]]; then\n\t\techo \"ScyllaDB version ${SCYLLA_VERSION} was not resolved\"\n\t\texit 1\n\tfi\n\techo \"Start scylla $(SCYLLA_VERSION)($${SCYLLA_VERSION_RESOLVED}) cluster\"\n\tccm stop ${CCM_SCYLLA_CLUSTER_NAME} 2>/dev/null 1>&2 || true\n\tccm remove ${CCM_SCYLLA_CLUSTER_NAME} 2>/dev/null 1>&2 || true\n\tif [[ \"$${SCYLLA_VERSION_RESOLVED}\" != *:* ]]; then\n\t\tSCYLLA_VERSION_RESOLVED=\"release:$${SCYLLA_VERSION_RESOLVED}\"\n\tfi\n\tccm create ${CCM_SCYLLA_CLUSTER_NAME} -i ${CCM_SCYLLA_IP_PREFIX} --scylla -v $${SCYLLA_VERSION_RESOLVED} -n 3 -d --jvm_arg=\"--smp 2 --memory 1G --experimental-features udf --enable-user-defined-functions 
true\"\n\tccm updateconf ${SCYLLA_CONFIG}\n\tccm start --wait-for-binary-proto --wait-other-notice --verbose\n\tccm status\n\tccm node1 nodetool status\n\tsudo chmod 0777 ${CCM_CONFIG_DIR}/${CCM_SCYLLA_CLUSTER_NAME}/*/cql.m || true\n\ndownload-cassandra: .prepare-cassandra-ccm resolve-cassandra-version\n\t@if [[ -z \"$${CASSANDRA_VERSION_RESOLVED}\" ]]; then\n\t\tCASSANDRA_VERSION_RESOLVED=$$(cat '${CASSANDRA_VERSION_FILE}')\n\tfi\n\tif [[ -z \"$${CASSANDRA_VERSION_RESOLVED}\" ]]; then\n\t\techo \"Cassandra version ${CASSANDRA_VERSION} was not resolved\"\n\t\texit 1\n\tfi\n\trm -rf /tmp/download.ccm || true\n\tmkdir /tmp/download.ccm || true\n\tccm create ccm_1 -i 127.0.254. -n 1:0 -v \"$${CASSANDRA_VERSION_RESOLVED}\" --config-dir=/tmp/download.ccm\n\trm -rf /tmp/download.ccm\n\ndownload-scylla: .prepare-scylla-ccm resolve-scylla-version\n\t@if [[ -z \"$${SCYLLA_VERSION_RESOLVED}\" ]]; then\n\t\tSCYLLA_VERSION_RESOLVED=$$(cat '${SCYLLA_VERSION_FILE}')\n\tfi\n\tif [[ -z \"$${SCYLLA_VERSION_RESOLVED}\" ]]; then\n\t\techo \"ScyllaDB version ${SCYLLA_VERSION} was not resolved\"\n\t\texit 1\n\tfi\n\trm -rf /tmp/download.ccm || true\n\tmkdir /tmp/download.ccm || true\n\tif [[ \"$${SCYLLA_VERSION_RESOLVED}\" != *:* ]]; then\n\t\tSCYLLA_VERSION_RESOLVED=\"release:$${SCYLLA_VERSION_RESOLVED}\"\n\tfi\n\tccm create ccm_1 -i 127.0.254. 
-n 1:0 -v \"$${SCYLLA_VERSION_RESOLVED}\" --scylla --config-dir=/tmp/download.ccm\n\trm -rf /tmp/download.ccm\n\ncassandra-stop: .prepare-cassandra-ccm\n\t@echo \"Stop cassandra cluster\"\n\t@ccm stop --not-gently ${CCM_CASSANDRA_CLUSTER_NAME} 2>/dev/null 1>&2 || true\n\t@ccm remove ${CCM_CASSANDRA_CLUSTER_NAME} 2>/dev/null 1>&2 || true\n\nscylla-stop: .prepare-scylla-ccm\n\t@echo \"Stop scylla cluster\"\n\t@ccm stop --not-gently ${CCM_SCYLLA_CLUSTER_NAME} 2>/dev/null 1>&2 || true\n\t@ccm remove ${CCM_SCYLLA_CLUSTER_NAME} 2>/dev/null 1>&2 || true\n\ntest-integration-cassandra: cassandra-start\n\t@echo \"Run integration tests for proto ${TEST_CQL_PROTOCOL} on cassandra ${CASSANDRA_VERSION}\"\n\tif [[ -z \"$${CASSANDRA_VERSION_RESOLVED}\" ]]; then\n\t\tCASSANDRA_VERSION_RESOLVED=$$(cat '${CASSANDRA_VERSION_FILE}')\n\tfi\n\tif [[ -z \"$${CASSANDRA_VERSION_RESOLVED}\" ]]; then\n\t\techo \"Cassandra version ${CASSANDRA_VERSION} was not resolved\"\n\t\texit 1\n\tfi\n\techo \"go test -v ${TEST_OPTS} -tags \\\"${TEST_INTEGRATION_TAGS}\\\" -distribution cassandra -timeout=10m -runauth -gocql.timeout=60s -runssl -proto=${TEST_CQL_PROTOCOL} -rf=3 -clusterSize=3 -autowait=2000ms -compressor=${TEST_COMPRESSOR} -gocql.cversion=$${CASSANDRA_VERSION_RESOLVED} -cluster=$$(ccm liveset) ./...\"\n\tgo test -v ${TEST_OPTS} -tags \"${TEST_INTEGRATION_TAGS}\" -distribution cassandra -timeout=10m -runauth -gocql.timeout=60s -runssl -proto=${TEST_CQL_PROTOCOL} -rf=3 -clusterSize=3 -autowait=2000ms -compressor=${TEST_COMPRESSOR} -gocql.cversion=$$(ccm node1 versionfrombuild) -cluster=$$(ccm liveset) ./...\n\ntest-integration-scylla: scylla-start\n\t@echo \"Run integration tests for proto ${TEST_CQL_PROTOCOL} on scylla ${SCYLLA_VERSION}\"\n\tif [ -S \"${CCM_CONFIG_DIR}/${CCM_SCYLLA_CLUSTER_NAME}/node1/cql.m\" ]; then\n\t\tCLUSTER_SOCKET=\"-cluster-socket ${CCM_CONFIG_DIR}/${CCM_SCYLLA_CLUSTER_NAME}/node1/cql.m\"\n\telse\n\t\techo \"Cluster socket is not found\"\n\tfi\n\tif [[ -z 
\"$${SCYLLA_VERSION_RESOLVED}\" ]]; then\n\t\tSCYLLA_VERSION_RESOLVED=$$(cat '${SCYLLA_VERSION_FILE}')\n\tfi\n\tif [[ -z \"$${SCYLLA_VERSION_RESOLVED}\" ]]; then\n\t\techo \"ScyllaDB version ${SCYLLA_VERSION} was not resolved\"\n\t\texit 1\n\tfi\n\techo \"go test -v ${TEST_OPTS} -tags \\\"${TEST_INTEGRATION_TAGS}\\\" -distribution scylla $${CLUSTER_SOCKET} -timeout=5m -gocql.timeout=60s -proto=${TEST_CQL_PROTOCOL} -rf=3 -clusterSize=3 -autowait=2000ms -compressor=${TEST_COMPRESSOR} -gocql.cversion=$${SCYLLA_VERSION_RESOLVED} -cluster=$$(ccm liveset) ./...\"\n\tgo test -v ${TEST_OPTS} -tags \"${TEST_INTEGRATION_TAGS}\" -distribution scylla $${CLUSTER_SOCKET} -timeout=5m -gocql.timeout=60s -proto=${TEST_CQL_PROTOCOL} -rf=3 -clusterSize=3 -autowait=2000ms -compressor=${TEST_COMPRESSOR} -gocql.cversion=$${SCYLLA_VERSION_RESOLVED} -cluster=$$(ccm liveset) ./...\n\ntest-unit: .prepare-pki\n\t@echo \"Run unit tests\"\n\tgo clean -testcache\nifeq ($(shell if [[ -n \"$${GITHUB_STEP_SUMMARY}\" ]]; then echo \"running-in-workflow\"; else echo \"running-in-shell\"; fi), running-in-workflow)\n\techo \"### Unit Test Results\" >>$${GITHUB_STEP_SUMMARY}\n\techo '```' >>$${GITHUB_STEP_SUMMARY}\n\techo go test -tags unit -timeout=5m -race ./...\n\tgo test -tags unit -timeout=5m -race ./... | tee -a \"$${GITHUB_STEP_SUMMARY}\"; TEST_STATUS=$${PIPESTATUS[0]}; echo '```' >>\"$${GITHUB_STEP_SUMMARY}\"; exit \"$${TEST_STATUS}\"\nelse\n\tgo test -v -tags unit -timeout=5m -race ./...\nendif\n\ntest-bench:\n\t@echo \"Run benchmark tests\"\nifeq ($(shell if [[ -n \"$${GITHUB_STEP_SUMMARY}\" ]]; then echo \"running-in-workflow\"; else echo \"running-in-shell\"; fi), running-in-workflow)\n\techo \"### Benchmark Results\" >>$${GITHUB_STEP_SUMMARY}\n\techo '```' >>\"$${GITHUB_STEP_SUMMARY}\"\n\techo go test -bench=. -benchmem ./...\n\tgo test -bench=. -benchmem ./... | tee -a \"$${GITHUB_STEP_SUMMARY}\"\n\techo '```' >>\"$${GITHUB_STEP_SUMMARY}\"\nelse\n\tgo test -bench=. 
 -benchmem ./...\nendif\n\ncheck-go-mod-drift:\n\t@echo \"Check Go module drift\"\n\tgo mod tidy -diff\n\tgo mod tidy -C lz4 -diff\n\tgo mod tidy -C tests/bench -diff\n\ncheck: .prepare-golangci check-go-mod-drift\n\t@echo \"Build\"\n\tgo build -tags all .\n\techo \"Check linting\"\n\t${BIN_DIR}/golangci-lint run\n\nfix-go-mod-drift:\n\t@echo \"Fix Go module drift\"\n\tgo mod tidy\n\tgo mod tidy -C lz4\n\tgo mod tidy -C tests/bench\n\nfix: .prepare-golangci fix-go-mod-drift\n\t@echo \"Fix linting\"\n\t${BIN_DIR}/golangci-lint run --fix\n\n.prepare-java:\nifeq ($(shell if [ -f ~/.sdkman/bin/sdkman-init.sh ]; then echo \"installed\"; else echo \"not-installed\"; fi), not-installed)\n\t@$(MAKE) install-java\nendif\n\ninstall-java:\n\t@echo \"Installing SDKMAN...\"\n\tcurl -s \"https://get.sdkman.io\" | bash\n\techo \"sdkman_auto_answer=true\" >> ~/.sdkman/etc/config\n\tsource ~/.sdkman/bin/sdkman-init.sh;\n\techo \"Installing Java versions...\";\n\tsdk install java 11.0.30-zulu;\n\tsdk install java 17.0.18-zulu;\n\tsdk default java 11.0.30-zulu;\n\tsdk use java 11.0.30-zulu;\n\n.prepare-cassandra-ccm:\n\t@if command -v ccm >/dev/null 2>&1 && grep CASSANDRA ${CCM_CONFIG_DIR}/ccm-type 2>/dev/null 1>&2 && grep ${CCM_CASSANDRA_VERSION} ${CCM_CONFIG_DIR}/ccm-version 2>/dev/null  1>&2; then\n\t\techo \"Cassandra CCM ${CCM_CASSANDRA_VERSION} is already installed\";\n\t\texit 0\n\tfi\n\t$(MAKE) install-cassandra-ccm\n\ninstall-cassandra-ccm:\n\t@echo \"Install CCM ${CCM_CASSANDRA_VERSION}\"\n\tpip install \"git+https://${CCM_CASSANDRA_REPO}.git@${CCM_CASSANDRA_VERSION}\"\n\tmkdir ${CCM_CONFIG_DIR} 2>/dev/null || true\n\techo CASSANDRA > ${CCM_CONFIG_DIR}/ccm-type\n\techo ${CCM_CASSANDRA_VERSION} > ${CCM_CONFIG_DIR}/ccm-version\n\n.prepare-scylla-ccm:\n\t@if command -v ccm >/dev/null 2>&1 && grep SCYLLA ${CCM_CONFIG_DIR}/ccm-type 2>/dev/null 1>&2 && grep ${CCM_SCYLLA_VERSION} ${CCM_CONFIG_DIR}/ccm-version 2>/dev/null  1>&2; then\n\t\techo \"Scylla CCM ${CCM_SCYLLA_VERSION} is 
already installed\";\n\t\texit 0\n\tfi\n\t$(MAKE) install-scylla-ccm\n\ninstall-scylla-ccm:\n\t@echo \"Installing Scylla CCM ${CCM_SCYLLA_VERSION}\"\n\tpip install \"git+https://${CCM_SCYLLA_REPO}.git@${CCM_SCYLLA_VERSION}\"\n\tmkdir ${CCM_CONFIG_DIR} 2>/dev/null || true\n\techo SCYLLA > ${CCM_CONFIG_DIR}/ccm-type\n\techo ${CCM_SCYLLA_VERSION} > ${CCM_CONFIG_DIR}/ccm-version\n\n.prepare-pki:\n\t@[ -f \"testdata/pki/cassandra.key\" ] || (echo \"Generating new PKI\" && cd testdata/pki/ && bash ./generate_certs.sh)\n\ngenerate-pki:\n\t@echo \"Generating new PKI\"\n\trm -f testdata/pki/.keystore testdata/pki/.truststore testdata/pki/*.p12 testdata/pki/*.key testdata/pki/*.crt || true\n\tcd testdata/pki/ && bash ./generate_certs.sh\n\n.prepare-golangci:\n\t@if ! \"${BIN_DIR}/golangci-lint\" --version | grep '${GOLANGCI_VERSION}' >/dev/null 2>&1 ; then\n\t\tmkdir -p \"${BIN_DIR}\"\n\t\techo \"Installing golangci-lint to '${BIN_DIR}'\"\n\t\tcurl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b bin/ v$(GOLANGCI_VERSION)\n\tfi\n"
  },
  {
    "path": "NOTICE",
    "content": "ScyllaDB GoCQL Driver\nCopyright 2024 The Apache Software Foundation\n\nThis product includes software developed at\nThe Apache Software Foundation (http://www.apache.org/).\n\n\nThis product originates, before git sha\n34fdeebefcbf183ed7f916f931aa0586fdaa1b40, from software from the\nGocql Authors, with copyright and license as follows:\n\nCopyright (c) 2016, The Gocql authors\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nWhere The Gocql Authors for copyright purposes are below. 
Those marked with\nasterisk have agreed to donate (copyright assign) their contributions to the\nApache Software Foundation, signing CLAs when appropriate.\n\nChristoph Hack <christoph@tux21b.org>\nJonathan Rudenberg <jonathan@titanous.com> *\nThorsten von Eicken <tve@rightscale.com> *\nMatt Robenolt <mattr@disqus.com>\nPhillip Couto <phillip.couto@stemstudios.com> *\nNiklas Korz <korz.niklask@gmail.com>\nNimi Wariboko Jr <nimi@channelmeter.com>\nGhais Issa <ghais.issa@gmail.com> *\nSasha Klizhentas <klizhentas@gmail.com>\nKonstantin Cherkasov <k.cherkasoff@gmail.com>\nBen Hood <0x6e6562@gmail.com>\nPete Hopkins <phopkins@gmail.com>\nChris Bannister <c.bannister@gmail.com> *\nMaxim Bublis <b@codemonkey.ru>\nAlex Zorin <git@zor.io>\nKasper Middelboe Petersen <me@phant.dk>\nHarpreet Sawhney <harpreet.sawhney@gmail.com>\nCharlie Andrews <charlieandrews.cwa@gmail.com> *\nStanislavs Koikovs <stanislavs.koikovs@gmail.com>\nDan Forest <bonjour@dan.tf>\nMiguel Serrano <miguelvps@gmail.com> *\nStefan Radomski <gibheer@zero-knowledge.org>\nJosh Wright <jshwright@gmail.com>\nJacob Rhoden <jacob.rhoden@gmail.com>\nBen Frye <benfrye@gmail.com>\nFred McCann <fred@sharpnoodles.com> *\nDan Simmons <dan@simmons.io> *\nMuir Manders <muir@retailnext.net>\nSankar P <sankar.curiosity@gmail.com> *\nJulien Da Silva <julien.dasilva@gmail.com>\nDan Kennedy <daniel@firstcs.co.uk> *\nNick Dhupia<nick.dhupia@gmail.com>\nYasuharu Goto <matope.ono@gmail.com> *\nJeremy Schlatter <jeremy.schlatter@gmail.com> *\nMatthias Kadenbach <matthias.kadenbach@gmail.com>\nDean Elbaz <elbaz.dean@gmail.com>\nMike Berman <evencode@gmail.com>\nDmitriy Fedorenko <c0va23@gmail.com> *\nZach Marcantel <zmarcantel@gmail.com>\nJames Maloney <jamessagan@gmail.com>\nAshwin Purohit <purohit@gmail.com> *\nDan Kinder <dkinder.is.me@gmail.com> *\nOliver Beattie <oliver@obeattie.com> *\nJustin Corpron <jncorpron@gmail.com> *\nMiles Delahunty <miles.delahunty@gmail.com>\nZach Badgett <zach.badgett@gmail.com>\nMaciek Sakrejda 
<maciek@heroku.com> *\nJeff Mitchell <jeffrey.mitchell@gmail.com>\nBaptiste Fontaine <b@ptistefontaine.fr> *\nMatt Heath <matt@mattheath.com> *\nJamie Cuthill <jamie.cuthill@gmail.com>\nAdrian Casajus <adriancasajus@gmail.com> *\nJohn Weldon <johnweldon4@gmail.com> *\nAdrien Bustany <adrien@bustany.org> *\nAndrey Smirnov <smirnov.andrey@gmail.com> *\nAdam Weiner <adamsweiner@gmail.com> *\nDaniel Cannon <daniel@danielcannon.co.uk>\nJohnny Bergström <johnny@joonix.se>\nAdriano Orioli <orioli.adriano@gmail.com> *\nClaudiu Raveica <claudiu.raveica@gmail.com> *\nArtem Chernyshev <artem.0xD2@gmail.com> *\nFerence Fu <fym201@msn.com>\nLOVOO <opensource@lovoo.com>\nnikandfor <nikandfor@gmail.com> *\nAnthony Woods <awoods@raintank.io> *\nAlexander Inozemtsev <alexander.inozemtsev@gmail.com> *\nRob McColl <rob@robmccoll.com>; <rmccoll@ionicsecurity.com> *\nViktor Tönköl <viktor.toenkoel@motionlogic.de> *\nIan Lozinski <ian.lozinski@gmail.com>\nMichael Highstead <highstead@gmail.com> *\nSarah Brown <esbie.is@gmail.com> *\nCaleb Doxsey <caleb@datadoghq.com> *\nFrederic Hemery <frederic.hemery@datadoghq.com> *\nPekka Enberg <penberg@scylladb.com> *\nMark M <m.mim95@gmail.com>\nBartosz Burclaf <burclaf@gmail.com> *\nMarcus King <marcusking01@gmail.com> *\nAndrew de Andrade <andrew@deandrade.com.br>\nRobert Nix <robert@nicerobot.org>\nNathan Youngman <git@nathany.com> *\nCharles Law <charles.law@gmail.com>; <claw@conduce.com> *\nNathan Davies <nathanjamesdavies@gmail.com> *\nBo Blanton <bo.blanton@gmail.com>\nVincent Rischmann <me@vrischmann.me> *\nJesse Claven <jesse.claven@gmail.com> *\nDerrick Wippler <thrawn01@gmail.com>\nLeigh McCulloch <leigh@leighmcculloch.com>\nRon Kuris <swcafe@gmail.com>\nRaphael Gavache <raphael.gavache@gmail.com> *\nYasser Abdolmaleki <yasser@yasser.ca>\nKrishnanand Thommandra <devtkrishna@gmail.com>\nBlake Atkinson <me@blakeatkinson.com>\nDharmendra Parsaila <d4dharmu@gmail.com>\nNayef Ghattas <nayef.ghattas@datadoghq.com> *\nMichał Matczuk 
<mmatczuk@gmail.com> *\nBen Krebsbach <ben.krebsbach@gmail.com> *\nVivian Mathews <vivian.mathews.3@gmail.com> *\nSascha Steinbiss <satta@debian.org> *\nSeth Rosenblum <seth.t.rosenblum@gmail.com> *\nJavier Zunzunegui <javier.zunzunegui.b@gmail.com>\nLuke Hines <lukehines@protonmail.com> *\nZhixin Wen <john.wenzhixin@hotmail.com> *\nChang Liu <changliu.it@gmail.com>\nIngo Oeser <nightlyone@gmail.com> *\nLuke Hines <lukehines@protonmail.com>\nJacob Greenleaf <jacob@jacobgreenleaf.com>\nAlex Lourie <alex@instaclustr.com>; <djay.il@gmail.com> *\nMarco Cadetg <cadetg@gmail.com> *\nKarl Matthias <karl@matthias.org> *\nThomas Meson <zllak@hycik.org> *\nMartin Sucha <martin.sucha@kiwi.com>; <git@mm.ms47.eu> *\nPavel Buchinchik <p.buchinchik@gmail.com>\nRintaro Okamura <rintaro.okamura@gmail.com> *\nYura Sokolov <y.sokolov@joom.com>; <funny.falcon@gmail.com>\nJorge Bay <jorgebg@apache.org> *\nDmitriy Kozlov <hummerd@mail.ru> *\nAlexey Romanovsky <alexus1024+gocql@gmail.com>\nJaume Marhuenda Beltran <jaumemarhuenda@gmail.com>\nPiotr Dulikowski <piodul@scylladb.com>\nÁrni Dagur <arni@dagur.eu> *\nTushar Das <tushar.das5@gmail.com> *\nMaxim Vladimirskiy <horkhe@gmail.com> *\nBogdan-Ciprian Rusu <bogdanciprian.rusu@crowdstrike.com> *\nYuto Doi <yutodoi.seattle@gmail.com> *\nKrishna Vadali <tejavadali@gmail.com>\nJens-W. 
Schicke-Uffmann <drahflow@gmx.de> *\nOndrej Polakovič <ondrej.polakovic@kiwi.com> *\nSergei Karetnikov <sergei.karetnikov@gmail.com> *\nStefan Miklosovic <smiklosovic@apache.org> *\nAdam Burk <amburk@gmail.com> *\nValerii Ponomarov <kiparis.kh@gmail.com> *\nNeal Turett <neal.turett@datadoghq.com> *\nDoug Schaapveld <djschaap@gmail.com> *\nSteven Seidman <steven.seidman@datadoghq.com>\nWojciech Przytuła <wojciech.przytula@scylladb.com> *\nJoão Reis <joao.reis@datastax.com> *\nLauro Ramos Venancio <lauro.venancio@incognia.com>\nDmitry Kropachev <dmitry.kropachev@gmail.com>\nOliver Boyle <pleasedontspamme4321+gocql@gmail.com> *\nJackson Fleming <jackson.fleming@instaclustr.com> *\nSylwia Szunejko <sylwia.szunejko@scylladb.com> *\n"
  },
  {
    "path": "README.md",
    "content": "<div align=\"center\">\n\n![Build Passing](https://github.com/scylladb/gocql/workflows/Build/badge.svg)\n[![Read the Fork Driver Docs](https://img.shields.io/badge/Read_the_Docs-pkg_go-blue)](https://pkg.go.dev/github.com/scylladb/gocql#section-documentation)\n[![Protocol Specs](https://img.shields.io/badge/Protocol_Specs-ScyllaDB_Docs-blue)](https://github.com/scylladb/scylladb/blob/master/docs/dev/protocol-extensions.md)\n\n</div>\n\n<h1 align=\"center\">\n\nScylla Shard-Aware Fork of [apache/cassandra-gocql-driver](https://github.com/apache/cassandra-gocql-driver)\n\n</h1>\n\n\n<img src=\"./.github/assets/logo.svg\" width=\"200\" align=\"left\" />\n\nThis is a fork of [apache/cassandra-gocql-driver](https://github.com/apache/cassandra-gocql-driver) package that we created at Scylla.\nIt contains extensions to tokenAwareHostPolicy supported by the Scylla 2.3 and onwards.\nIt allows driver to select a connection to a particular shard on a host based on the token.\nThis eliminates passing data between shards and significantly reduces latency.\n\nThere are open pull requests to merge the functionality to the upstream project:\n\n* [gocql/gocql#1210](https://github.com/gocql/gocql/pull/1210)\n* [gocql/gocql#1211](https://github.com/gocql/gocql/pull/1211).\n\nIt also provides support for shard aware ports, a faster way to connect to all shards, details available in [blogpost](https://www.scylladb.com/2021/04/27/connect-faster-to-scylla-with-a-shard-aware-port/).\n\n---\n\n### Table of Contents\n\n- [1. Sunsetting Model](#1-sunsetting-model)\n- [2. Installation](#2-installation)\n- [3. Quick Start](#3-quick-start)\n- [4. Data Types](#4-data-types)\n- [5. Configuration](#5-configuration)\n  - [5.1 Shard-aware port](#51-shard-aware-port)\n  - [5.2 Client routes (PrivateLink)](#52-client-routes-privatelink)\n  - [5.3 Iterator](#53-iterator)\n- [6. Contributing](#6-contributing)\n\n## 1. 
Sunsetting Model\n\n> [!WARNING]\n> In general, the gocql team will focus on supporting the current and previous versions of Go. gocql may still work with older versions of Go, but official support for these versions will have been sunset.\n\n## 2. Installation\n\nThis is a drop-in replacement to gocql, it reuses the `github.com/gocql/gocql` import path.\n\nAdd the following line to your project `go.mod` file.\n\n```mod\nreplace github.com/gocql/gocql => github.com/scylladb/gocql latest\n```\n\nand run\n\n```sh\ngo mod tidy\n```\n\nto evaluate `latest` to a concrete tag.\n\nYour project now uses the Scylla driver fork, make sure you are using the `TokenAwareHostPolicy` to enable the shard-awareness, continue reading for details.\n\n## 3. Quick Start\n\nSpawn a ScyllaDB Instance using Docker Run command:\n\n```sh\ndocker run --name node1 --network your-network -p \"9042:9042\" -d scylladb/scylla:6.1.2 \\\n\t--overprovisioned 1 \\\n\t--smp 1\n```\n\nThen, create a new connection using ScyllaDB GoCQL following the example below:\n\n```go\npackage main\n\nimport (\n    \"fmt\"\n    \"github.com/gocql/gocql\"\n)\n\nfunc main() {\n    var cluster = gocql.NewCluster(\"localhost:9042\")\n\n    var session, err = cluster.CreateSession()\n    if err != nil {\n        panic(\"Failed to connect to cluster\")\n    }\n\n    defer session.Close()\n\n    var query = session.Query(\"SELECT * FROM system.clients\")\n\n    if rows, err := query.Iter().SliceMap(); err == nil {\n        for _, row := range rows {\n            fmt.Printf(\"%v\\n\", row)\n        }\n    } else {\n        panic(\"Query error: \" + err.Error())\n    }\n}\n```\n\n`SliceMap()` consumes and closes the iterator before it returns.\n\n## 4. 
Data Types\n\nHere's a list of all CQL Types reflected in the GoCQL environment:\n\n| ScyllaDB Type    | Go Type            |\n| ---------------- | ------------------ |\n| `ascii`          | `string`           |\n| `bigint`         | `int64`            |\n| `blob`           | `[]byte`           |\n| `boolean`        | `bool`             |\n| `date`           | `time.Time`        |\n| `decimal`        | `inf.Dec`          |\n| `double`         | `float64`          |\n| `duration`       | `gocql.Duration`   |\n| `float`          | `float32`          |\n| `uuid`           | `gocql.UUID`       |\n| `int`            | `int32`            |\n| `inet`           | `string`           |\n| `list<int>`      | `[]int32`          |\n| `map<int, text>` | `map[int32]string` |\n| `set<int>`       | `[]int32`          |\n| `smallint`       | `int16`            |\n| `text`           | `string`           |\n| `time`           | `time.Duration`    |\n| `timestamp`      | `time.Time`        |\n| `timeuuid`       | `gocql.UUID`       |\n| `tinyint`        | `int8`             |\n| `varchar`        | `string`           |\n| `varint`         | `int64`            |\n\n## 5. 
Configuration\n\nIn order to make shard-awareness work, token aware host selection policy has to be enabled.\nPlease make sure that the gocql configuration has `PoolConfig.HostSelectionPolicy` properly set like in the example below.\n\n__When working with a Scylla cluster, `PoolConfig.NumConns` option has no effect - the driver opens one connection for each shard and completely ignores this option.__\n\n```go\nc := gocql.NewCluster(hosts...)\n\n// Enable token aware host selection policy, if using multi-dc cluster set a local DC.\nfallback := gocql.RoundRobinHostPolicy()\nif localDC != \"\" {\n\tfallback = gocql.DCAwareRoundRobinPolicy(localDC)\n}\nc.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(fallback)\n\n// If using multi-dc cluster use the \"local\" consistency levels.\nif localDC != \"\" {\n\tc.Consistency = gocql.LocalQuorum\n}\n\n// When working with a Scylla cluster the driver always opens one connection per shard, so `NumConns` is ignored.\n// c.NumConns = 4\n```\n\n### 5.1 Shard-aware port\n\nThis version of gocql supports a more robust method of establishing connection for each shard by using _shard aware port_ for native transport.\nIt greatly reduces time and the number of connections needed to establish a connection per shard in some cases - ex. 
when many clients connect at once, or when there are non-shard-aware clients connected to the same cluster.\n\nIf you are using a custom Dialer and if your nodes expose the shard-aware port, it is highly recommended to update it so that it uses a specific source port when connecting.\n\n* If you are using a custom `net.Dialer`, you can make your dialer honor the source port by wrapping it in a `gocql.ScyllaShardAwareDialer`:\n\n  ```go\n  oldDialer := net.Dialer{...}\n  clusterConfig.Dialer := &gocql.ScyllaShardAwareDialer{oldDialer}\n  ```\n\n* If you are using a custom type implementing `gocql.Dialer`, you can get the source port by using the `gocql.ScyllaGetSourcePort` function.\n  An example:\n\n  ```go\n  func (d *myDialer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {\n      sourcePort := gocql.ScyllaGetSourcePort(ctx)\n      localAddr, err := net.ResolveTCPAddr(network, fmt.Sprintf(\":%d\", sourcePort))\n      if err != nil {\n          return nil, err\n      }\n\t  d := &net.Dialer{LocalAddr: localAddr}\n\t  return d.DialContext(ctx, network, addr)\n  }\n  ```\n\n  The source port might be already bound by another connection on your system.\n  In such case, you should return an appropriate error so that the driver can retry with a different port suitable for the shard it tries to connect to.\n\n  * If you are using `net.Dialer.DialContext`, this function will return an error in case the source port is unavailable, and you can just return that error from your custom `Dialer`.\n  * Otherwise, if you detect that the source port is unavailable, you can either return `gocql.ErrScyllaSourcePortAlreadyInUse` or `syscall.EADDRINUSE`.\n\nFor this feature to work correctly, you need to make sure the following conditions are met:\n\n* Your cluster nodes are configured to listen on the shard-aware port (`native_shard_aware_transport_port` option),\n* Your cluster nodes are not behind a NAT which changes source ports,\n* If you have a custom 
Dialer, it connects from the correct source port (see the guide above).\n\nThe feature is designed to gracefully fall back to using the non-shard-aware port when it detects that some of the above conditions are not met.\nThe driver will print a warning about misconfigured address translation if it detects it.\nIssues with shard-aware port not being reachable are not reported in non-debug mode, because there is no way to detect it without false positives.\n\nIf you suspect that this feature is causing you problems, you can completely disable it by setting the `ClusterConfig.DisableShardAwarePort` flag to true.\n\n### 5.2 Client routes (PrivateLink)\n\nScylla Cloud exposes a `system.client_routes` table that maps hosts to PrivateLink endpoints.\nWhen configured, the driver can resolve and connect to the per-host PrivateLink address instead of using the public host IP.\n\nUse `WithClientRoutes` to enable it and pass the connection IDs you receive from Scylla Cloud:\n\n```go\ncluster := gocql.NewCluster(\"private-link.dns.name\")\ncluster.WithOptions(\n\tgocql.WithClientRoutes(\n\t\tgocql.WithEndpoints(\n\t\t\tgocql.ClientRoutesEndpoint{ConnectionID: \"your-connection-id\"},\n\t\t),\n\t),\n)\n```\n\nIf you also want to seed the cluster with PrivateLink hostnames, provide `ConnectionAddr` values in the endpoints list.\n\n### 5.3 Iterator\n\nPaging is a way to parse large result sets in smaller chunks.\nThe driver provides an iterator to simplify this process.\n\nUse `Query.Iter()` to obtain an iterator:\n\n```go\niter := session.Query(\"SELECT id, value FROM my_table WHERE id > 100 AND id < 10000\").Iter()\nvar results []int\n\nvar id, value int\nfor iter.Scan(&id, &value) {\n\tif id%2 == 0 {\n\t\tresults = append(results, value)\n\t}\n}\n\nif err := iter.Close(); err != nil {\n    // handle error\n}\n```\n\nIn case of range and `ALLOW FILTERING` queries server can send empty responses for some pages.\nThat is why you should never consider an empty response as the end of 
the result set.\nAlways check `iter.Scan()` result to know if there are more results, or `Iter.LastPage()` to know if the last page was reached.\n\n### 5.4 Compression\n\nTo control network costs and traffic, you can enable compression.\n\nUse `ClusterConfig.Compressor` to enable compression (either Snappy or LZ4):\n\n```go\n...\nimport (\n    ...\n    \"github.com/gocql/gocql\"\n    \"github.com/gocql/gocql/lz4\"\n    ...\n)\n\nconfig := gocql.NewCluster(\"10.0.12.83\", \"10.0.13.04\", \"10.0.14.12\")\nconfig.Compressor = &gocql.SnappyCompressor{}\n//or LZ4\nconfig.Compressor = &lz4.LZ4Compressor{}\n...\n```\n\n## 6. Contributing\n\nIf you are interested in contributing to this GoCQL fork, please read [CONTRIBUTING.md](CONTRIBUTING.md) before opening any Issue or Pull Request.\n"
  },
  {
    "path": "address_translators.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport \"net\"\n\n// AddressTranslator provides a way to translate node addresses (and ports) that are\n// discovered or received as a node event. This can be useful in an ec2 environment,\n// for instance, to translate public IPs to private IPs.\ntype AddressTranslator interface {\n\t// Translate will translate the provided address and/or port to another\n\t// address and/or port. If no translation is possible, Translate will return the\n\t// address and port provided to it.\n\tTranslate(addr net.IP, port int) (net.IP, int)\n}\n\ntype AddressTranslatorFunc func(addr net.IP, port int) (net.IP, int)\n\nfunc (fn AddressTranslatorFunc) Translate(addr net.IP, port int) (net.IP, int) {\n\treturn fn(addr, port)\n}\n\n// IdentityTranslator will do nothing but return what it was provided. 
It is essentially a no-op.\nfunc IdentityTranslator() AddressTranslator {\n\treturn AddressTranslatorFunc(func(addr net.IP, port int) (net.IP, int) {\n\t\treturn addr, port\n\t})\n}\n\ntype AddressTranslatorHostInfo interface {\n\tHostID() string\n\tRack() string\n\tDataCenter() string\n\tBroadcastAddress() net.IP\n\tListenAddress() net.IP\n\tRPCAddress() net.IP\n\tPreferredIP() net.IP\n\tPeer() net.IP\n\tUntranslatedConnectAddress() net.IP\n\tPort() int\n\tPartitioner() string\n\tClusterName() string\n\tScyllaShardAwarePort() uint16\n\tScyllaShardAwarePortTLS() uint16\n\tScyllaShardCount() int\n}\n\n// AddressTranslatorV2 provides a way to translate node addresses (and ports) that are\n// discovered or received as a node event. This can be useful in an ec2 environment,\n// for instance, to translate public IPs to private IPs.\ntype AddressTranslatorV2 interface {\n\tAddressTranslator\n\tTranslateHost(host AddressTranslatorHostInfo, addr AddressPort) (AddressPort, error)\n}\n\ntype AddressTranslatorFuncV2 func(hostID string, addr AddressPort) AddressPort\n\nfunc (fn AddressTranslatorFuncV2) Translate(addr net.IP, port int) (net.IP, int) {\n\tres := fn(\"\", AddressPort{\n\t\tAddress: addr,\n\t\tPort:    uint16(port),\n\t})\n\treturn res.Address, int(res.Port)\n}\n\nfunc (fn AddressTranslatorFuncV2) TranslateHost(host AddressTranslatorHostInfo, addr AddressPort) (AddressPort, error) {\n\treturn fn(host.HostID(), addr), nil\n}\n\nvar _ AddressTranslatorV2 = AddressTranslatorFuncV2(nil)\n"
  },
  {
    "path": "address_translators_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"net\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql/internal/tests\"\n)\n\nfunc TestIdentityAddressTranslator_NilAddrAndZeroPort(t *testing.T) {\n\tt.Parallel()\n\n\tvar tr AddressTranslator = IdentityTranslator()\n\thostIP := net.ParseIP(\"\")\n\tif hostIP != nil {\n\t\tt.Errorf(\"expected host ip to be (nil) but was (%+v) instead\", hostIP)\n\t}\n\n\taddr, port := tr.Translate(hostIP, 0)\n\tif addr != nil {\n\t\tt.Errorf(\"expected translated host to be (nil) but was (%+v) instead\", addr)\n\t}\n\ttests.AssertEqual(t, \"translated port\", 0, port)\n}\n\nfunc TestIdentityAddressTranslator_HostProvided(t *testing.T) {\n\tt.Parallel()\n\n\tvar tr AddressTranslator = IdentityTranslator()\n\thostIP := net.ParseIP(\"10.1.2.3\")\n\tif hostIP == nil {\n\t\tt.Error(\"expected host ip not to be (nil)\")\n\t}\n\n\taddr, port := 
tr.Translate(hostIP, 9042)\n\tif !hostIP.Equal(addr) {\n\t\tt.Errorf(\"expected translated addr to be (%+v) but was (%+v) instead\", hostIP, addr)\n\t}\n\ttests.AssertEqual(t, \"translated port\", 9042, port)\n}\n\nfunc TestTranslateHostAddresses_NoScyllaPorts(t *testing.T) {\n\tt.Parallel()\n\n\ttranslator := AddressTranslatorFunc(func(addr net.IP, port int) (net.IP, int) {\n\t\treturn net.ParseIP(\"10.10.10.10\"), 9142\n\t})\n\thost := HostInfoBuilder{\n\t\tConnectAddress: net.ParseIP(\"10.0.0.1\"),\n\t\tPort:           9042,\n\t}.Build()\n\n\ttranslated, err := translateHostAddresses(translator, &host, nil)\n\n\ttests.AssertNil(t, \"should return no error\", err)\n\ttests.AssertTrue(t, \"translated CQL address\", net.ParseIP(\"10.10.10.10\").Equal(translated.CQL.Address))\n\ttests.AssertEqual(t, \"translated CQL port\", uint16(9142), translated.CQL.Port)\n\ttests.AssertTrue(t, \"shard aware empty address\", len(translated.ShardAware.Address) == 0)\n\ttests.AssertEqual(t, \"shard aware empty port\", uint16(0), translated.ShardAware.Port)\n\ttests.AssertTrue(t, \"shard aware tls empty address\", len(translated.ShardAwareTLS.Address) == 0)\n\ttests.AssertEqual(t, \"shard aware tls empty port\", uint16(0), translated.ShardAwareTLS.Port)\n}\n\nfunc TestTranslateHostAddresses_WithScyllaPorts(t *testing.T) {\n\tt.Parallel()\n\n\ttranslatedIP := net.ParseIP(\"192.0.2.10\")\n\ttranslator := AddressTranslatorFuncV2(func(hostID string, addr AddressPort) AddressPort {\n\t\tif hostID != \"a0000000-0000-0000-0000-000000000001\" {\n\t\t\tt.Errorf(\"expected host id %q, got %q\", \"a0000000-0000-0000-0000-000000000001\", hostID)\n\t\t}\n\t\treturn AddressPort{\n\t\t\tAddress: translatedIP,\n\t\t\tPort:    addr.Port + 1,\n\t\t}\n\t})\n\thost := HostInfoBuilder{\n\t\tConnectAddress: net.ParseIP(\"10.0.0.1\"),\n\t\tPort:           9042,\n\t\tHostId:         \"a0000000-0000-0000-0000-000000000001\",\n\t}.Build()\n\thost.setScyllaFeatures(ScyllaHostFeatures{\n\t\tshardAwarePort:    
19042,\n\t\tshardAwarePortTLS: 19043,\n\t})\n\n\ttranslated, err := translateHostAddresses(translator, &host, nil)\n\n\ttests.AssertNil(t, \"should return no error\", err)\n\ttests.AssertTrue(t, \"translated CQL address\", translatedIP.Equal(translated.CQL.Address))\n\ttests.AssertEqual(t, \"translated CQL port\", uint16(9043), translated.CQL.Port)\n\ttests.AssertTrue(t, \"translated shard aware address\", translatedIP.Equal(translated.ShardAware.Address))\n\ttests.AssertEqual(t, \"translated shard aware port\", uint16(19043), translated.ShardAware.Port)\n\ttests.AssertTrue(t, \"translated shard aware tls address\", translatedIP.Equal(translated.ShardAwareTLS.Address))\n\ttests.AssertEqual(t, \"translated shard aware tls port\", uint16(19044), translated.ShardAwareTLS.Port)\n}\n"
  },
  {
    "path": "batch_test.go",
    "content": "//go:build integration\n// +build integration\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestBatch_Errors(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int primary key, val inet)`, table)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tb := session.Batch(LoggedBatch)\n\tb = b.Query(fmt.Sprintf(\"SELECT * FROM gocql_test.%s WHERE id=2 AND val=?\", table), nil)\n\tif err := b.Exec(); err == nil {\n\t\tt.Fatal(\"expected to get error for invalid query in batch\")\n\t}\n}\n\nfunc TestBatch_WithTimestamp(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int primary key, 
val text)`, table)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tmicros := time.Now().UnixNano()/1e3 - 1000\n\n\tb := session.Batch(LoggedBatch)\n\tb.WithTimestamp(micros)\n\tb = b.Query(fmt.Sprintf(\"INSERT INTO gocql_test.%s (id, val) VALUES (?, ?)\", table), 1, \"val\")\n\tb = b.Query(fmt.Sprintf(\"INSERT INTO gocql_test.%s (id, val) VALUES (?, ?)\", table), 2, \"val\")\n\n\tif err := b.Exec(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar storedTs int64\n\tif err := session.Query(fmt.Sprintf(`SELECT writetime(val) FROM gocql_test.%s WHERE id = ?`, table), 1).Scan(&storedTs); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif storedTs != micros {\n\t\tt.Errorf(\"got ts %d, expected %d\", storedTs, micros)\n\t}\n}\n"
  },
  {
    "path": "callreq_wait.go",
    "content": "//go:build !race\n\npackage gocql\n\nfunc waitCallReqDone(call *callReq, where string) {\n\tcall.done.Wait()\n}\n"
  },
  {
    "path": "callreq_wait_race.go",
    "content": "//go:build race\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\nfunc waitCallReqDone(call *callReq, where string) {\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tcall.done.Wait()\n\t\tclose(done)\n\t}()\n\n\ttimer := time.NewTimer(2 * time.Second)\n\tdefer timer.Stop()\n\n\tselect {\n\tcase <-done:\n\tcase <-timer.C:\n\t\tpanic(fmt.Sprintf(\"gocql: timed out waiting for exec cleanup in %s (stream=%d)\", where, call.streamID))\n\t}\n}\n"
  },
  {
    "path": "cass1batch_test.go",
    "content": "//go:build integration\n// +build integration\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestProto1BatchInsert(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int primary key)\", table)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tbegin := \"BEGIN BATCH\"\n\tend := \"APPLY BATCH\"\n\tquery := fmt.Sprintf(\"INSERT INTO %s (id) VALUES (?)\", table)\n\tfullQuery := strings.Join([]string{begin, query, end}, \"\\n\")\n\targs := []any{5}\n\tif err := session.Query(fullQuery, args...).Consistency(Quorum).Exec(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestShouldPrepareFunction(t *testing.T) {\n\tt.Parallel()\n\n\tvar shouldPrepareTests = []struct {\n\t\tStmt   string\n\t\tResult bool\n\t}{\n\t\t{`\n      BEGIN 
BATCH\n        INSERT INTO users (userID, password)\n        VALUES ('smith', 'secret')\n      APPLY BATCH\n    ;\n      `, true},\n\t\t{`INSERT INTO users (userID, password, name) VALUES ('user2', 'ch@ngem3b', 'second user')`, true},\n\t\t{`BEGIN COUNTER BATCH UPDATE stats SET views = views + 1 WHERE pageid = 1 APPLY BATCH`, true},\n\t\t{`delete name from users where userID = 'smith';`, true},\n\t\t{`  UPDATE users SET password = 'secret' WHERE userID = 'smith'   `, true},\n\t\t{`CREATE TABLE users (\n        user_name varchar PRIMARY KEY,\n        password varchar,\n        gender varchar,\n        session_token varchar,\n        state varchar,\n        birth_year bigint\n      );`, false},\n\t}\n\n\tfor _, test := range shouldPrepareTests {\n\t\tq := &Query{stmt: test.Stmt, routingInfo: &queryRoutingInfo{}}\n\t\tif got := q.shouldPrepare(); got != test.Result {\n\t\t\tt.Fatalf(\"%q: got %v, expected %v\\n\", test.Stmt, got, test.Result)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "cassandra_test.go",
    "content": "//go:build integration\n// +build integration\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"math/big\"\n\t\"net\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\t\"unicode\"\n\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n\t\"github.com/gocql/gocql/internal/tests\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"gopkg.in/inf.v0\"\n)\n\nfunc TestEmptyHosts(t *testing.T) {\n\tt.Parallel()\n\n\tcluster := createCluster()\n\tcluster.Hosts = nil\n\tif session, err := cluster.CreateSession(); err == nil {\n\t\tsession.Close()\n\t\tt.Error(\"expected err, got nil\")\n\t}\n}\n\nfunc TestInvalidPeerEntry(t *testing.T) {\n\tt.Parallel()\n\n\tt.Skip(\"dont mutate system tables, rewrite this to test what we mean to test\")\n\tsession := createSession(t)\n\n\t// rack, release_version, schema_version, tokens are 
all null\n\tquery := session.Query(\"INSERT into system.peers (peer, data_center, host_id, rpc_address) VALUES (?, ?, ?, ?)\",\n\t\t\"169.254.235.45\",\n\t\t\"datacenter1\",\n\t\t\"35c0ec48-5109-40fd-9281-9e9d4add2f1e\",\n\t\t\"169.254.235.45\",\n\t)\n\n\tif err := query.Exec(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tsession.Close()\n\n\tcluster := createCluster()\n\tcluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())\n\tsession = createSessionFromCluster(cluster, t)\n\tdefer func() {\n\t\tsession.Query(\"DELETE from system.peers where peer = ?\", \"169.254.235.45\").Exec()\n\t\tsession.Close()\n\t}()\n\n\t// check we can perform a query\n\titer := session.Query(\"select peer from system.peers\").Iter()\n\tvar peer string\n\tfor iter.Scan(&peer) {\n\t}\n\tif err := iter.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n// TestUseStatementError checks to make sure the correct error is returned when the user tries to execute a use statement.\nfunc TestUseStatementError(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tif err := session.Query(\"USE gocql_test\").Exec(); err != nil {\n\t\tif err != ErrUseStmt {\n\t\t\tt.Fatalf(\"expected ErrUseStmt, got: %v\", err)\n\t\t}\n\t} else {\n\t\tt.Fatal(\"expected err, got nil.\")\n\t}\n}\n\n// TestInvalidKeyspace checks that an invalid keyspace will return promptly and without a flood of connections\nfunc TestInvalidKeyspace(t *testing.T) {\n\tt.Parallel()\n\n\tcluster := createCluster()\n\tcluster.Keyspace = \"invalidKeyspace\"\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tif err != ErrNoConnectionsStarted {\n\t\t\tt.Fatalf(\"Expected ErrNoConnections but got %v\", err)\n\t\t}\n\t} else {\n\t\tsession.Close() //Clean up the session\n\t\tt.Fatal(\"expected err, got nil.\")\n\t}\n}\n\nfunc TestTracing(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := 
testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int primary key)`, table)); err != nil {\n\t\tt.Fatal(\"create:\", err)\n\t}\n\n\tbuf := &bytes.Buffer{}\n\ttrace := &TraceWriter{session: session, w: buf}\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id) VALUES (?)`, table), 42).Trace(trace).Exec(); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t} else if buf.Len() == 0 {\n\t\tt.Fatal(\"insert: failed to obtain any tracing\")\n\t}\n\ttrace.mu.Lock()\n\tbuf.Reset()\n\ttrace.mu.Unlock()\n\n\tvar value int\n\tif err := session.Query(fmt.Sprintf(`SELECT id FROM %s WHERE id = ?`, table), 42).Trace(trace).Scan(&value); err != nil {\n\t\tt.Fatal(\"select:\", err)\n\t} else if value != 42 {\n\t\tt.Fatalf(\"value: expected %d, got %d\", 42, value)\n\t} else if buf.Len() == 0 {\n\t\tt.Fatal(\"select: failed to obtain any tracing\")\n\t}\n\n\t// also works from session tracer\n\tsession.SetTrace(trace)\n\ttrace.mu.Lock()\n\tbuf.Reset()\n\ttrace.mu.Unlock()\n\tif err := session.Query(fmt.Sprintf(`SELECT id FROM %s WHERE id = ?`, table), 42).Scan(&value); err != nil {\n\t\tt.Fatal(\"select:\", err)\n\t}\n\tif buf.Len() == 0 {\n\t\tt.Fatal(\"select: failed to obtain any tracing\")\n\t}\n}\n\nfunc TestObserve(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int primary key)`, table)); err != nil {\n\t\tt.Fatal(\"create:\", err)\n\t}\n\n\tvar (\n\t\tobservedErr      error\n\t\tobservedKeyspace string\n\t\tobservedStmt     string\n\t)\n\n\tconst keyspace = \"gocql_test\"\n\n\tresetObserved := func() {\n\t\tobservedErr = errors.New(\"placeholder only\") // used to distinguish err=nil cases\n\t\tobservedKeyspace = \"\"\n\t\tobservedStmt = \"\"\n\t}\n\n\tobserver := funcQueryObserver(func(ctx context.Context, o ObservedQuery) {\n\t\tobservedKeyspace = 
o.Keyspace\n\t\tobservedStmt = o.Statement\n\t\tobservedErr = o.Err\n\t})\n\n\t// select before inserted, will error but the reporting is err=nil as the query is valid\n\tresetObserved()\n\tvar value int\n\tif err := session.Query(fmt.Sprintf(`SELECT id FROM %s WHERE id = ?`, table), 43).Observer(observer).Scan(&value); err == nil {\n\t\tt.Fatal(\"select: expected error\")\n\t} else if observedErr != nil {\n\t\tt.Fatalf(\"select: observed error expected nil, got %q\", observedErr)\n\t} else if observedKeyspace != keyspace {\n\t\tt.Fatal(\"select: unexpected observed keyspace\", observedKeyspace)\n\t} else if observedStmt != fmt.Sprintf(`SELECT id FROM %s WHERE id = ?`, table) {\n\t\tt.Fatal(\"select: unexpected observed stmt\", observedStmt)\n\t}\n\n\tresetObserved()\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id) VALUES (?)`, table), 42).Observer(observer).Exec(); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t} else if observedErr != nil {\n\t\tt.Fatal(\"insert:\", observedErr)\n\t} else if observedKeyspace != keyspace {\n\t\tt.Fatal(\"insert: unexpected observed keyspace\", observedKeyspace)\n\t} else if observedStmt != fmt.Sprintf(`INSERT INTO %s (id) VALUES (?)`, table) {\n\t\tt.Fatal(\"insert: unexpected observed stmt\", observedStmt)\n\t}\n\n\tresetObserved()\n\tvalue = 0\n\tif err := session.Query(fmt.Sprintf(`SELECT id FROM %s WHERE id = ?`, table), 42).Observer(observer).Scan(&value); err != nil {\n\t\tt.Fatal(\"select:\", err)\n\t} else if value != 42 {\n\t\tt.Fatalf(\"value: expected %d, got %d\", 42, value)\n\t} else if observedErr != nil {\n\t\tt.Fatal(\"select:\", observedErr)\n\t} else if observedKeyspace != keyspace {\n\t\tt.Fatal(\"select: unexpected observed keyspace\", observedKeyspace)\n\t} else if observedStmt != fmt.Sprintf(`SELECT id FROM %s WHERE id = ?`, table) {\n\t\tt.Fatal(\"select: unexpected observed stmt\", observedStmt)\n\t}\n\n\t// also works from session observer\n\tresetObserved()\n\toSession := createSession(t, 
func(config *ClusterConfig) { config.QueryObserver = observer })\n\tif err := oSession.Query(fmt.Sprintf(`SELECT id FROM %s WHERE id = ?`, table), 42).Scan(&value); err != nil {\n\t\tt.Fatal(\"select:\", err)\n\t} else if observedErr != nil {\n\t\tt.Fatal(\"select:\", err)\n\t} else if observedKeyspace != keyspace {\n\t\tt.Fatal(\"select: unexpected observed keyspace\", observedKeyspace)\n\t} else if observedStmt != fmt.Sprintf(`SELECT id FROM %s WHERE id = ?`, table) {\n\t\tt.Fatal(\"select: unexpected observed stmt\", observedStmt)\n\t}\n\n\t// reports errors when the query is poorly formed\n\tresetObserved()\n\tvalue = 0\n\tif err := session.Query(`SELECT id FROM unknown_table WHERE id = ?`, 42).Observer(observer).Scan(&value); err == nil {\n\t\tt.Fatal(\"select: expecting error\")\n\t} else if observedErr == nil {\n\t\tt.Fatal(\"select: expecting observed error\")\n\t} else if observedKeyspace != keyspace {\n\t\tt.Fatal(\"select: unexpected observed keyspace\", observedKeyspace)\n\t} else if observedStmt != `SELECT id FROM unknown_table WHERE id = ?` {\n\t\tt.Fatal(\"select: unexpected observed stmt\", observedStmt)\n\t}\n}\n\nfunc TestObserve_Pagination(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int, PRIMARY KEY (id))`, table)); err != nil {\n\t\tt.Fatal(\"create:\", err)\n\t}\n\n\tvar observedRows int\n\n\tresetObserved := func() {\n\t\tobservedRows = -1\n\t}\n\n\tobserver := funcQueryObserver(func(ctx context.Context, o ObservedQuery) {\n\t\tobservedRows = o.Rows\n\t})\n\n\t// insert 100 entries, relevant for pagination\n\tfor i := 0; i < 50; i++ {\n\t\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id) VALUES (?)`, table), i).Exec(); err != nil {\n\t\t\tt.Fatal(\"insert:\", err)\n\t\t}\n\t}\n\n\tresetObserved()\n\n\t// read the 100 entries in paginated entries of size 10. 
Expecting 5 observations, each with 10 rows\n\tscanner := session.Query(fmt.Sprintf(`SELECT id FROM %s LIMIT 100`, table)).\n\t\tObserver(observer).\n\t\tPageSize(10).\n\t\tIter().Scanner()\n\tfor i := 0; i < 50; i++ {\n\t\tif !scanner.Next() {\n\t\t\tt.Fatalf(\"next: should still be true: %d: %v\", i, scanner.Err())\n\t\t}\n\t\tif i%10 == 0 {\n\t\t\tif observedRows != 10 {\n\t\t\t\tt.Fatalf(\"next: expecting a paginated query with 10 entries, got: %d (%d)\", observedRows, i)\n\t\t\t}\n\t\t} else if observedRows != -1 {\n\t\t\tt.Fatalf(\"next: not expecting paginated query (-1 entries), got: %d\", observedRows)\n\t\t}\n\n\t\tresetObserved()\n\t}\n\n\tif scanner.Next() {\n\t\tt.Fatal(\"next: no more entries where expected\")\n\t}\n}\n\nfunc TestPaging(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int primary key)\", table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\tfor i := 0; i < 100; i++ {\n\t\tif err := session.Query(fmt.Sprintf(\"INSERT INTO %s (id) VALUES (?)\", table), i).Exec(); err != nil {\n\t\t\tt.Fatal(\"insert:\", err)\n\t\t}\n\t}\n\n\titer := session.Query(fmt.Sprintf(\"SELECT id FROM %s\", table)).PageSize(10).Iter()\n\tvar id int\n\tcount := 0\n\tfor iter.Scan(&id) {\n\t\tcount++\n\t}\n\tif err := iter.Close(); err != nil {\n\t\tt.Fatal(\"close:\", err)\n\t}\n\tif count != 100 {\n\t\tt.Fatalf(\"expected %d, got %d\", 100, count)\n\t}\n}\n\nfunc TestPagingWithAllowFiltering(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\n\ttable := testTableName(t)\n\n\tt.Cleanup(func() {\n\t\tif err := session.Query(fmt.Sprintf(\"DROP TABLE gocql_test.%s\", table)).Exec(); err != nil {\n\t\t\tt.Fatal(\"drop table:\", err)\n\t\t}\n\t\tsession.Close()\n\t})\n\n\tconst (\n\t\ttargetP1             = 50\n\t\ttargetP2             = 50\n\t\ttotalExpectedResults = 30\n\t\tpageSize       
      = 5\n\t\tdeletedRageStart     = 10\n\t\tdeletedRageEnd       = 20\n\t\t// Some record range is being deleted, to test tombstones appearance\n\t\texpectedCount = totalExpectedResults - (deletedRageEnd - deletedRageStart)\n\t)\n\n\tpaginatedSelect := fmt.Sprintf(\"SELECT c1, f1 FROM gocql_test.%s WHERE p1 = %d AND p2 = %d AND f1 < %d ALLOW FILTERING;\", table, targetP1, targetP2, totalExpectedResults)\n\tvalidateResult := func(t *testing.T, results []int) {\n\t\tif len(results) != expectedCount {\n\t\t\tt.Fatalf(\"expected %d got %d: %d\", expectedCount, len(results), results)\n\t\t}\n\n\t\tsort.Ints(results)\n\n\t\texpect := make([]int, 0, expectedCount)\n\t\tfor i := 0; i < totalExpectedResults; i++ {\n\t\t\tif i >= deletedRageStart && i < deletedRageEnd {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\texpect = append(expect, i)\n\t\t}\n\n\t\tif !reflect.DeepEqual(results, expect) {\n\t\t\tt.Fatalf(\"expected %v\\ngot %v\", expect, results)\n\t\t}\n\t}\n\n\tt.Run(\"Prepare\", func(t *testing.T) {\n\t\tif err := createTable(session,\n\t\t\tfmt.Sprintf(\"CREATE TABLE gocql_test.%s (p1 int, p2 int, c1 int, f1 int, \"+\n\t\t\t\t\"PRIMARY KEY ((p1, p2), c1)) WITH CLUSTERING ORDER BY (c1 DESC)\", table)); err != nil {\n\t\t\tt.Fatal(\"create table:\", err)\n\t\t}\n\n\t\t// Insert extra records\n\t\tfor i := 0; i < 100; i++ {\n\t\t\tif err := session.Query(fmt.Sprintf(\"INSERT INTO gocql_test.%s (p1,p2,c1,f1) VALUES (?,?,?,?)\", table), i, i, i, i).Exec(); err != nil {\n\t\t\t\tt.Fatal(\"insert:\", err)\n\t\t\t}\n\t\t}\n\n\t\t// Insert records to a target partition\n\t\tfor i := 0; i < 100; i++ {\n\t\t\tif err := session.Query(fmt.Sprintf(\"INSERT INTO gocql_test.%s (p1,p2,c1,f1) VALUES (?,?,?,?)\", table), targetP1, targetP2, i, i).Exec(); err != nil {\n\t\t\t\tt.Fatal(\"insert:\", err)\n\t\t\t}\n\t\t}\n\n\t\tif err := session.Query(fmt.Sprintf(\"DELETE FROM gocql_test.%s WHERE p1 = ? AND p2 = ? AND c1 >= ? 
AND c1 < ?\", table), targetP1, targetP2, deletedRageStart, deletedRageEnd).Exec(); err != nil {\n\t\tt.Fatal(\"delete:\", err)\n\t}\n\t})\n\n\tt.Run(\"AutoPagination\", func(t *testing.T) {\n\t\tfor _, c := range []Consistency{One, Quorum} {\n\t\t\tt.Run(c.String(), func(t *testing.T) {\n\t\t\t\titer := session.Query(paginatedSelect).Consistency(c).PageSize(pageSize).Iter()\n\n\t\t\t\tvar c1, f1 int\n\t\t\t\tvar results []int\n\n\t\t\t\tfor iter.Scan(&c1, &f1) {\n\t\t\t\t\tif c1 != f1 {\n\t\t\t\t\t\tt.Fatalf(\"expected c1 and f1 values to be the same, but got c1=%d f1=%d\", c1, f1)\n\t\t\t\t\t}\n\t\t\t\t\tresults = append(results, f1)\n\t\t\t\t}\n\t\t\t\tif err := iter.Close(); err != nil {\n\t\t\t\t\tt.Fatal(\"select:\", err.Error())\n\t\t\t\t}\n\t\t\t\tvalidateResult(t, results)\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"ManualPagination\", func(t *testing.T) {\n\t\tfor _, c := range []Consistency{One, Quorum} {\n\t\t\tt.Run(c.String(), func(t *testing.T) {\n\n\t\t\t\tvar c1, f1 int\n\t\t\t\tvar results []int\n\t\t\t\tvar currentPageState []byte\n\n\t\t\t\tqry := session.Query(paginatedSelect).Consistency(c).PageSize(pageSize)\n\n\t\t\t\tfor {\n\t\t\t\t\titer := qry.PageState(currentPageState).Iter()\n\n\t\t\t\t\t// Here we make sure that all iterator, but last one have some data in it\n\t\t\t\t\tif !iter.LastPage() && iter.NumRows() == 0 {\n\t\t\t\t\t\tt.Errorf(\"expected at least one row, but got 0\")\n\t\t\t\t\t}\n\t\t\t\t\tfor iter.Scan(&c1, &f1) {\n\t\t\t\t\t\tif c1 != f1 {\n\t\t\t\t\t\t\tt.Fatalf(\"expected c1 and f1 values to be the same, but got c1=%d f1=%d\", c1, f1)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tresults = append(results, f1)\n\t\t\t\t\t}\n\t\t\t\t\tif err := iter.Close(); err != nil {\n\t\t\t\t\t\tt.Fatal(\"select:\", err.Error())\n\t\t\t\t\t}\n\t\t\t\t\tif iter.LastPage() {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tnewPageState := iter.PageState()\n\t\t\t\t\tif len(currentPageState) == len(newPageState) && bytes.Compare(newPageState, currentPageState) == 
0 {\n\t\t\t\t\t\tt.Fatalf(\"page state did not change\")\n\t\t\t\t\t}\n\t\t\t\t\tcurrentPageState = newPageState\n\t\t\t\t}\n\n\t\t\t\tvalidateResult(t, results)\n\t\t\t})\n\t\t}\n\t})\n\n}\n\nfunc TestPagingWithBind(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, val int, primary key(id,val))\", table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\tfor i := 0; i < 100; i++ {\n\t\tif err := session.Query(fmt.Sprintf(\"INSERT INTO %s (id,val) VALUES (?,?)\", table), 1, i).Exec(); err != nil {\n\t\t\tt.Fatal(\"insert:\", err)\n\t\t}\n\t}\n\n\tq := session.Query(fmt.Sprintf(\"SELECT val FROM %s WHERE id = ? AND val < ?\", table), 1, 50).PageSize(10)\n\titer := q.Iter()\n\tvar id int\n\tcount := 0\n\tfor iter.Scan(&id) {\n\t\tcount++\n\t}\n\tif err := iter.Close(); err != nil {\n\t\tt.Fatal(\"close:\", err)\n\t}\n\tif count != 50 {\n\t\tt.Fatalf(\"expected %d, got %d\", 50, count)\n\t}\n\n\titer = q.Bind(1, 20).Iter()\n\tcount = 0\n\tfor iter.Scan(&id) {\n\t\tcount++\n\t}\n\tif count != 20 {\n\t\tt.Fatalf(\"expected %d, got %d\", 20, count)\n\t}\n\tif err := iter.Close(); err != nil {\n\t\tt.Fatal(\"close:\", err)\n\t}\n}\n\nfunc TestCAS(t *testing.T) {\n\tt.Parallel()\n\n\tcluster := createCluster()\n\tcluster.SerialConsistency = LocalSerial\n\tsession := createSessionFromClusterTabletsDisabled(cluster, t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE %s (\n\t\t\ttitle         varchar,\n\t\t\trevid   \t  timeuuid,\n\t\t\tlast_modified timestamp,\n\t\t\tPRIMARY KEY (title, revid)\n\t\t)`, table)); err != nil {\n\t\tt.Fatal(\"create:\", err)\n\t}\n\n\ttitle, revid, modified := \"baz\", TimeUUID(), time.Now()\n\tvar titleCAS string\n\tvar revidCAS UUID\n\tvar modifiedCAS time.Time\n\n\tif applied, err := 
session.Query(fmt.Sprintf(`INSERT INTO %s (title, revid, last_modified)\n\t\tVALUES (?, ?, ?) IF NOT EXISTS`,\n\t\ttable), title, revid, modified).ScanCAS(&titleCAS, &revidCAS, &modifiedCAS); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t} else if !applied {\n\t\tt.Fatal(\"insert should have been applied\")\n\t}\n\n\tif applied, err := session.Query(fmt.Sprintf(`INSERT INTO %s (title, revid, last_modified)\n\t\tVALUES (?, ?, ?) IF NOT EXISTS`,\n\t\ttable), title, revid, modified).ScanCAS(&titleCAS, &revidCAS, &modifiedCAS); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t} else if applied {\n\t\tt.Fatal(\"insert should not have been applied\")\n\t} else if title != titleCAS || revid != revidCAS {\n\t\tt.Fatalf(\"expected %s/%v/%v but got %s/%v/%v\", title, revid, modified, titleCAS, revidCAS, modifiedCAS)\n\t}\n\n\ttenSecondsLater := modified.Add(10 * time.Second)\n\n\tif applied, err := session.Query(fmt.Sprintf(`DELETE FROM %s WHERE title = ? and revid = ? IF last_modified = ?`,\n\t\ttable), title, revid, tenSecondsLater).ScanCAS(&modifiedCAS); err != nil {\n\t\tt.Fatal(\"delete:\", err)\n\t} else if applied {\n\t\tt.Fatal(\"delete should have not been applied\")\n\t}\n\n\tif modifiedCAS.Unix() != tenSecondsLater.Add(-10*time.Second).Unix() {\n\t\tt.Fatalf(\"Was expecting modified CAS to be %v; but was one second later\", modifiedCAS.UTC())\n\t}\n\n\tif _, err := session.Query(fmt.Sprintf(`DELETE FROM %s WHERE title = ? and revid = ? IF last_modified = ?`,\n\t\ttable), title, revid, tenSecondsLater).ScanCAS(); !strings.HasPrefix(err.Error(), \"gocql: not enough columns to scan into\") {\n\t\tt.Fatalf(\"delete: was expecting count mismatch error but got: %q\", err.Error())\n\t}\n\n\tif applied, err := session.Query(fmt.Sprintf(`DELETE FROM %s WHERE title = ? and revid = ? 
IF last_modified = ?`,\n\t\ttable), title, revid, modified).ScanCAS(&modifiedCAS); err != nil {\n\t\tt.Fatal(\"delete:\", err)\n\t} else if !applied {\n\t\tt.Fatal(\"delete should have been applied\")\n\t}\n\n\tif err := session.Query(fmt.Sprintf(`TRUNCATE %s`, table)).Exec(); err != nil {\n\t\tt.Fatal(\"truncate:\", err)\n\t}\n\n\tsuccessBatch := session.Batch(LoggedBatch)\n\tsuccessBatch.Query(fmt.Sprintf(\"INSERT INTO %s (title, revid, last_modified) VALUES (?, ?, ?) IF NOT EXISTS\", table), title, revid, modified)\n\tif applied, _, err := session.ExecuteBatchCAS(successBatch, &titleCAS, &revidCAS, &modifiedCAS); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t} else if !applied {\n\t\tt.Fatalf(\"insert should have been applied: title=%v revID=%v modified=%v\", titleCAS, revidCAS, modifiedCAS)\n\t}\n\n\tsuccessBatch = session.Batch(LoggedBatch)\n\tsuccessBatch.Query(fmt.Sprintf(\"INSERT INTO %s (title, revid, last_modified) VALUES (?, ?, ?) IF NOT EXISTS\", table), title+\"_foo\", revid, modified)\n\tcasMap := make(map[string]any)\n\tif applied, _, err := session.MapExecuteBatchCAS(successBatch, casMap); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t} else if !applied {\n\t\tt.Fatal(\"insert should have been applied\")\n\t}\n\n\tfailBatch := session.Batch(LoggedBatch)\n\tfailBatch.Query(fmt.Sprintf(\"INSERT INTO %s (title, revid, last_modified) VALUES (?, ?, ?) 
IF NOT EXISTS\", table), title, revid, modified)\n\tif applied, _, err := session.ExecuteBatchCAS(successBatch, &titleCAS, &revidCAS, &modifiedCAS); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t} else if applied {\n\t\tt.Fatalf(\"insert should have not been applied: title=%v revID=%v modified=%v\", titleCAS, revidCAS, modifiedCAS)\n\t}\n\n\tinsertBatch := session.Batch(LoggedBatch)\n\tif *flagDistribution == \"cassandra\" && flagCassVersion.AtLeast(4, 1, 0) {\n\t\tinsertBatch.Query(fmt.Sprintf(\"INSERT INTO %s (title, revid, last_modified) VALUES ('_foo', 2c3af400-73a4-11e5-9381-29463d90c3f0, toTimestamp(NOW()))\", table))\n\t\tinsertBatch.Query(fmt.Sprintf(\"INSERT INTO %s (title, revid, last_modified) VALUES ('_foo', 3e4ad2f1-73a4-11e5-9381-29463d90c3f0, toTimestamp(NOW()))\", table))\n\t} else {\n\t\tinsertBatch.Query(fmt.Sprintf(\"INSERT INTO %s (title, revid, last_modified) VALUES ('_foo', 2c3af400-73a4-11e5-9381-29463d90c3f0, DATEOF(NOW()))\", table))\n\t\tinsertBatch.Query(fmt.Sprintf(\"INSERT INTO %s (title, revid, last_modified) VALUES ('_foo', 3e4ad2f1-73a4-11e5-9381-29463d90c3f0, DATEOF(NOW()))\", table))\n\t}\n\tif err := session.ExecuteBatch(insertBatch); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t}\n\n\tfailBatch = session.Batch(LoggedBatch)\n\tif *flagDistribution == \"cassandra\" && flagCassVersion.AtLeast(4, 1, 0) {\n\t\tfailBatch.Query(fmt.Sprintf(\"UPDATE %s SET last_modified = toTimestamp(NOW()) WHERE title='_foo' AND revid=2c3af400-73a4-11e5-9381-29463d90c3f0 IF last_modified=toTimestamp(NOW());\", table))\n\t\tfailBatch.Query(fmt.Sprintf(\"UPDATE %s SET last_modified = toTimestamp(NOW()) WHERE title='_foo' AND revid=3e4ad2f1-73a4-11e5-9381-29463d90c3f0 IF last_modified=toTimestamp(NOW());\", table))\n\t} else {\n\t\tfailBatch.Query(fmt.Sprintf(\"UPDATE %s SET last_modified = DATEOF(NOW()) WHERE title='_foo' AND revid=2c3af400-73a4-11e5-9381-29463d90c3f0 IF last_modified=DATEOF(NOW());\", table))\n\t\tfailBatch.Query(fmt.Sprintf(\"UPDATE 
%s SET last_modified = DATEOF(NOW()) WHERE title='_foo' AND revid=3e4ad2f1-73a4-11e5-9381-29463d90c3f0 IF last_modified=DATEOF(NOW());\", table))\n\t}\n\tif applied, iter, err := session.ExecuteBatchCAS(failBatch, &titleCAS, &revidCAS, &modifiedCAS); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t} else if applied {\n\t\tt.Fatalf(\"insert should have not been applied: title=%v revID=%v modified=%v\", titleCAS, revidCAS, modifiedCAS)\n\t} else {\n\t\tif scan := iter.Scan(&applied, &titleCAS, &revidCAS, &modifiedCAS); scan && applied {\n\t\t\tt.Fatalf(\"insert should have been applied: title=%v revID=%v modified=%v\", titleCAS, revidCAS, modifiedCAS)\n\t\t} else if !scan {\n\t\t\tt.Fatal(\"should have scanned another row\")\n\t\t}\n\t\tif err := iter.Close(); err != nil {\n\t\t\tt.Fatal(\"scan:\", err)\n\t\t}\n\t}\n\n\tcasMap = make(map[string]any)\n\tif applied, err := session.Query(fmt.Sprintf(`SELECT revid FROM %s WHERE title = ?`, table),\n\t\ttitle+\"_foo\").MapScanCAS(casMap); err != nil {\n\t\tt.Fatal(\"select:\", err)\n\t} else if applied {\n\t\tt.Fatal(\"select shouldn't have returned applied\")\n\t}\n\n\tif _, err := session.Query(fmt.Sprintf(`SELECT revid FROM %s WHERE title = ?`, table),\n\t\ttitle+\"_foo\").ScanCAS(&revidCAS); err == nil {\n\t\tt.Fatal(\"select: should have returned an error\")\n\t}\n\n\tnotCASBatch := session.Batch(LoggedBatch)\n\tnotCASBatch.Query(fmt.Sprintf(\"INSERT INTO %s (title, revid, last_modified) VALUES (?, ?, ?)\", table), title+\"_baz\", revid, modified)\n\tcasMap = make(map[string]any)\n\tif _, _, err := session.MapExecuteBatchCAS(notCASBatch, casMap); err != ErrNotFound {\n\t\tt.Fatal(\"insert should have returned not found:\", err)\n\t}\n\n\tnotCASBatch = session.Batch(LoggedBatch)\n\tnotCASBatch.Query(fmt.Sprintf(\"INSERT INTO %s (title, revid, last_modified) VALUES (?, ?, ?)\", table), title+\"_baz\", revid, modified)\n\tcasMap = make(map[string]any)\n\tif _, _, err := session.ExecuteBatchCAS(notCASBatch, &revidCAS); 
err != ErrNotFound {\n\t\tt.Fatal(\"insert should have returned not found:\", err)\n\t}\n\n\tfailBatch = session.Batch(LoggedBatch)\n\tfailBatch.Query(fmt.Sprintf(\"UPDATE %s SET last_modified = TOTIMESTAMP(NOW()) WHERE title='_foo' AND revid=3e4ad2f1-73a4-11e5-9381-29463d90c3f0 IF last_modified = ?\", table), modified)\n\tif _, _, err := session.ExecuteBatchCAS(failBatch, new(bool)); err == nil {\n\t\tt.Fatal(\"update should have errored\")\n\t}\n\t// make sure MapScanCAS does not panic when MapScan fails\n\tcasMap = make(map[string]any)\n\tcasMap[\"last_modified\"] = false\n\tif _, err := session.Query(fmt.Sprintf(`UPDATE %s SET last_modified = TOTIMESTAMP(NOW()) WHERE title='_foo' AND revid=3e4ad2f1-73a4-11e5-9381-29463d90c3f0 IF last_modified = ?`, table),\n\t\tmodified).MapScanCAS(casMap); err == nil {\n\t\tt.Fatal(\"update should have errored\", err)\n\t}\n\n\t// make sure MapExecuteBatchCAS does not panic when MapScan fails\n\tfailBatch = session.Batch(LoggedBatch)\n\tfailBatch.Query(fmt.Sprintf(\"UPDATE %s SET last_modified = TOTIMESTAMP(NOW()) WHERE title='_foo' AND revid=3e4ad2f1-73a4-11e5-9381-29463d90c3f0 IF last_modified = ?\", table), modified)\n\tcasMap = make(map[string]any)\n\tcasMap[\"last_modified\"] = false\n\tif _, _, err := session.MapExecuteBatchCAS(failBatch, casMap); err == nil {\n\t\tt.Fatal(\"update should have errored\")\n\t}\n}\n\nfunc TestConsistencySerial(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\ttype testStruct struct {\n\t\tname               string\n\t\tid                 int\n\t\tconsistency        Consistency\n\t\texpectedPanicValue string\n\t}\n\n\ttestCases := []testStruct{\n\t\t{\n\t\t\tname:               \"Any\",\n\t\t\tconsistency:        Any,\n\t\t\texpectedPanicValue: \"Serial consistency can only be SERIAL or LOCAL_SERIAL got ANY\",\n\t\t}, {\n\t\t\tname:               \"One\",\n\t\t\tconsistency:        One,\n\t\t\texpectedPanicValue: 
\"Serial consistency can only be SERIAL or LOCAL_SERIAL got ONE\",\n\t\t}, {\n\t\t\tname:               \"Two\",\n\t\t\tconsistency:        Two,\n\t\t\texpectedPanicValue: \"Serial consistency can only be SERIAL or LOCAL_SERIAL got TWO\",\n\t\t}, {\n\t\t\tname:               \"Three\",\n\t\t\tconsistency:        Three,\n\t\t\texpectedPanicValue: \"Serial consistency can only be SERIAL or LOCAL_SERIAL got THREE\",\n\t\t}, {\n\t\t\tname:               \"Quorum\",\n\t\t\tconsistency:        Quorum,\n\t\t\texpectedPanicValue: \"Serial consistency can only be SERIAL or LOCAL_SERIAL got QUORUM\",\n\t\t}, {\n\t\t\tname:               \"LocalQuorum\",\n\t\t\tconsistency:        LocalQuorum,\n\t\t\texpectedPanicValue: \"Serial consistency can only be SERIAL or LOCAL_SERIAL got LOCAL_QUORUM\",\n\t\t}, {\n\t\t\tname:               \"EachQuorum\",\n\t\t\tconsistency:        EachQuorum,\n\t\t\texpectedPanicValue: \"Serial consistency can only be SERIAL or LOCAL_SERIAL got EACH_QUORUM\",\n\t\t}, {\n\t\t\tname:               \"Serial\",\n\t\t\tid:                 8,\n\t\t\tconsistency:        Serial,\n\t\t\texpectedPanicValue: \"\",\n\t\t}, {\n\t\t\tname:               \"LocalSerial\",\n\t\t\tid:                 9,\n\t\t\tconsistency:        LocalSerial,\n\t\t\texpectedPanicValue: \"\",\n\t\t}, {\n\t\t\tname:               \"LocalOne\",\n\t\t\tconsistency:        LocalOne,\n\t\t\texpectedPanicValue: \"Serial consistency can only be SERIAL or LOCAL_SERIAL got LOCAL_ONE\",\n\t\t},\n\t}\n\n\terr := session.Query(fmt.Sprintf(\"CREATE TABLE IF NOT EXISTS gocql_test.%s (id int PRIMARY KEY)\", table)).Exec()\n\tif err != nil {\n\t\tt.Fatalf(\"can't create table:%v\", err)\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tif tc.expectedPanicValue == \"\" {\n\t\t\t\terr = session.Query(fmt.Sprintf(\"INSERT INTO gocql_test.%s (id) VALUES (?)\", table), tc.id).SerialConsistency(tc.consistency).Exec()\n\t\t\t\tif err != nil 
{\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tvar receivedID int\n\t\t\t\terr = session.Query(fmt.Sprintf(\"SELECT * FROM gocql_test.%s WHERE id=?\", table), tc.id).Scan(&receivedID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\trequire.Equal(t, tc.id, receivedID)\n\t\t\t} else {\n\t\t\t\trequire.PanicsWithValue(t, tc.expectedPanicValue, func() {\n\t\t\t\t\tsession.Query(fmt.Sprintf(\"INSERT INTO gocql_test.%s (id) VALUES (?)\", table), tc.id).SerialConsistency(tc.consistency)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestDurationType(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tif session.cfg.ProtoVersion < protoVersion5 {\n\t\tt.Skip(\"Duration type is not supported. Please use protocol version >= 4 and cassandra version >= 3.11\")\n\t}\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (\n\t\tk int primary key, v duration\n\t)`, table)); err != nil {\n\t\tt.Fatal(\"create:\", err)\n\t}\n\n\tdurations := []Duration{\n\t\tDuration{\n\t\t\tMonths:      250,\n\t\t\tDays:        500,\n\t\t\tNanoseconds: 300010001,\n\t\t},\n\t\tDuration{\n\t\t\tMonths:      -250,\n\t\t\tDays:        -500,\n\t\t\tNanoseconds: -300010001,\n\t\t},\n\t\tDuration{\n\t\t\tMonths:      0,\n\t\t\tDays:        128,\n\t\t\tNanoseconds: 127,\n\t\t},\n\t\tDuration{\n\t\t\tMonths:      0x7FFFFFFF,\n\t\t\tDays:        0x7FFFFFFF,\n\t\t\tNanoseconds: 0x7FFFFFFFFFFFFFFF,\n\t\t},\n\t}\n\tfor _, durationSend := range durations {\n\t\tif err := session.Query(fmt.Sprintf(`INSERT INTO gocql_test.%s (k, v) VALUES (1, ?)`, table), durationSend).Exec(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tvar id int\n\t\tvar duration Duration\n\t\tif err := session.Query(fmt.Sprintf(`SELECT k, v FROM gocql_test.%s`, table)).Scan(&id, &duration); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif duration.Months != durationSend.Months || duration.Days != durationSend.Days || 
duration.Nanoseconds != durationSend.Nanoseconds {\n\t\t\tt.Fatalf(\"Unexpeted value returned, expected=%v, received=%v\", durationSend, duration)\n\t\t}\n\t}\n}\n\nfunc TestMapScanCAS(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSessionFromClusterTabletsDisabled(createCluster(), t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE %s (\n\t\t\ttitle         varchar,\n\t\t\trevid   \t  timeuuid,\n\t\t\tlast_modified timestamp,\n\t\t\tdeleted boolean,\n\t\t\tPRIMARY KEY (title, revid)\n\t\t)`, table)); err != nil {\n\t\tt.Fatal(\"create:\", err)\n\t}\n\n\ttitle, revid, modified, deleted := \"baz\", TimeUUID(), time.Now(), false\n\tmapCAS := map[string]any{}\n\n\tif applied, err := session.Query(fmt.Sprintf(`INSERT INTO %s (title, revid, last_modified, deleted)\n\t\tVALUES (?, ?, ?, ?) IF NOT EXISTS`, table),\n\t\ttitle, revid, modified, deleted).MapScanCAS(mapCAS); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t} else if !applied {\n\t\tt.Fatalf(\"insert should have been applied: title=%v revID=%v modified=%v\", title, revid, modified)\n\t}\n\n\tmapCAS = map[string]any{}\n\tif applied, err := session.Query(fmt.Sprintf(`INSERT INTO %s (title, revid, last_modified, deleted)\n\t\tVALUES (?, ?, ?, ?) 
IF NOT EXISTS`, table),\n\t\ttitle, revid, modified, deleted).MapScanCAS(mapCAS); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t} else if applied {\n\t\tt.Fatalf(\"insert should not have been applied: title=%v revID=%v modified=%v\", title, revid, modified)\n\t} else if title != mapCAS[\"title\"] || revid != mapCAS[\"revid\"] || deleted != mapCAS[\"deleted\"] {\n\t\tt.Fatalf(\"expected %s/%v/%v/%v but got %s/%v/%v/%v\", title, revid, modified, false, mapCAS[\"title\"], mapCAS[\"revid\"], mapCAS[\"last_modified\"], mapCAS[\"deleted\"])\n\t}\n\n}\n\nfunc TestBatch(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int primary key)`, table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\n\tbatch := session.Batch(LoggedBatch)\n\tfor i := 0; i < 100; i++ {\n\t\tbatch.Query(fmt.Sprintf(`INSERT INTO %s (id) VALUES (?)`, table), i)\n\t}\n\n\tif err := session.ExecuteBatch(batch); err != nil {\n\t\tt.Fatal(\"execute batch:\", err)\n\t}\n\n\tcount := 0\n\tif err := session.Query(fmt.Sprintf(`SELECT COUNT(*) FROM %s`, table)).Scan(&count); err != nil {\n\t\tt.Fatal(\"select count:\", err)\n\t} else if count != 100 {\n\t\tt.Fatalf(\"count: expected %d, got %d\\n\", 100, count)\n\t}\n}\n\nfunc TestUnpreparedBatch(t *testing.T) {\n\tt.Parallel()\n\n\tt.Skip(\"FLAKE skipping\")\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int primary key, c counter)`, table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\n\tbatch := session.Batch(UnloggedBatch)\n\n\tfor i := 0; i < 100; i++ {\n\t\tbatch.Query(fmt.Sprintf(`UPDATE %s SET c = c + 1 WHERE id = 1`, table))\n\t}\n\n\tif err := session.ExecuteBatch(batch); err != nil {\n\t\tt.Fatal(\"execute batch:\", err)\n\t}\n\n\tcount := 0\n\tif err := 
session.Query(fmt.Sprintf(`SELECT COUNT(*) FROM %s`, table)).Scan(&count); err != nil {\n\t\tt.Fatal(\"select count:\", err)\n\t} else if count != 1 {\n\t\tt.Fatalf(\"count: expected %d, got %d\\n\", 1, count)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(`SELECT c FROM %s`, table)).Scan(&count); err != nil {\n\t\tt.Fatal(\"select count:\", err)\n\t} else if count != 100 {\n\t\tt.Fatalf(\"count: expected %d, got %d\\n\", 100, count)\n\t}\n}\n\n// TestBatchLimit tests gocql to make sure batch operations larger than the maximum\n// statement limit are not submitted to a cassandra node.\nfunc TestBatchLimit(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int primary key)`, table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\n\tbatch := session.Batch(LoggedBatch)\n\tfor i := 0; i < 65537; i++ {\n\t\tbatch.Query(fmt.Sprintf(`INSERT INTO %s (id) VALUES (?)`, table), i)\n\t}\n\tif err := session.ExecuteBatch(batch); err != ErrTooManyStmts {\n\t\tt.Fatal(\"gocql attempted to execute a batch larger than the support limit of statements.\")\n\t}\n\n}\n\nfunc TestWhereIn(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int, cluster int, primary key (id,cluster))`, table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(\"INSERT INTO %s (id, cluster) VALUES (?,?)\", table), 100, 200).Exec(); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t}\n\n\titer := session.Query(fmt.Sprintf(\"SELECT * FROM %s WHERE id = ? 
AND cluster IN (?)\", table), 100, 200).Iter()\n\tvar id, cluster int\n\tcount := 0\n\tfor iter.Scan(&id, &cluster) {\n\t\tcount++\n\t}\n\n\tif id != 100 || cluster != 200 {\n\t\tt.Fatalf(\"Was expecting id and cluster to be (100,200) but were (%d,%d)\", id, cluster)\n\t}\n}\n\n// TestTooManyQueryArgs tests to make sure the library correctly handles the application level bug\n// whereby too many query arguments are passed to a query\nfunc TestTooManyQueryArgs(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int primary key, value int)`, table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\n\t_, err := session.Query(fmt.Sprintf(`SELECT * FROM %s WHERE id = ?`, table), 1, 2).Iter().SliceMap()\n\n\tif err == nil {\n\t\tt.Fatal(\"'SELECT * FROM <table> WHERE id = ?, 1, 2' should return an error\")\n\t}\n\n\tbatch := session.Batch(UnloggedBatch)\n\tbatch.Query(fmt.Sprintf(\"INSERT INTO %s (id, value) VALUES (?, ?)\", table), 1, 2, 3)\n\terr = session.ExecuteBatch(batch)\n\n\tif err == nil {\n\t\tt.Fatal(\"'`INSERT INTO too_many_query_args (id, value) VALUES (?, ?)`, 1, 2, 3' should return an error\")\n\t}\n\n\t// TODO: should indicate via an error code that it is an invalid arg?\n\n}\n\n// TestNotEnoughQueryArgs tests to make sure the library correctly handles the application level bug\n// whereby not enough query arguments are passed to a query\nfunc TestNotEnoughQueryArgs(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int, cluster int, value int, primary key (id, cluster))`, table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\n\t_, err := session.Query(fmt.Sprintf(`SELECT * FROM %s WHERE id = ? 
and cluster = ?`, table), 1).Iter().SliceMap()\n\n\tif err == nil {\n\t\tt.Fatal(\"'SELECT * FROM <table> WHERE id = ? and cluster = ?, 1' should return an error\")\n\t}\n\n\tbatch := session.Batch(UnloggedBatch)\n\tbatch.Query(fmt.Sprintf(\"INSERT INTO %s (id, cluster, value) VALUES (?, ?, ?)\", table), 1, 2)\n\terr = session.ExecuteBatch(batch)\n\n\tif err == nil {\n\t\tt.Fatal(\"'`INSERT INTO not_enough_query_args (id, cluster, value) VALUES (?, ?, ?)`, 1, 2' should return an error\")\n\t}\n}\n\n// TestCreateSessionTimeout tests to make sure the CreateSession function timeouts out correctly\n// and prevents an infinite loop of connection retries.\nfunc TestCreateSessionTimeout(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tgo func() {\n\t\tselect {\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Error(\"no startup timeout\")\n\t\tcase <-ctx.Done():\n\t\t}\n\t}()\n\n\tcluster := createCluster()\n\tcluster.Hosts = []string{\"127.0.0.1:1\"}\n\tsession, err := cluster.CreateSession()\n\tif err == nil {\n\t\tsession.Close()\n\t\tt.Fatal(\"expected ErrNoConnectionsStarted, but no error was returned.\")\n\t}\n}\n\n// TestReconnection verifies that a node marked down is eventually reconnected.\n// WARNING: This test must NOT use t.Parallel(). 
It calls session.handleNodeDown()\n// which mutates shared HostInfo state visible to all concurrent sessions.\n//\n//nolint:paralleltest // mutates shared HostInfo state via handleNodeDown()\nfunc TestReconnection(t *testing.T) {\n\tcluster := createCluster()\n\tcluster.ReconnectInterval = 1 * time.Second\n\tsession := createSessionFromCluster(cluster, t)\n\tdefer session.Close()\n\n\th := session.hostSource.getHostsList()[0]\n\tsession.handleNodeDown(h.ConnectAddress(), h.Port())\n\n\tif h.State() != NodeDown {\n\t\tt.Fatal(\"Host should be NodeDown but not.\")\n\t}\n\n\ttime.Sleep(cluster.ReconnectInterval + h.Version().nodeUpDelay() + 1*time.Second)\n\n\tif h.State() != NodeUp {\n\t\tt.Fatal(\"Host should be NodeUp but not. Failed to reconnect.\")\n\t}\n}\n\ntype FullName struct {\n\tFirstName string\n\tLastName  string\n}\n\nfunc (n FullName) MarshalCQL(info TypeInfo) ([]byte, error) {\n\treturn []byte(n.FirstName + \" \" + n.LastName), nil\n}\n\nfunc (n *FullName) UnmarshalCQL(info TypeInfo, data []byte) error {\n\tt := strings.SplitN(string(data), \" \", 2)\n\tn.FirstName, n.LastName = t[0], t[1]\n\treturn nil\n}\n\nfunc TestMapScanWithRefMap(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (\n\t\t\ttesttext       text PRIMARY KEY,\n\t\t\ttestfullname   text,\n\t\t\ttestint        int,\n\t\t)`, table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\tm := make(map[string]any)\n\tm[\"testtext\"] = \"testtext\"\n\tm[\"testfullname\"] = FullName{FirstName: \"John\", LastName: \"Doe\"}\n\tm[\"testint\"] = 100\n\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (testtext, testfullname, testint) values (?,?,?)`, table),\n\t\tm[\"testtext\"], m[\"testfullname\"], m[\"testint\"]).Exec(); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t}\n\n\tvar testText string\n\tvar testFullName FullName\n\tret := 
map[string]any{\n\t\t\"testtext\":     &testText,\n\t\t\"testfullname\": &testFullName,\n\t\t// testint is not set here.\n\t}\n\titer := session.Query(fmt.Sprintf(`SELECT * FROM %s`, table)).Iter()\n\tif ok := iter.MapScan(ret); !ok {\n\t\tt.Fatal(\"select:\", iter.Close())\n\t} else {\n\t\tif ret[\"testtext\"] != \"testtext\" {\n\t\t\tt.Fatal(\"returned testtext did not match\")\n\t\t}\n\t\tf := ret[\"testfullname\"].(FullName)\n\t\tif f.FirstName != \"John\" || f.LastName != \"Doe\" {\n\t\t\tt.Fatal(\"returned testfullname did not match\")\n\t\t}\n\t\tif ret[\"testint\"] != 100 {\n\t\t\tt.Fatal(\"returned testint did not match\")\n\t\t}\n\t}\n\tif testText != \"testtext\" {\n\t\tt.Fatal(\"returned testtext did not match\")\n\t}\n\tif testFullName.FirstName != \"John\" || testFullName.LastName != \"Doe\" {\n\t\tt.Fatal(\"returned testfullname did not match\")\n\t}\n\n\t// using MapScan to read a nil int value\n\tintp := new(int64)\n\tret = map[string]any{\n\t\t\"testint\": &intp,\n\t}\n\tif err := session.Query(fmt.Sprintf(\"INSERT INTO %s(testtext, testint) VALUES(?, ?)\", table), \"null-int\", nil).Exec(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr := session.Query(fmt.Sprintf(`SELECT testint FROM %s WHERE testtext = ?`, table), \"null-int\").MapScan(ret)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if v := ret[\"testint\"].(*int64); v != nil {\n\t\tt.Fatalf(\"testint should be nil got %+#v\", v)\n\t}\n\n}\n\nfunc TestMapScan(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (\n\t\t\tfullname       text PRIMARY KEY,\n\t\t\tage            int,\n\t\t\taddress        inet,\n\t\t\tdata           blob,\n\t\t)`, table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (fullname, age, address) values (?,?,?)`, table),\n\t\t\"Grace Hopper\", 31, 
net.ParseIP(\"10.0.0.1\")).Exec(); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t}\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (fullname, age, address, data) values (?,?,?,?)`, table),\n\t\t\"Ada Lovelace\", 30, net.ParseIP(\"10.0.0.2\"), []byte(`{\"foo\": \"bar\"}`)).Exec(); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t}\n\n\titer := session.Query(fmt.Sprintf(`SELECT * FROM %s`, table)).Iter()\n\n\t// First iteration\n\trow := make(map[string]any)\n\tif !iter.MapScan(row) {\n\t\tt.Fatal(\"select:\", iter.Close())\n\t}\n\ttests.AssertEqual(t, \"fullname\", \"Ada Lovelace\", row[\"fullname\"])\n\ttests.AssertEqual(t, \"age\", 30, row[\"age\"])\n\ttests.AssertEqual(t, \"address\", \"10.0.0.2\", row[\"address\"])\n\ttests.AssertDeepEqual(t, \"data\", []byte(`{\"foo\": \"bar\"}`), row[\"data\"])\n\n\t// Second iteration using a new map\n\trow = make(map[string]any)\n\tif !iter.MapScan(row) {\n\t\tt.Fatal(\"select:\", iter.Close())\n\t}\n\ttests.AssertEqual(t, \"fullname\", \"Grace Hopper\", row[\"fullname\"])\n\ttests.AssertEqual(t, \"age\", 31, row[\"age\"])\n\ttests.AssertEqual(t, \"address\", \"10.0.0.1\", row[\"address\"])\n\ttests.AssertDeepEqual(t, \"data\", []byte(nil), row[\"data\"])\n}\n\nfunc TestSliceMap(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (\n\t\t\ttestuuid       timeuuid PRIMARY KEY,\n\t\t\ttesttimestamp  timestamp,\n\t\t\ttestvarchar    varchar,\n\t\t\ttestbigint     bigint,\n\t\t\ttestblob       blob,\n\t\t\ttestbool       boolean,\n\t\t\ttestfloat      float,\n\t\t\ttestdouble     double,\n\t\t\ttestint        int,\n\t\t\ttestdecimal    decimal,\n\t\t\ttestlist       list<text>,\n\t\t\ttestset        set<int>,\n\t\t\ttestmap        map<varchar, varchar>,\n\t\t\ttestvarint     varint,\n\t\t\ttestinet\t\t\t inet\n\t\t)`, table)); err != nil {\n\t\tt.Fatal(\"create table:\", 
err)\n\t}\n\tm := make(map[string]any)\n\n\tbigInt := new(big.Int)\n\tif _, ok := bigInt.SetString(\"830169365738487321165427203929228\", 10); !ok {\n\t\tt.Fatal(\"Failed setting bigint by string\")\n\t}\n\n\tm[\"testuuid\"] = TimeUUID()\n\tm[\"testvarchar\"] = \"Test VarChar\"\n\tm[\"testbigint\"] = time.Now().Unix()\n\tm[\"testtimestamp\"] = time.Now().Truncate(time.Millisecond).UTC()\n\tm[\"testblob\"] = []byte(\"test blob\")\n\tm[\"testbool\"] = true\n\tm[\"testfloat\"] = float32(4.564)\n\tm[\"testdouble\"] = float64(4.815162342)\n\tm[\"testint\"] = 2343\n\tm[\"testdecimal\"] = inf.NewDec(100, 0)\n\tm[\"testlist\"] = []string{\"quux\", \"foo\", \"bar\", \"baz\", \"quux\"}\n\tm[\"testset\"] = []int{1, 2, 3, 4, 5, 6, 7, 8, 9}\n\tm[\"testmap\"] = map[string]string{\"field1\": \"val1\", \"field2\": \"val2\", \"field3\": \"val3\"}\n\tm[\"testvarint\"] = bigInt\n\tm[\"testinet\"] = \"213.212.2.19\"\n\tsliceMap := []map[string]any{m}\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (testuuid, testtimestamp, testvarchar, testbigint, testblob, testbool, testfloat, testdouble, testint, testdecimal, testlist, testset, testmap, testvarint, testinet) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, table),\n\t\tm[\"testuuid\"], m[\"testtimestamp\"], m[\"testvarchar\"], m[\"testbigint\"], m[\"testblob\"], m[\"testbool\"], m[\"testfloat\"], m[\"testdouble\"], m[\"testint\"], m[\"testdecimal\"], m[\"testlist\"], m[\"testset\"], m[\"testmap\"], m[\"testvarint\"], m[\"testinet\"]).Exec(); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t}\n\tif returned, retErr := session.Query(fmt.Sprintf(`SELECT * FROM %s`, table)).Iter().SliceMap(); retErr != nil {\n\t\tt.Fatal(\"select:\", retErr)\n\t} else {\n\t\tmatchSliceMap(t, sliceMap, returned[0])\n\t}\n\n\t// Test for Iter.MapScan()\n\t{\n\t\ttestMap := make(map[string]any)\n\t\tif !session.Query(fmt.Sprintf(`SELECT * FROM %s`, table)).Iter().MapScan(testMap) {\n\t\t\tt.Fatal(\"MapScan failed to work with one 
row\")\n\t\t}\n\t\tmatchSliceMap(t, sliceMap, testMap)\n\t}\n\n\t// Test for Query.MapScan()\n\t{\n\t\ttestMap := make(map[string]any)\n\t\tif session.Query(fmt.Sprintf(`SELECT * FROM %s`, table)).MapScan(testMap) != nil {\n\t\t\tt.Fatal(\"MapScan failed to work with one row\")\n\t\t}\n\t\tmatchSliceMap(t, sliceMap, testMap)\n\t}\n}\nfunc matchSliceMap(t *testing.T, sliceMap []map[string]any, testMap map[string]any) {\n\tif sliceMap[0][\"testuuid\"] != testMap[\"testuuid\"] {\n\t\tt.Fatal(\"returned testuuid did not match\")\n\t}\n\tif sliceMap[0][\"testtimestamp\"] != testMap[\"testtimestamp\"] {\n\t\tt.Fatal(\"returned testtimestamp did not match\")\n\t}\n\tif sliceMap[0][\"testvarchar\"] != testMap[\"testvarchar\"] {\n\t\tt.Fatal(\"returned testvarchar did not match\")\n\t}\n\tif sliceMap[0][\"testbigint\"] != testMap[\"testbigint\"] {\n\t\tt.Fatal(\"returned testbigint did not match\")\n\t}\n\tif !reflect.DeepEqual(sliceMap[0][\"testblob\"], testMap[\"testblob\"]) {\n\t\tt.Fatal(\"returned testblob did not match\")\n\t}\n\tif sliceMap[0][\"testbool\"] != testMap[\"testbool\"] {\n\t\tt.Fatal(\"returned testbool did not match\")\n\t}\n\tif sliceMap[0][\"testfloat\"] != testMap[\"testfloat\"] {\n\t\tt.Fatal(\"returned testfloat did not match\")\n\t}\n\tif sliceMap[0][\"testdouble\"] != testMap[\"testdouble\"] {\n\t\tt.Fatal(\"returned testdouble did not match\")\n\t}\n\tif sliceMap[0][\"testinet\"] != testMap[\"testinet\"] {\n\t\tt.Fatal(\"returned testinet did not match\")\n\t}\n\n\texpectedDecimal := sliceMap[0][\"testdecimal\"].(*inf.Dec)\n\treturnedDecimal := testMap[\"testdecimal\"].(*inf.Dec)\n\n\tif expectedDecimal.Cmp(returnedDecimal) != 0 {\n\t\tt.Fatal(\"returned testdecimal did not match\")\n\t}\n\n\tif !reflect.DeepEqual(sliceMap[0][\"testlist\"], testMap[\"testlist\"]) {\n\t\tt.Fatal(\"returned testlist did not match\")\n\t}\n\tif !reflect.DeepEqual(sliceMap[0][\"testset\"], testMap[\"testset\"]) {\n\t\tt.Fatal(\"returned testset did not 
match\")\n\t}\n\tif !reflect.DeepEqual(sliceMap[0][\"testmap\"], testMap[\"testmap\"]) {\n\t\tt.Fatal(\"returned testmap did not match\")\n\t}\n\tif sliceMap[0][\"testint\"] != testMap[\"testint\"] {\n\t\tt.Fatal(\"returned testint did not match\")\n\t}\n}\n\ntype MyRetryPolicy struct {\n}\n\nfunc (*MyRetryPolicy) Attempt(q RetryableQuery) bool {\n\tif q.Attempts() > 5 {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (*MyRetryPolicy) GetRetryType(err error) RetryType {\n\tvar executedErr *QueryError\n\tif errors.As(err, &executedErr) && !executedErr.IsIdempotent() {\n\t\treturn Ignore\n\t}\n\treturn Retry\n}\n\nfunc Test_RetryPolicyIdempotence(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttestCases := []struct {\n\t\tname                  string\n\t\tidempotency           bool\n\t\texpectedNumberOfTries int\n\t}{\n\t\t{\n\t\t\tname:                  \"with retry\",\n\t\t\tidempotency:           true,\n\t\t\texpectedNumberOfTries: 6,\n\t\t},\n\t\t{\n\t\t\tname:                  \"without retry\",\n\t\t\tidempotency:           false,\n\t\t\texpectedNumberOfTries: 1,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tq := session.Query(\"INSERT INTO  gocql_test.not_existing_table(event_id, time, args) VALUES (?,?,?)\", 4, UUIDFromTime(time.Now()), \"test\")\n\n\t\t\tq.Idempotent(tc.idempotency)\n\t\t\tq.RetryPolicy(&MyRetryPolicy{})\n\t\t\tq.Consistency(All)\n\n\t\t\t_ = q.Exec()\n\t\t\trequire.Equal(t, tc.expectedNumberOfTries, q.Attempts())\n\t\t})\n\t}\n}\n\nfunc TestSmallInt(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (\n\t\t\ttestsmallint  smallint PRIMARY KEY,\n\t\t)`, table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\tm := make(map[string]any)\n\tm[\"testsmallint\"] = int16(2)\n\tsliceMap := 
[]map[string]any{m}\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (testsmallint) VALUES (?)`, table),\n\t\tm[\"testsmallint\"]).Exec(); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t}\n\tif returned, retErr := session.Query(fmt.Sprintf(`SELECT * FROM %s`, table)).Iter().SliceMap(); retErr != nil {\n\t\tt.Fatal(\"select:\", retErr)\n\t} else {\n\t\tif sliceMap[0][\"testsmallint\"] != returned[0][\"testsmallint\"] {\n\t\t\tt.Fatal(\"returned testsmallint did not match\")\n\t\t}\n\t}\n}\n\nfunc TestScanWithNilArguments(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (\n\t\t\tfoo   varchar,\n\t\t\tbar   int,\n\t\t\tPRIMARY KEY (foo, bar)\n\t)`, table)); err != nil {\n\t\tt.Fatal(\"create:\", err)\n\t}\n\tfor i := 1; i <= 20; i++ {\n\t\tif err := session.Query(fmt.Sprintf(\"INSERT INTO %s (foo, bar) VALUES (?, ?)\", table),\n\t\t\t\"squares\", i*i).Exec(); err != nil {\n\t\t\tt.Fatal(\"insert:\", err)\n\t\t}\n\t}\n\n\titer := session.Query(fmt.Sprintf(\"SELECT * FROM %s WHERE foo = ?\", table), \"squares\").Iter()\n\tvar n int\n\tcount := 0\n\tfor iter.Scan(nil, &n) {\n\t\tcount += n\n\t}\n\tif err := iter.Close(); err != nil {\n\t\tt.Fatal(\"close:\", err)\n\t}\n\tif count != 2870 {\n\t\tt.Fatalf(\"expected %d, got %d\", 2870, count)\n\t}\n}\n\nfunc TestScanCASWithNilArguments(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSessionFromClusterTabletsDisabled(createCluster(), t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE %s (\n\t\tfoo   varchar,\n\t\tbar   varchar,\n\t\tPRIMARY KEY (foo, bar)\n\t)`, table)); err != nil {\n\t\tt.Fatal(\"create:\", err)\n\t}\n\n\tfoo := \"baz\"\n\tvar cas string\n\n\tif applied, err := session.Query(fmt.Sprintf(`INSERT INTO %s (foo, bar)\n\t\tVALUES (?, ?) 
IF NOT EXISTS`, table),\n\t\tfoo, foo).ScanCAS(nil, nil); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t} else if !applied {\n\t\tt.Fatal(\"insert should have been applied\")\n\t}\n\n\tif applied, err := session.Query(fmt.Sprintf(`INSERT INTO %s (foo, bar)\n\t\tVALUES (?, ?) IF NOT EXISTS`, table),\n\t\tfoo, foo).ScanCAS(&cas, nil); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t} else if applied {\n\t\tt.Fatal(\"insert should not have been applied\")\n\t} else if foo != cas {\n\t\tt.Fatalf(\"expected %v but got %v\", foo, cas)\n\t}\n\n\tif applied, err := session.Query(fmt.Sprintf(`INSERT INTO %s (foo, bar)\n\t\tVALUES (?, ?) IF NOT EXISTS`, table),\n\t\tfoo, foo).ScanCAS(nil, &cas); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t} else if applied {\n\t\tt.Fatal(\"insert should not have been applied\")\n\t} else if foo != cas {\n\t\tt.Fatalf(\"expected %v but got %v\", foo, cas)\n\t}\n}\n\nfunc TestRebindQueryInfo(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, value text, PRIMARY KEY (id))\", table)); err != nil {\n\t\tt.Fatalf(\"failed to create table with error '%v'\", err)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(\"INSERT INTO %s (id, value) VALUES (?, ?)\", table), 23, \"quux\").Exec(); err != nil {\n\t\tt.Fatalf(\"insert into rebind_query failed, err '%v'\", err)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(\"INSERT INTO %s (id, value) VALUES (?, ?)\", table), 24, \"w00t\").Exec(); err != nil {\n\t\tt.Fatalf(\"insert into rebind_query failed, err '%v'\", err)\n\t}\n\n\tq := session.Query(fmt.Sprintf(\"SELECT value FROM %s WHERE ID = ?\", table))\n\tq.Bind(23)\n\n\titer := q.Iter()\n\tvar value string\n\tfor iter.Scan(&value) {\n\t}\n\n\tif value != \"quux\" {\n\t\tt.Fatalf(\"expected %v but got %v\", \"quux\", value)\n\t}\n\n\tq.Bind(24)\n\titer = q.Iter()\n\n\tfor iter.Scan(&value) 
{\n\t}\n\n\tif value != \"w00t\" {\n\t\tt.Fatalf(\"expected %v but got %v\", \"w00t\", value)\n\t}\n}\n\n// TestStaticQueryInfo makes sure that the application can manually bind query parameters using the simplest possible static binding strategy\nfunc TestStaticQueryInfo(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, value text, PRIMARY KEY (id))\", table)); err != nil {\n\t\tt.Fatalf(\"failed to create table with error '%v'\", err)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(\"INSERT INTO %s (id, value) VALUES (?, ?)\", table), 113, \"foo\").Exec(); err != nil {\n\t\tt.Fatalf(\"insert into static_query_info failed, err '%v'\", err)\n\t}\n\n\tautobinder := func(q *QueryInfo) ([]any, error) {\n\t\tvalues := make([]any, 1)\n\t\tvalues[0] = 113\n\t\treturn values, nil\n\t}\n\n\tqry := session.Bind(fmt.Sprintf(\"SELECT id, value FROM %s WHERE id = ?\", table), autobinder)\n\n\tif err := qry.Exec(); err != nil {\n\t\tt.Fatalf(\"expose query info failed, error '%v'\", err)\n\t}\n\n\titer := qry.Iter()\n\n\tvar id int\n\tvar value string\n\n\titer.Scan(&id, &value)\n\n\tif err := iter.Close(); err != nil {\n\t\tt.Fatalf(\"query with exposed info failed, err '%v'\", err)\n\t}\n\n\tif value != \"foo\" {\n\t\tt.Fatalf(\"Expected value %s, but got %s\", \"foo\", value)\n\t}\n\n}\n\ntype ClusteredKeyValue struct {\n\tId      int\n\tCluster int\n\tValue   string\n}\n\nfunc (kv *ClusteredKeyValue) Bind(q *QueryInfo) ([]any, error) {\n\tvalues := make([]any, len(q.Args))\n\n\tfor i, info := range q.Args {\n\t\tfieldName := upcaseInitial(info.Name)\n\t\tvalue := reflect.ValueOf(kv)\n\t\tfield := reflect.Indirect(value).FieldByName(fieldName)\n\t\tvalues[i] = field.Addr().Interface()\n\t}\n\n\treturn values, nil\n}\n\nfunc upcaseInitial(str string) string {\n\tfor i, v := range str {\n\t\treturn string(unicode.ToUpper(v)) 
+ str[i+1:]\n\t}\n\treturn \"\"\n}\n\n// TestBoundQueryInfo makes sure that the application can manually bind query parameters using the query meta data supplied at runtime\nfunc TestBoundQueryInfo(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, cluster int, value text, PRIMARY KEY (id, cluster))\", table)); err != nil {\n\t\tt.Fatalf(\"failed to create table with error '%v'\", err)\n\t}\n\n\twrite := &ClusteredKeyValue{Id: 200, Cluster: 300, Value: \"baz\"}\n\n\tinsert := session.Bind(fmt.Sprintf(\"INSERT INTO %s (id, cluster, value) VALUES (?, ?,?)\", table), write.Bind)\n\n\tif err := insert.Exec(); err != nil {\n\t\tt.Fatalf(\"insert into clustered_query_info failed, err '%v'\", err)\n\t}\n\n\tread := &ClusteredKeyValue{Id: 200, Cluster: 300}\n\n\tqry := session.Bind(fmt.Sprintf(\"SELECT id, cluster, value FROM %s WHERE id = ? 
and cluster = ?\", table), read.Bind)\n\n\titer := qry.Iter()\n\n\tvar id, cluster int\n\tvar value string\n\n\titer.Scan(&id, &cluster, &value)\n\n\tif err := iter.Close(); err != nil {\n\t\tt.Fatalf(\"query with clustered_query_info info failed, err '%v'\", err)\n\t}\n\n\tif value != \"baz\" {\n\t\tt.Fatalf(\"Expected value %s, but got %s\", \"baz\", value)\n\t}\n\n}\n\n// TestBatchQueryInfo makes sure that the application can manually bind query parameters when executing in a batch\nfunc TestBatchQueryInfo(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, cluster int, value text, PRIMARY KEY (id, cluster))\", table)); err != nil {\n\t\tt.Fatalf(\"failed to create table with error '%v'\", err)\n\t}\n\n\twrite := func(q *QueryInfo) ([]any, error) {\n\t\tvalues := make([]any, 3)\n\t\tvalues[0] = 4000\n\t\tvalues[1] = 5000\n\t\tvalues[2] = \"bar\"\n\t\treturn values, nil\n\t}\n\n\tbatch := session.Batch(LoggedBatch)\n\tbatch.Bind(fmt.Sprintf(\"INSERT INTO %s (id, cluster, value) VALUES (?, ?,?)\", table), write)\n\n\tif err := session.ExecuteBatch(batch); err != nil {\n\t\tt.Fatalf(\"batch insert into batch_query_info failed, err '%v'\", err)\n\t}\n\n\tread := func(q *QueryInfo) ([]any, error) {\n\t\tvalues := make([]any, 2)\n\t\tvalues[0] = 4000\n\t\tvalues[1] = 5000\n\t\treturn values, nil\n\t}\n\n\tqry := session.Bind(fmt.Sprintf(\"SELECT id, cluster, value FROM %s WHERE id = ? 
and cluster = ?\", table), read)\n\n\titer := qry.Iter()\n\n\tvar id, cluster int\n\tvar value string\n\n\titer.Scan(&id, &cluster, &value)\n\n\tif err := iter.Close(); err != nil {\n\t\tt.Fatalf(\"query with batch_query_info info failed, err '%v'\", err)\n\t}\n\n\tif value != \"bar\" {\n\t\tt.Fatalf(\"Expected value %s, but got %s\", \"bar\", value)\n\t}\n}\n\nfunc getRandomConn(t *testing.T, session *Session) *Conn {\n\tconn := session.getConn()\n\tif conn == nil {\n\t\tt.Fatal(\"unable to get a connection\")\n\t}\n\treturn conn\n}\n\nfunc injectInvalidPreparedStatement(t *testing.T, session *Session, table string) (string, *Conn) {\n\tif err := createTable(session, `CREATE TABLE gocql_test.`+table+` (\n\t\t\tfoo   varchar,\n\t\t\tbar   int,\n\t\t\tPRIMARY KEY (foo, bar)\n\t)`); err != nil {\n\t\tt.Fatal(\"create:\", err)\n\t}\n\n\tstmt := \"INSERT INTO \" + table + \" (foo, bar) VALUES (?, 7)\"\n\n\tconn := getRandomConn(t, session)\n\n\tflight := new(inflightPrepare)\n\tkey := session.stmtsLRU.keyFor(conn.host.HostID(), \"\", stmt)\n\tsession.stmtsLRU.add(key, flight)\n\n\tflight.preparedStatment = &preparedStatment{\n\t\tid: []byte{'f', 'o', 'o', 'b', 'a', 'r'},\n\t\trequest: preparedMetadata{\n\t\t\tresultMetadata: resultMetadata{\n\t\t\t\tcolCount:       1,\n\t\t\t\tactualColCount: 1,\n\t\t\t\tcolumns: []ColumnInfo{\n\t\t\t\t\t{\n\t\t\t\t\t\tKeyspace: \"gocql_test\",\n\t\t\t\t\t\tTable:    table,\n\t\t\t\t\t\tName:     \"foo\",\n\t\t\t\t\t\tTypeInfo: NativeType{\n\t\t\t\t\t\t\ttyp: TypeVarchar,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn stmt, conn\n}\n\nfunc TestPrepare_MissingSchemaPrepare(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\ts := createSession(t)\n\tconn := getRandomConn(t, s)\n\tdefer s.Close()\n\n\ttable := testTableName(t)\n\n\tinsertQry := s.Query(fmt.Sprintf(\"INSERT INTO %s (val) VALUES (?)\", table), 5)\n\tif err := conn.executeQuery(ctx, 
insertQry).err; err == nil {\n\t\tt.Fatal(\"expected error, but got nil.\")\n\t}\n\n\tif err := createTable(s, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (val int, PRIMARY KEY (val))\", table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\n\tif err := conn.executeQuery(ctx, insertQry).err; err != nil {\n\t\tt.Fatal(err) // unconfigured columnfamily\n\t}\n}\n\nfunc TestPrepare_ReprepareStatement(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tstmt, conn := injectInvalidPreparedStatement(t, session, table)\n\tquery := session.Query(stmt, \"bar\")\n\tif err := conn.executeQuery(ctx, query).Close(); err != nil {\n\t\tt.Fatalf(\"Failed to execute query for reprepare statement: %v\", err)\n\t}\n}\n\nfunc TestPrepare_ReprepareBatch(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tstmt, conn := injectInvalidPreparedStatement(t, session, table)\n\tbatch := session.Batch(UnloggedBatch)\n\tbatch.Query(stmt, \"bar\")\n\tif err := conn.executeBatch(ctx, batch).Close(); err != nil {\n\t\tt.Fatalf(\"Failed to execute query for reprepare statement: %v\", err)\n\t}\n}\n\nfunc TestQueryInfo(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tconn := getRandomConn(t, session)\n\tinfo, err := conn.prepareStatement(context.Background(), \"SELECT release_version, host_id FROM system.local WHERE key = ?\", nil, time.Second)\n\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to execute query for preparing statement: %v\", err)\n\t}\n\n\tif x := len(info.request.columns); x != 1 {\n\t\tt.Fatalf(\"Was not expecting meta data for %d query arguments, but got %d\\n\", 1, x)\n\t}\n\n\tif x := len(info.response.columns); x != 2 
{\n\t\tt.Fatalf(\"Was not expecting meta data for %d result columns, but got %d\\n\", 2, x)\n\t}\n}\n\n// TestPreparedCacheEviction will make sure that the cache size is maintained\nfunc TestPrepare_PreparedCacheEviction(t *testing.T) {\n\tt.Parallel()\n\n\tconst maxPrepared = 4\n\n\tclusterHosts := getClusterHosts()\n\thost := clusterHosts[0]\n\tcluster := createCluster()\n\tcluster.MaxPreparedStmts = maxPrepared\n\tcluster.Events.DisableSchemaEvents = true\n\tcluster.Hosts = []string{host}\n\n\tcluster.HostFilter = WhiteListHostFilter(host)\n\n\tsession := createSessionFromCluster(cluster, t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int,mod int,PRIMARY KEY (id))\", table)); err != nil {\n\t\tt.Fatalf(\"failed to create table with error '%v'\", err)\n\t}\n\t// clear the cache\n\tsession.stmtsLRU.clear()\n\n\t//Fill the table\n\tfor i := 0; i < 2; i++ {\n\t\tif err := session.Query(fmt.Sprintf(\"INSERT INTO %s (id,mod) VALUES (?, ?)\", table), i, 10000%(i+1)).Exec(); err != nil {\n\t\t\tt.Fatalf(\"insert into prepcachetest failed, err '%v'\", err)\n\t\t}\n\t}\n\t//Populate the prepared statement cache with select statements\n\tvar id, mod int\n\tfor i := 0; i < 2; i++ {\n\t\terr := session.Query(fmt.Sprintf(\"SELECT id,mod FROM %s WHERE id = \", table)+strconv.FormatInt(int64(i), 10)).Scan(&id, &mod)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"select from prepcachetest failed, error '%v'\", err)\n\t\t}\n\t}\n\n\t//generate an update statement to test they are prepared\n\terr := session.Query(fmt.Sprintf(\"UPDATE %s SET mod = ? 
WHERE id = ?\", table), 1, 11).Exec()\n\tif err != nil {\n\t\tt.Fatalf(\"update prepcachetest failed, error '%v'\", err)\n\t}\n\n\t//generate a delete statement to test they are prepared\n\terr = session.Query(fmt.Sprintf(\"DELETE FROM %s WHERE id = ?\", table), 1).Exec()\n\tif err != nil {\n\t\tt.Fatalf(\"delete from prepcachetest failed, error '%v'\", err)\n\t}\n\n\t//generate an insert statement to test they are prepared\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s (id,mod) VALUES (?, ?)\", table), 3, 11).Exec()\n\tif err != nil {\n\t\tt.Fatalf(\"insert into prepcachetest failed, error '%v'\", err)\n\t}\n\n\tsession.stmtsLRU.mu.Lock()\n\tdefer session.stmtsLRU.mu.Unlock()\n\n\t//Make sure the cache size is maintained\n\tif session.stmtsLRU.lru.Len() != session.stmtsLRU.lru.MaxEntries {\n\t\tt.Fatalf(\"expected cache size of %v, got %v\", session.stmtsLRU.lru.MaxEntries, session.stmtsLRU.lru.Len())\n\t}\n\n\t// Walk through all the configured hosts and test cache retention and eviction\n\tfor _, host := range session.hostSource.hosts {\n\t\t_, ok := session.stmtsLRU.lru.Get(session.stmtsLRU.keyFor(host.HostID(), session.cfg.Keyspace, fmt.Sprintf(\"SELECT id,mod FROM %s WHERE id = 0\", table)))\n\t\tif ok {\n\t\t\tt.Errorf(\"expected first select to be purged but was in cache for host=%q\", host)\n\t\t}\n\n\t\t_, ok = session.stmtsLRU.lru.Get(session.stmtsLRU.keyFor(host.HostID(), session.cfg.Keyspace, fmt.Sprintf(\"SELECT id,mod FROM %s WHERE id = 1\", table)))\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected second select to be in cache for host=%q\", host)\n\t\t}\n\n\t\t_, ok = session.stmtsLRU.lru.Get(session.stmtsLRU.keyFor(host.HostID(), session.cfg.Keyspace, fmt.Sprintf(\"INSERT INTO %s (id,mod) VALUES (?, ?)\", table)))\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected insert to be in cache for host=%q\", host)\n\t\t}\n\n\t\t_, ok = session.stmtsLRU.lru.Get(session.stmtsLRU.keyFor(host.HostID(), session.cfg.Keyspace, fmt.Sprintf(\"UPDATE %s SET mod = ? 
WHERE id = ?\", table)))\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected update to be cached for host=%q\", host)\n\t\t}\n\n\t\t_, ok = session.stmtsLRU.lru.Get(session.stmtsLRU.keyFor(host.HostID(), session.cfg.Keyspace, fmt.Sprintf(\"DELETE FROM %s WHERE id = ?\", table)))\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected delete to be cached for host=%q\", host)\n\t\t}\n\t}\n}\n\nfunc TestPrepare_PreparedCacheKey(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\t// create a second keyspace with a unique name to avoid collisions under parallel execution\n\tks2 := testKeyspaceName(t, \"ks2\")\n\tcluster2 := createCluster()\n\tcreateKeyspace(t, cluster2, ks2, false)\n\tcluster2.Keyspace = ks2\n\tsession2, err := cluster2.CreateSession()\n\tif err != nil {\n\t\tt.Fatal(\"create session:\", err)\n\t}\n\tdefer session2.Close()\n\n\t// both keyspaces have a table named \"test_stmt_cache_key\"\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id varchar primary key, field varchar)\", table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\tif err := createTable(session2, fmt.Sprintf(\"CREATE TABLE %s.%s (id varchar primary key, field varchar)\", ks2, table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\n\t// both tables have a single row with the same partition key but different column value\n\tif err = session.Query(fmt.Sprintf(`INSERT INTO %s (id, field) VALUES (?, ?)`, table), \"key\", \"one\").Exec(); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t}\n\tif err = session2.Query(fmt.Sprintf(`INSERT INTO %s (id, field) VALUES (?, ?)`, table), \"key\", \"two\").Exec(); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t}\n\n\t// should be able to see different values in each keyspace\n\tvar value string\n\tif err = session.Query(fmt.Sprintf(\"SELECT field FROM %s WHERE id = ?\", table), \"key\").Scan(&value); err != nil {\n\t\tt.Fatal(\"select:\", err)\n\t}\n\tif 
value != \"one\" {\n\t\tt.Errorf(\"Expected one, got %s\", value)\n\t}\n\n\tif err = session2.Query(fmt.Sprintf(\"SELECT field FROM %s WHERE id = ?\", table), \"key\").Scan(&value); err != nil {\n\t\tt.Fatal(\"select:\", err)\n\t}\n\tif value != \"two\" {\n\t\tt.Errorf(\"Expected two, got %s\", value)\n\t}\n}\n\n// TestMarshalFloat64Ptr tests to see that a pointer to a float64 is marshalled correctly.\nfunc TestMarshalFloat64Ptr(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id double, test double, primary key (id))\", table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\ttestNum := float64(7500)\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id,test) VALUES (?,?)`, table), float64(7500.00), &testNum).Exec(); err != nil {\n\t\tt.Fatal(\"insert float64:\", err)\n\t}\n}\n\n// TestMarshalInet tests to see that inet values round-trip correctly as both string and net.IP.\nfunc TestMarshalInet(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (ip inet, name text, primary key (ip))\", table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\tstringIp := \"123.34.45.56\"\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (ip,name) VALUES (?,?)`, table), stringIp, \"Test IP 1\").Exec(); err != nil {\n\t\tt.Fatal(\"insert string inet:\", err)\n\t}\n\tvar stringResult string\n\tif err := session.Query(fmt.Sprintf(\"SELECT ip FROM %s\", table)).Scan(&stringResult); err != nil {\n\t\tt.Fatalf(\"select for string from table 1 failed: %v\", err)\n\t}\n\tif stringResult != stringIp {\n\t\tt.Errorf(\"Expected %s, was %s\", stringIp, stringResult)\n\t}\n\n\tvar ipResult net.IP\n\tif err := session.Query(fmt.Sprintf(\"SELECT ip FROM %s\", table)).Scan(&ipResult); err 
!= nil {\n\t\tt.Fatalf(\"select for net.IP from table 1 failed: %v\", err)\n\t}\n\tif ipResult.String() != stringIp {\n\t\tt.Errorf(\"Expected %s, was %s\", stringIp, ipResult.String())\n\t}\n\n\tif err := session.Query(fmt.Sprintf(`DELETE FROM %s WHERE ip = ?`, table), stringIp).Exec(); err != nil {\n\t\tt.Fatal(\"delete inet table:\", err)\n\t}\n\n\tnetIp := net.ParseIP(\"222.43.54.65\")\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (ip,name) VALUES (?,?)`, table), netIp, \"Test IP 2\").Exec(); err != nil {\n\t\tt.Fatal(\"insert netIp inet:\", err)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(\"SELECT ip FROM %s\", table)).Scan(&stringResult); err != nil {\n\t\tt.Fatalf(\"select for string from table 2 failed: %v\", err)\n\t}\n\tif stringResult != netIp.String() {\n\t\tt.Errorf(\"Expected %s, was %s\", netIp.String(), stringResult)\n\t}\n\tif err := session.Query(fmt.Sprintf(\"SELECT ip FROM %s\", table)).Scan(&ipResult); err != nil {\n\t\tt.Fatalf(\"select for net.IP from table 2 failed: %v\", err)\n\t}\n\tif ipResult.String() != netIp.String() {\n\t\tt.Errorf(\"Expected %s, was %s\", netIp.String(), ipResult.String())\n\t}\n\n}\n\nfunc TestVarint(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id varchar, test varint, test2 varint, primary key (id))\", table)); err != nil {\n\t\tt.Fatalf(\"failed to create table with error '%v'\", err)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id, test) VALUES (?, ?)`, table), \"id\", 0).Exec(); err != nil {\n\t\tt.Fatalf(\"insert varint: %v\", err)\n\t}\n\n\tvar result int\n\tif err := session.Query(fmt.Sprintf(\"SELECT test FROM %s\", table)).Scan(&result); err != nil {\n\t\tt.Fatalf(\"select failed: %v\", err)\n\t}\n\n\tif result != 0 {\n\t\tt.Errorf(\"Expected 0, was %d\", result)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s 
(id, test) VALUES (?, ?)`, table), \"id\", -1).Exec(); err != nil {\n\t\tt.Fatalf(\"insert varint: %v\", err)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(\"SELECT test FROM %s\", table)).Scan(&result); err != nil {\n\t\tt.Fatalf(\"select failed: %v\", err)\n\t}\n\n\tif result != -1 {\n\t\tt.Errorf(\"Expected -1, was %d\", result)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id, test) VALUES (?, ?)`, table), \"id\", nil).Exec(); err != nil {\n\t\tt.Fatalf(\"insert varint: %v\", err)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(\"SELECT test FROM %s\", table)).Scan(&result); err != nil {\n\t\tt.Fatalf(\"select failed: %v\", err)\n\t}\n\n\tif result != 0 {\n\t\tt.Errorf(\"Expected 0, was %d\", result)\n\t}\n\n\tvar nullableResult *int\n\n\tif err := session.Query(fmt.Sprintf(\"SELECT test FROM %s\", table)).Scan(&nullableResult); err != nil {\n\t\tt.Fatalf(\"select failed: %v\", err)\n\t}\n\n\tif nullableResult != nil {\n\t\tt.Errorf(\"Expected nil, was %d\", nullableResult)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id, test) VALUES (?, ?)`, table), \"id\", int64(math.MaxInt32)+1).Exec(); err != nil {\n\t\tt.Fatalf(\"insert varint: %v\", err)\n\t}\n\n\tvar result64 int64\n\tif err := session.Query(fmt.Sprintf(\"SELECT test FROM %s\", table)).Scan(&result64); err != nil {\n\t\tt.Fatalf(\"select failed: %v\", err)\n\t}\n\n\tif result64 != int64(math.MaxInt32)+1 {\n\t\tt.Errorf(\"Expected %d, was %d\", int64(math.MaxInt32)+1, result64)\n\t}\n\n\tbiggie := new(big.Int)\n\tbiggie.SetString(\"36893488147419103232\", 10) // > 2**64\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id, test) VALUES (?, ?)`, table), \"id\", biggie).Exec(); err != nil {\n\t\tt.Fatalf(\"insert varint: %v\", err)\n\t}\n\n\tresultBig := new(big.Int)\n\tif err := session.Query(fmt.Sprintf(\"SELECT test FROM %s\", table)).Scan(resultBig); err != nil {\n\t\tt.Fatalf(\"select failed: %v\", err)\n\t}\n\n\tif resultBig.String() != biggie.String() 
{\n\t\tt.Errorf(\"Expected %s, was %s\", biggie.String(), resultBig.String())\n\t}\n\n\terr := session.Query(fmt.Sprintf(\"SELECT test FROM %s\", table)).Scan(&result64)\n\tif err == nil || strings.Index(err.Error(), \"the data value should be in the int64 range\") == -1 {\n\t\tt.Errorf(\"expected out of range error since value is too big for int64, result:%d\", result64)\n\t}\n\n\t// value not set in cassandra, leave bind variable empty\n\tresultBig = new(big.Int)\n\tif err := session.Query(fmt.Sprintf(\"SELECT test2 FROM %s\", table)).Scan(resultBig); err != nil {\n\t\tt.Fatalf(\"select failed: %v\", err)\n\t}\n\n\tif resultBig.Int64() != 0 {\n\t\tt.Errorf(\"Expected %s, was %s\", biggie.String(), resultBig.String())\n\t}\n\n\t// can use double pointer to explicitly detect value is not set in cassandra\n\tif err := session.Query(fmt.Sprintf(\"SELECT test2 FROM %s\", table)).Scan(&resultBig); err != nil {\n\t\tt.Fatalf(\"select failed: %v\", err)\n\t}\n\n\tif resultBig != nil {\n\t\tt.Errorf(\"Expected %v, was %v\", nil, *resultBig)\n\t}\n}\n\n// TestQueryStats confirms that the stats are returning valid data. Accuracy may be questionable.\nfunc TestQueryStats(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\tqry := session.Query(\"SELECT * FROM system.peers\")\n\tif err := qry.Exec(); err != nil {\n\t\tt.Fatalf(\"query failed. 
%v\", err)\n\t} else {\n\t\tif qry.Attempts() < 1 {\n\t\t\tt.Fatal(\"expected at least 1 attempt, but got 0\")\n\t\t}\n\t\tif qry.Latency() <= 0 {\n\t\t\tt.Fatalf(\"expected latency to be greater than 0, but got %v instead.\", qry.Latency())\n\t\t}\n\t}\n}\n\n// TestIterHost confirms that host is added to Iter when the query succeeds.\nfunc TestIterHost(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\titer := session.Query(\"SELECT * FROM system.peers\").Iter()\n\n\t// check if Host method works\n\tif iter.Host() == nil {\n\t\tt.Error(\"No host in iter\")\n\t}\n}\n\n// TestBatchStats confirms that the stats are returning valid data. Accuracy may be questionable.\nfunc TestBatchStats(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, PRIMARY KEY (id))\", table)); err != nil {\n\t\tt.Fatalf(\"failed to create table with error '%v'\", err)\n\t}\n\n\tb := session.Batch(LoggedBatch)\n\tb.Query(fmt.Sprintf(\"INSERT INTO %s (id) VALUES (?)\", table), 1)\n\tb.Query(fmt.Sprintf(\"INSERT INTO %s (id) VALUES (?)\", table), 2)\n\n\tif err := session.ExecuteBatch(b); err != nil {\n\t\tt.Fatalf(\"query failed. 
%v\", err)\n\t} else {\n\t\tif b.Attempts() < 1 {\n\t\t\tt.Fatal(\"expected at least 1 attempt, but got 0\")\n\t\t}\n\t\tif b.Latency() <= 0 {\n\t\t\tt.Fatalf(\"expected latency to be greater than 0, but got %v instead.\", b.Latency())\n\t\t}\n\t}\n}\n\ntype funcBatchObserver func(context.Context, ObservedBatch)\n\nfunc (f funcBatchObserver) ObserveBatch(ctx context.Context, o ObservedBatch) {\n\tf(ctx, o)\n}\n\nfunc TestBatchObserve(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int, other int, PRIMARY KEY (id))`, table)); err != nil {\n\t\tt.Fatal(\"create table:\", err)\n\t}\n\n\ttype observation struct {\n\t\tobservedErr      error\n\t\tobservedKeyspace string\n\t\tobservedStmts    []string\n\t\tobservedValues   [][]any\n\t}\n\n\tvar observedBatch *observation\n\n\tbatch := session.Batch(LoggedBatch)\n\tbatch.Observer(funcBatchObserver(func(ctx context.Context, o ObservedBatch) {\n\t\tif observedBatch != nil {\n\t\t\tt.Fatal(\"batch observe called more than once\")\n\t\t}\n\n\t\tobservedBatch = &observation{\n\t\t\tobservedKeyspace: o.Keyspace,\n\t\t\tobservedStmts:    o.Statements,\n\t\t\tobservedErr:      o.Err,\n\t\t\tobservedValues:   o.Values,\n\t\t}\n\t}))\n\tfor i := 0; i < 100; i++ {\n\t\t// hard coding 'i' into one of the values for better  testing of observation\n\t\tbatch.Query(fmt.Sprintf(`INSERT INTO %s (id,other) VALUES (?,%d)`, table, i), i)\n\t}\n\n\tif err := session.ExecuteBatch(batch); err != nil {\n\t\tt.Fatal(\"execute batch:\", err)\n\t}\n\tif observedBatch == nil {\n\t\tt.Fatal(\"batch observation has not been called\")\n\t}\n\tif len(observedBatch.observedStmts) != 100 {\n\t\tt.Fatal(\"expecting 100 observed statements, got\", len(observedBatch.observedStmts))\n\t}\n\tif observedBatch.observedErr != nil {\n\t\tt.Fatal(\"not expecting to observe an error\", 
observedBatch.observedErr)\n\t}\n\tif observedBatch.observedKeyspace != \"gocql_test\" {\n\t\tt.Fatalf(\"expecting keyspace 'gocql_test', got %q\", observedBatch.observedKeyspace)\n\t}\n\tfor i, stmt := range observedBatch.observedStmts {\n\t\tif stmt != fmt.Sprintf(`INSERT INTO %s (id,other) VALUES (?,%d)`, table, i) {\n\t\t\tt.Fatal(\"unexpected query\", stmt)\n\t\t}\n\n\t\ttests.AssertDeepEqual(t, \"observed value\", []any{i}, observedBatch.observedValues[i])\n\t}\n}\n\n// TestNilInQuery tests to see that a nil value passed to a query is handled by Cassandra\n// TODO validate the nil value by reading back the nil. Need to fix Unmarshalling.\nfunc TestNilInQuery(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, count int, PRIMARY KEY (id))\", table)); err != nil {\n\t\tt.Fatalf(\"failed to create table with error '%v'\", err)\n\t}\n\tif err := session.Query(fmt.Sprintf(\"INSERT INTO %s (id,count) VALUES (?,?)\", table), 1, nil).Exec(); err != nil {\n\t\tt.Fatalf(\"failed to insert with err: %v\", err)\n\t}\n\n\tvar id int\n\n\tif err := session.Query(fmt.Sprintf(\"SELECT id FROM %s\", table)).Scan(&id); err != nil {\n\t\tt.Fatalf(\"failed to select with err: %v\", err)\n\t} else if id != 1 {\n\t\tt.Fatalf(\"expected id to be 1, got %v\", id)\n\t}\n}\n\n// Don't initialize time.Time bind variable if cassandra timestamp column is empty\nfunc TestEmptyTimestamp(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, time timestamp, num int, PRIMARY KEY (id))\", table)); err != nil {\n\t\tt.Fatalf(\"failed to create table with error '%v'\", err)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(\"INSERT INTO %s (id, num) VALUES (?,?)\", table), 1, 561).Exec(); err != 
nil {\n\t\tt.Fatalf(\"failed to insert with err: %v\", err)\n\t}\n\n\tvar timeVal time.Time\n\n\tif err := session.Query(fmt.Sprintf(\"SELECT time FROM %s where id = ?\", table), 1).Scan(&timeVal); err != nil {\n\t\tt.Fatalf(\"failed to select with err: %v\", err)\n\t}\n\n\tif !timeVal.IsZero() {\n\t\tt.Errorf(\"time.Time bind variable should be zero (was %s)\", timeVal)\n\t}\n}\n\n// Integration test of just querying for data from the system.schema_keyspace table where the keyspace DOES exist.\nfunc TestGetKeyspaceMetadata(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tkeyspaceMetadata, err := getKeyspaceMetadata(session, \"gocql_test\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to query the keyspace metadata with err: %v\", err)\n\t}\n\tif keyspaceMetadata == nil {\n\t\tt.Fatal(\"failed to query the keyspace metadata, nil returned\")\n\t}\n\tif keyspaceMetadata.Name != \"gocql_test\" {\n\t\tt.Errorf(\"Expected keyspace name to be 'gocql_test' but was '%s'\", keyspaceMetadata.Name)\n\t}\n\tif keyspaceMetadata.StrategyClass != \"org.apache.cassandra.locator.NetworkTopologyStrategy\" {\n\t\tt.Errorf(\"Expected replication strategy class to be 'org.apache.cassandra.locator.NetworkTopologyStrategy' but was '%s'\", keyspaceMetadata.StrategyClass)\n\t}\n\tif keyspaceMetadata.StrategyOptions == nil {\n\t\tt.Error(\"Expected replication strategy options map but was nil\")\n\t}\n\trfStr, ok := keyspaceMetadata.StrategyOptions[\"datacenter1\"]\n\tif !ok {\n\t\tt.Fatalf(\"Expected strategy option 'datacenter1' but was not found in %v\", keyspaceMetadata.StrategyOptions)\n\t}\n\trfInt, err := strconv.Atoi(rfStr.(string))\n\tif err != nil {\n\t\tt.Fatalf(\"Error converting string to int with err: %v\", err)\n\t}\n\tif rfInt != *flagRF {\n\t\tt.Errorf(\"Expected replication factor to be %d but was %d\", *flagRF, rfInt)\n\t}\n}\n\nfunc TestSessionMetadataAPIs(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer 
session.Close()\n\n\tconst ks = \"gocql_test\"\n\n\tif _, err := session.KeyspaceMetadata(ks); err != nil {\n\t\tt.Fatalf(\"failed to get initial keyspace metadata: %v\", err)\n\t}\n\n\twaitForSchemaRefresh := func() {\n\t\tif err := session.control.awaitSchemaAgreement(); err != nil {\n\t\t\tt.Logf(\"schema agreement warning: %v\", err)\n\t\t}\n\t\tsession.metadataDescriber.invalidateKeyspaceSchema(ks)\n\t}\n\n\tt.Run(\"TableMetadata\", func(t *testing.T) {\n\t\tt.Run(\"basic_table_after_create\", func(t *testing.T) {\n\t\t\ttable := testTableName(t)\n\t\t\tif err := createTable(session, fmt.Sprintf(\n\t\t\t\t\"CREATE TABLE IF NOT EXISTS %s.%s (pk int PRIMARY KEY, v int)\", ks, table)); err != nil {\n\t\t\t\tt.Fatalf(\"create table: %v\", err)\n\t\t\t}\n\t\t\tdefer session.Query(fmt.Sprintf(\"DROP TABLE IF EXISTS %s.%s\", ks, table)).Exec()\n\n\t\t\twaitForSchemaRefresh()\n\n\t\t\ttm, err := session.TableMetadata(ks, table)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"TableMetadata failed: %v\", err)\n\t\t\t}\n\t\t\tif tm.Name != table {\n\t\t\t\tt.Errorf(\"expected table name %q, got %q\", table, tm.Name)\n\t\t\t}\n\t\t\tif tm.Keyspace != ks {\n\t\t\t\tt.Errorf(\"expected keyspace %q, got %q\", ks, tm.Keyspace)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"columns_and_partition_key\", func(t *testing.T) {\n\t\t\ttable := testTableName(t)\n\t\t\tif err := createTable(session, fmt.Sprintf(\n\t\t\t\t\"CREATE TABLE IF NOT EXISTS %s.%s (pk1 int, pk2 text, ck int, val blob, PRIMARY KEY ((pk1, pk2), ck))\", ks, table)); err != nil {\n\t\t\t\tt.Fatalf(\"create table: %v\", err)\n\t\t\t}\n\t\t\tdefer session.Query(fmt.Sprintf(\"DROP TABLE IF EXISTS %s.%s\", ks, table)).Exec()\n\n\t\t\twaitForSchemaRefresh()\n\n\t\t\ttm, err := session.TableMetadata(ks, table)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"TableMetadata failed: %v\", err)\n\t\t\t}\n\n\t\t\tif len(tm.PartitionKey) != 2 {\n\t\t\t\tt.Fatalf(\"expected 2 partition key columns, got %d\", len(tm.PartitionKey))\n\t\t\t}\n\t\t\tif 
tm.PartitionKey[0].Name != \"pk1\" || tm.PartitionKey[1].Name != \"pk2\" {\n\t\t\t\tt.Errorf(\"unexpected partition key columns: %v, %v\", tm.PartitionKey[0].Name, tm.PartitionKey[1].Name)\n\t\t\t}\n\n\t\t\tif len(tm.ClusteringColumns) != 1 || tm.ClusteringColumns[0].Name != \"ck\" {\n\t\t\t\tt.Errorf(\"expected clustering column 'ck', got %v\", tm.ClusteringColumns)\n\t\t\t}\n\n\t\t\tfor _, col := range []string{\"pk1\", \"pk2\", \"ck\", \"val\"} {\n\t\t\t\tif _, ok := tm.Columns[col]; !ok {\n\t\t\t\t\tt.Errorf(\"expected column %q in metadata\", col)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"with_secondary_index\", func(t *testing.T) {\n\t\t\tif isTabletsSupported() {\n\t\t\t\tt.Skip(\"secondary indexes are not supported on tables with tablets\")\n\t\t\t}\n\n\t\t\ttable := testTableName(t)\n\t\t\tif err := createTable(session, fmt.Sprintf(\n\t\t\t\t\"CREATE TABLE IF NOT EXISTS %s.%s (pk int PRIMARY KEY, v int)\", ks, table)); err != nil {\n\t\t\t\tt.Fatalf(\"create table: %v\", err)\n\t\t\t}\n\t\t\tdefer session.Query(fmt.Sprintf(\"DROP TABLE IF EXISTS %s.%s\", ks, table)).Exec()\n\n\t\t\tidxName := table + \"_v_idx\"\n\t\t\tif err := createTable(session, fmt.Sprintf(\n\t\t\t\t\"CREATE INDEX IF NOT EXISTS %s ON %s.%s (v)\", idxName, ks, table)); err != nil {\n\t\t\t\tt.Fatalf(\"create index: %v\", err)\n\t\t\t}\n\n\t\t\twaitForSchemaRefresh()\n\n\t\t\tsession.metadataDescriber.invalidateKeyspaceSchema(ks)\n\t\t\tkm, err := session.KeyspaceMetadata(ks)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"KeyspaceMetadata failed: %v\", err)\n\t\t\t}\n\t\t\tif _, ok := km.Indexes[idxName]; !ok {\n\t\t\t\tt.Errorf(\"expected index %q in keyspace metadata indexes\", idxName)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"with_materialized_view\", func(t *testing.T) {\n\t\t\tif flagCassVersion.Before(3, 0, 0) {\n\t\t\t\tt.Skip(\"materialized views require Cassandra 3.0+\")\n\t\t\t}\n\t\t\tif isTabletsSupported() {\n\t\t\t\tt.Skip(\"materialized views are not supported on tables with 
tablets\")\n\t\t\t}\n\n\t\t\tbaseTable := testTableName(t, \"base\")\n\t\t\tviewName := testTableName(t, \"view\")\n\t\t\tif err := createTable(session, fmt.Sprintf(\n\t\t\t\t\"CREATE TABLE IF NOT EXISTS %s.%s (pk int, ck int, v int, PRIMARY KEY (pk, ck))\", ks, baseTable)); err != nil {\n\t\t\t\tt.Fatalf(\"create base table: %v\", err)\n\t\t\t}\n\t\t\tdefer session.Query(fmt.Sprintf(\"DROP MATERIALIZED VIEW IF EXISTS %s.%s\", ks, viewName)).Exec()\n\t\t\tdefer session.Query(fmt.Sprintf(\"DROP TABLE IF EXISTS %s.%s\", ks, baseTable)).Exec()\n\n\t\t\tif err := createTable(session, fmt.Sprintf(\n\t\t\t\t\"CREATE MATERIALIZED VIEW IF NOT EXISTS %s.%s AS SELECT pk, ck, v FROM %s.%s WHERE pk IS NOT NULL AND ck IS NOT NULL AND v IS NOT NULL PRIMARY KEY (v, pk, ck)\",\n\t\t\t\tks, viewName, ks, baseTable)); err != nil {\n\t\t\t\tt.Fatalf(\"create materialized view: %v\", err)\n\t\t\t}\n\n\t\t\twaitForSchemaRefresh()\n\n\t\t\ttm, err := session.TableMetadata(ks, baseTable)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"TableMetadata for base table failed: %v\", err)\n\t\t\t}\n\t\t\tif tm.Name != baseTable {\n\t\t\t\tt.Errorf(\"expected table name %q, got %q\", baseTable, tm.Name)\n\t\t\t}\n\n\t\t\tsession.metadataDescriber.invalidateKeyspaceSchema(ks)\n\t\t\tkm, err := session.KeyspaceMetadata(ks)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"KeyspaceMetadata failed: %v\", err)\n\t\t\t}\n\t\t\tif _, ok := km.Views[viewName]; !ok {\n\t\t\t\tt.Errorf(\"expected view %q in keyspace metadata\", viewName)\n\t\t\t}\n\t\t\tif km.Views[viewName].BaseTableName != baseTable {\n\t\t\t\tt.Errorf(\"expected view base table %q, got %q\", baseTable, km.Views[viewName].BaseTableName)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"after_alter_table\", func(t *testing.T) {\n\t\t\ttable := testTableName(t)\n\t\t\tif err := createTable(session, fmt.Sprintf(\n\t\t\t\t\"CREATE TABLE IF NOT EXISTS %s.%s (pk int PRIMARY KEY, v int)\", ks, table)); err != nil {\n\t\t\t\tt.Fatalf(\"create table: %v\", 
err)\n\t\t\t}\n\t\t\tdefer session.Query(fmt.Sprintf(\"DROP TABLE IF EXISTS %s.%s\", ks, table)).Exec()\n\n\t\t\tif err := createTable(session, fmt.Sprintf(\n\t\t\t\t\"ALTER TABLE %s.%s ADD v2 text\", ks, table)); err != nil {\n\t\t\t\tt.Fatalf(\"alter table: %v\", err)\n\t\t\t}\n\n\t\t\twaitForSchemaRefresh()\n\n\t\t\ttm, err := session.TableMetadata(ks, table)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"TableMetadata failed: %v\", err)\n\t\t\t}\n\t\t\tif _, ok := tm.Columns[\"v2\"]; !ok {\n\t\t\t\tt.Errorf(\"expected column 'v2' after ALTER TABLE, got columns: %v\", columnNames(tm.Columns))\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"after_drop_and_recreate\", func(t *testing.T) {\n\t\t\ttable := testTableName(t)\n\t\t\tif err := createTable(session, fmt.Sprintf(\n\t\t\t\t\"CREATE TABLE IF NOT EXISTS %s.%s (pk int PRIMARY KEY, v int)\", ks, table)); err != nil {\n\t\t\t\tt.Fatalf(\"create table: %v\", err)\n\t\t\t}\n\n\t\t\twaitForSchemaRefresh()\n\n\t\t\tif _, err := session.TableMetadata(ks, table); err != nil {\n\t\t\t\tt.Fatalf(\"TableMetadata before drop failed: %v\", err)\n\t\t\t}\n\n\t\t\tif err := createTable(session, fmt.Sprintf(\"DROP TABLE %s.%s\", ks, table)); err != nil {\n\t\t\t\tt.Fatalf(\"drop table: %v\", err)\n\t\t\t}\n\t\t\tif err := createTable(session, fmt.Sprintf(\n\t\t\t\t\"CREATE TABLE %s.%s (pk text PRIMARY KEY, new_col int)\", ks, table)); err != nil {\n\t\t\t\tt.Fatalf(\"recreate table: %v\", err)\n\t\t\t}\n\t\t\tdefer session.Query(fmt.Sprintf(\"DROP TABLE IF EXISTS %s.%s\", ks, table)).Exec()\n\n\t\t\twaitForSchemaRefresh()\n\n\t\t\ttm, err := session.TableMetadata(ks, table)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"TableMetadata after recreate failed: %v\", err)\n\t\t\t}\n\t\t\tif _, ok := tm.Columns[\"new_col\"]; !ok {\n\t\t\t\tt.Errorf(\"expected column 'new_col' after recreate, got columns: %v\", columnNames(tm.Columns))\n\t\t\t}\n\t\t\tif _, ok := tm.Columns[\"v\"]; ok {\n\t\t\t\tt.Errorf(\"old column 'v' should not exist after 
recreate\")\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"nonexistent_table\", func(t *testing.T) {\n\t\t\t_, err := session.TableMetadata(ks, \"does_not_exist_at_all\")\n\t\t\tif err == nil {\n\t\t\t\tt.Fatal(\"expected error for nonexistent table, got nil\")\n\t\t\t}\n\t\t\tif !errors.Is(err, ErrNotFound) {\n\t\t\t\tt.Errorf(\"expected ErrNotFound, got: %v\", err)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"empty_table_name\", func(t *testing.T) {\n\t\t\t_, err := session.TableMetadata(ks, \"\")\n\t\t\tif err == nil {\n\t\t\t\tt.Fatal(\"expected error for empty table name, got nil\")\n\t\t\t}\n\t\t\tif !errors.Is(err, ErrNoTable) {\n\t\t\t\tt.Errorf(\"expected ErrNoTable, got: %v\", err)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"empty_keyspace\", func(t *testing.T) {\n\t\t\t_, err := session.TableMetadata(\"\", \"some_table\")\n\t\t\tif err == nil {\n\t\t\t\tt.Fatal(\"expected error for empty keyspace, got nil\")\n\t\t\t}\n\t\t\tif !errors.Is(err, ErrNoKeyspace) {\n\t\t\t\tt.Errorf(\"expected ErrNoKeyspace, got: %v\", err)\n\t\t\t}\n\t\t})\n\t})\n\n\tt.Run(\"KeyspaceMetadata\", func(t *testing.T) {\n\t\tt.Run(\"includes_new_table\", func(t *testing.T) {\n\t\t\ttable := testTableName(t)\n\t\t\tif err := createTable(session, fmt.Sprintf(\n\t\t\t\t\"CREATE TABLE IF NOT EXISTS %s.%s (pk int PRIMARY KEY, v int)\", ks, table)); err != nil {\n\t\t\t\tt.Fatalf(\"create table: %v\", err)\n\t\t\t}\n\t\t\tdefer session.Query(fmt.Sprintf(\"DROP TABLE IF EXISTS %s.%s\", ks, table)).Exec()\n\n\t\t\twaitForSchemaRefresh()\n\n\t\t\tsession.metadataDescriber.invalidateKeyspaceSchema(ks)\n\t\t\tkm, err := session.KeyspaceMetadata(ks)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"KeyspaceMetadata failed: %v\", err)\n\t\t\t}\n\t\t\tif _, ok := km.Tables[table]; !ok {\n\t\t\t\tt.Fatalf(\"expected table %q in keyspace metadata, got tables: %v\", table, tableNames(km.Tables))\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"excludes_dropped_table\", func(t *testing.T) {\n\t\t\ttable := testTableName(t)\n\t\t\tif err := createTable(session, 
fmt.Sprintf(\n\t\t\t\t\"CREATE TABLE IF NOT EXISTS %s.%s (pk int PRIMARY KEY, v int)\", ks, table)); err != nil {\n\t\t\t\tt.Fatalf(\"create table: %v\", err)\n\t\t\t}\n\n\t\t\twaitForSchemaRefresh()\n\n\t\t\tsession.metadataDescriber.invalidateKeyspaceSchema(ks)\n\t\t\tkm, err := session.KeyspaceMetadata(ks)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"KeyspaceMetadata before drop failed: %v\", err)\n\t\t\t}\n\t\t\tif _, ok := km.Tables[table]; !ok {\n\t\t\t\tt.Fatalf(\"expected table %q before drop\", table)\n\t\t\t}\n\n\t\t\tif err := createTable(session, fmt.Sprintf(\"DROP TABLE %s.%s\", ks, table)); err != nil {\n\t\t\t\tt.Fatalf(\"drop table: %v\", err)\n\t\t\t}\n\n\t\t\twaitForSchemaRefresh()\n\n\t\t\tsession.metadataDescriber.invalidateKeyspaceSchema(ks)\n\t\t\tkm, err = session.KeyspaceMetadata(ks)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"KeyspaceMetadata after drop failed: %v\", err)\n\t\t\t}\n\t\t\tif _, ok := km.Tables[table]; ok {\n\t\t\t\tt.Errorf(\"table %q should not appear after DROP\", table)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"multiple_tables\", func(t *testing.T) {\n\t\t\ttables := []string{testTableName(t, \"a\"), testTableName(t, \"b\"), testTableName(t, \"c\")}\n\t\t\tfor _, table := range tables {\n\t\t\t\tif err := createTable(session, fmt.Sprintf(\n\t\t\t\t\t\"CREATE TABLE IF NOT EXISTS %s.%s (pk int PRIMARY KEY)\", ks, table)); err != nil {\n\t\t\t\t\tt.Fatalf(\"create table %s: %v\", table, err)\n\t\t\t\t}\n\t\t\t\tdefer session.Query(fmt.Sprintf(\"DROP TABLE IF EXISTS %s.%s\", ks, table)).Exec()\n\t\t\t}\n\n\t\t\twaitForSchemaRefresh()\n\n\t\t\tsession.metadataDescriber.invalidateKeyspaceSchema(ks)\n\t\t\tkm, err := session.KeyspaceMetadata(ks)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"KeyspaceMetadata failed: %v\", err)\n\t\t\t}\n\t\t\tfor _, table := range tables {\n\t\t\t\tif _, ok := km.Tables[table]; !ok {\n\t\t\t\t\tt.Errorf(\"expected table %q in keyspace metadata\", 
table)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"nonexistent_keyspace\", func(t *testing.T) {\n\t\t\t_, err := session.KeyspaceMetadata(\"keyspace_that_does_not_exist_xyz\")\n\t\t\tif err == nil {\n\t\t\t\tt.Fatal(\"expected error for nonexistent keyspace, got nil\")\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"empty_keyspace\", func(t *testing.T) {\n\t\t\t_, err := session.KeyspaceMetadata(\"\")\n\t\t\tif err == nil {\n\t\t\t\tt.Fatal(\"expected error for empty keyspace, got nil\")\n\t\t\t}\n\t\t\tif !errors.Is(err, ErrNoKeyspace) {\n\t\t\t\tt.Errorf(\"expected ErrNoKeyspace, got: %v\", err)\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc tableNames(tables map[string]*TableMetadata) []string {\n\tnames := make([]string, 0, len(tables))\n\tfor name := range tables {\n\t\tnames = append(names, name)\n\t}\n\treturn names\n}\n\nfunc columnNames(columns map[string]*ColumnMetadata) []string {\n\tnames := make([]string, 0, len(columns))\n\tfor name := range columns {\n\t\tnames = append(names, name)\n\t}\n\treturn names\n}\n\n// Integration test of just querying for data from the system.schema_keyspace table where the keyspace DOES NOT exist.\nfunc TestGetKeyspaceMetadataFails(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\t_, err := getKeyspaceMetadata(session, \"gocql_keyspace_does_not_exist\")\n\n\tif err != ErrKeyspaceDoesNotExist || err == nil {\n\t\tt.Fatalf(\"Expected error of type ErrKeySpaceDoesNotExist. 
Instead, error was %v\", err)\n\t}\n}\n\n// Integration test of the routing key calculation\nfunc TestRoutingKey(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tsingleTable := testTableName(t, \"single\")\n\tcompositeTable := testTableName(t, \"composite\")\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (first_id int, second_id int, PRIMARY KEY (first_id, second_id))\", singleTable)); err != nil {\n\t\tt.Fatalf(\"failed to create table with error '%v'\", err)\n\t}\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (first_id int, second_id int, PRIMARY KEY ((first_id, second_id)))\", compositeTable)); err != nil {\n\t\tt.Fatalf(\"failed to create table with error '%v'\", err)\n\t}\n\n\tinitCacheSize := session.routingKeyInfoCache.lru.Len()\n\n\troutingKeyInfo, err := session.routingKeyInfo(context.Background(), fmt.Sprintf(\"SELECT * FROM %s WHERE second_id=? AND first_id=?\", singleTable), time.Second)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get routing key info due to error: %v\", err)\n\t}\n\tif routingKeyInfo == nil {\n\t\tt.Fatal(\"Expected routing key info, but was nil\")\n\t}\n\tif len(routingKeyInfo.indexes) != 1 {\n\t\tt.Fatalf(\"Expected routing key indexes length to be 1 but was %d\", len(routingKeyInfo.indexes))\n\t}\n\tif routingKeyInfo.indexes[0] != 1 {\n\t\tt.Errorf(\"Expected routing key index[0] to be 1 but was %d\", routingKeyInfo.indexes[0])\n\t}\n\tif len(routingKeyInfo.types) != 1 {\n\t\tt.Fatalf(\"Expected routing key types length to be 1 but was %d\", len(routingKeyInfo.types))\n\t}\n\tif routingKeyInfo.types[0] == nil {\n\t\tt.Fatal(\"Expected routing key types[0] to be non-nil\")\n\t}\n\tif routingKeyInfo.types[0].Type() != TypeInt {\n\t\tt.Fatalf(\"Expected routing key types[0].Type to be %v but was %v\", TypeInt, routingKeyInfo.types[0].Type())\n\t}\n\n\t// verify the cache is working\n\troutingKeyInfo, err = 
session.routingKeyInfo(\n\t\tcontext.Background(),\n\t\tfmt.Sprintf(\"SELECT * FROM %s WHERE second_id=? AND first_id=?\", singleTable),\n\t\t// Routing info will be pulled from cached prepared statement, it should work with minimal timeout\n\t\ttime.Nanosecond)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get routing key info due to error: %v\", err)\n\t}\n\tif len(routingKeyInfo.indexes) != 1 {\n\t\tt.Fatalf(\"Expected routing key indexes length to be 1 but was %d\", len(routingKeyInfo.indexes))\n\t}\n\tif routingKeyInfo.indexes[0] != 1 {\n\t\tt.Errorf(\"Expected routing key index[0] to be 1 but was %d\", routingKeyInfo.indexes[0])\n\t}\n\tif len(routingKeyInfo.types) != 1 {\n\t\tt.Fatalf(\"Expected routing key types length to be 1 but was %d\", len(routingKeyInfo.types))\n\t}\n\tif routingKeyInfo.types[0] == nil {\n\t\tt.Fatal(\"Expected routing key types[0] to be non-nil\")\n\t}\n\tif routingKeyInfo.types[0].Type() != TypeInt {\n\t\tt.Fatalf(\"Expected routing key types[0] to be %v but was %v\", TypeInt, routingKeyInfo.types[0].Type())\n\t}\n\tcacheSize := session.routingKeyInfoCache.lru.Len()\n\tif cacheSize != initCacheSize+1 {\n\t\tt.Errorf(\"Expected cache size to be %d but was %d\", initCacheSize+1, cacheSize)\n\t}\n\n\tquery := session.Query(fmt.Sprintf(\"SELECT * FROM %s WHERE second_id=? AND first_id=?\", singleTable), 1, 2)\n\troutingKey, err := query.GetRoutingKey()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get routing key due to error: %v\", err)\n\t}\n\texpectedRoutingKey := []byte{0, 0, 0, 2}\n\tif !reflect.DeepEqual(expectedRoutingKey, routingKey) {\n\t\tt.Errorf(\"Expected routing key %v but was %v\", expectedRoutingKey, routingKey)\n\t}\n\n\troutingKeyInfo, err = session.routingKeyInfo(\n\t\tcontext.Background(),\n\t\tfmt.Sprintf(\"SELECT * FROM %s WHERE second_id=? 
AND first_id=?\", compositeTable),\n\t\ttime.Second)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get routing key info due to error: %v\", err)\n\t}\n\tif routingKeyInfo == nil {\n\t\tt.Fatal(\"Expected routing key info, but was nil\")\n\t}\n\tif len(routingKeyInfo.indexes) != 2 {\n\t\tt.Fatalf(\"Expected routing key indexes length to be 2 but was %d\", len(routingKeyInfo.indexes))\n\t}\n\tif routingKeyInfo.indexes[0] != 1 {\n\t\tt.Errorf(\"Expected routing key index[0] to be 1 but was %d\", routingKeyInfo.indexes[0])\n\t}\n\tif routingKeyInfo.indexes[1] != 0 {\n\t\tt.Errorf(\"Expected routing key index[1] to be 0 but was %d\", routingKeyInfo.indexes[1])\n\t}\n\tif len(routingKeyInfo.types) != 2 {\n\t\tt.Fatalf(\"Expected routing key types length to be 1 but was %d\", len(routingKeyInfo.types))\n\t}\n\tif routingKeyInfo.types[0] == nil {\n\t\tt.Fatal(\"Expected routing key types[0] to be non-nil\")\n\t}\n\tif routingKeyInfo.types[0].Type() != TypeInt {\n\t\tt.Fatalf(\"Expected routing key types[0] to be %v but was %v\", TypeInt, routingKeyInfo.types[0].Type())\n\t}\n\tif routingKeyInfo.types[1] == nil {\n\t\tt.Fatal(\"Expected routing key types[1] to be non-nil\")\n\t}\n\tif routingKeyInfo.types[1].Type() != TypeInt {\n\t\tt.Fatalf(\"Expected routing key types[0] to be %v but was %v\", TypeInt, routingKeyInfo.types[1].Type())\n\t}\n\n\tquery = session.Query(fmt.Sprintf(\"SELECT * FROM %s WHERE second_id=? 
AND first_id=?\", compositeTable), 1, 2)\n\troutingKey, err = query.GetRoutingKey()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get routing key due to error: %v\", err)\n\t}\n\texpectedRoutingKey = []byte{0, 4, 0, 0, 0, 2, 0, 0, 4, 0, 0, 0, 1, 0}\n\tif !reflect.DeepEqual(expectedRoutingKey, routingKey) {\n\t\tt.Errorf(\"Expected routing key %v but was %v\", expectedRoutingKey, routingKey)\n\t}\n\n\t// verify the cache is working\n\tcacheSize = session.routingKeyInfoCache.lru.Len()\n\tif cacheSize != initCacheSize+2 {\n\t\tt.Errorf(\"Expected cache size to be %d but was %d\", initCacheSize+2, cacheSize)\n\t}\n}\n\n// Integration test of the token-aware policy-based connection pool\nfunc TestTokenAwareConnPool(t *testing.T) {\n\tt.Parallel()\n\n\tcluster := createCluster()\n\tcluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())\n\n\t// force metadata query to page\n\tcluster.PageSize = 1\n\n\tsession := createSessionFromCluster(cluster, t)\n\tdefer session.Close()\n\n\texpectedPoolSize := cluster.NumConns * len(session.hostSource.getHostsList())\n\n\t// wait for pool to fill\n\tfor i := 0; i < 50; i++ {\n\t\tif session.pool.Size() == expectedPoolSize {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\n\tif expectedPoolSize != session.pool.Size() {\n\t\tt.Errorf(\"Expected pool size %d but was %d\", expectedPoolSize, session.pool.Size())\n\t}\n\n\ttable := testTableName(t)\n\totherTable := testTableName(t, \"other\")\n\n\t// add another cf so there are two pages when fetching table metadata from our keyspace\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, data text, PRIMARY KEY (id))\", otherTable)); err != nil {\n\t\tt.Fatalf(\"failed to create test_token_aware table with err: %v\", err)\n\t}\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, data text, PRIMARY KEY (id))\", table)); err != nil {\n\t\tt.Fatalf(\"failed to create test_token_aware 
table with err: %v\", err)\n\t}\n\tquery := session.Query(fmt.Sprintf(\"INSERT INTO %s (id, data) VALUES (?,?)\", table), 42, \"8 * 6 =\")\n\tif err := query.Exec(); err != nil {\n\t\tt.Fatalf(\"failed to insert with err: %v\", err)\n\t}\n\n\tquery = session.Query(fmt.Sprintf(\"SELECT data FROM %s where id = ?\", table), 42).Consistency(One)\n\tvar data string\n\tif err := query.Scan(&data); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// TODO add verification that the query went to the correct host\n}\n\nfunc TestNegativeStream(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tconn := getRandomConn(t, session)\n\n\tconst stream = -50\n\twriter := frameWriterFunc(func(f *framer, streamID int) error {\n\t\tf.writeHeader(0, frm.OpOptions, stream)\n\t\treturn f.finish()\n\t})\n\n\tframe, err := conn.exec(context.Background(), writer, nil, time.Second)\n\tif err == nil {\n\t\tt.Fatalf(\"expected to get an error on stream %d\", stream)\n\t} else if frame != nil {\n\t\tt.Fatalf(\"expected to get nil frame got %+v\", frame)\n\t}\n}\n\nfunc TestManualQueryPaging(t *testing.T) {\n\tt.Parallel()\n\n\tconst rowsToInsert = 5\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, count int, PRIMARY KEY (id))\", table)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor i := 0; i < rowsToInsert; i++ {\n\t\terr := session.Query(fmt.Sprintf(\"INSERT INTO %s(id, count) VALUES(?, ?)\", table), i, i*i).Exec()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\t// disable auto paging, 1 page per iteration\n\tquery := session.Query(fmt.Sprintf(\"SELECT id, count FROM %s\", table)).PageState(nil).PageSize(2)\n\tvar id, count, fetched int\n\n\titer := query.Iter()\n\t// NOTE: this isnt very indicative of how it should be used, the idea is that\n\t// the page state is returned to some client who will send it back to 
manually\n\t// page through the results.\n\tfor {\n\t\tfor iter.Scan(&id, &count) {\n\t\t\tif count != (id * id) {\n\t\t\t\tt.Fatalf(\"got wrong value from iteration: got %d expected %d\", count, id*id)\n\t\t\t}\n\n\t\t\tfetched++\n\t\t}\n\n\t\tif !iter.LastPage() {\n\t\t\t// more pages\n\t\t\titer = query.PageState(iter.PageState()).Iter()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err := iter.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif fetched != rowsToInsert {\n\t\tt.Fatalf(\"expected to fetch %d rows got %d\", rowsToInsert, fetched)\n\t}\n}\n\n// Issue 475\nfunc TestSessionBindRoutingKey(t *testing.T) {\n\tt.Parallel()\n\n\tcluster := createCluster()\n\tcluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())\n\n\tsession := createSessionFromCluster(cluster, t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (\n\t\t\tkey     varchar,\n\t\t\tvalue   int,\n\t\t\tPRIMARY KEY (key)\n\t\t)`, table)); err != nil {\n\n\t\tt.Fatal(err)\n\t}\n\n\tconst (\n\t\tkey   = \"routing-key\"\n\t\tvalue = 5\n\t)\n\n\tfn := func(info *QueryInfo) ([]any, error) {\n\t\treturn []any{key, value}, nil\n\t}\n\n\tq := session.Bind(fmt.Sprintf(\"INSERT INTO %s(key, value) VALUES(?, ?)\", table), fn)\n\tif err := q.Exec(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestJSONSupport(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tif session.cfg.ProtoVersion < protoVersion4 {\n\t\tt.Skip(\"skipping JSON support on proto < 4\")\n\t}\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (\n\t\t    id text PRIMARY KEY,\n\t\t    age int,\n\t\t    state text\n\t\t)`, table)); err != nil {\n\n\t\tt.Fatal(err)\n\t}\n\n\terr := session.Query(fmt.Sprintf(\"INSERT INTO %s JSON ?\", table), `{\"id\": \"user123\", \"age\": 42, \"state\": \"TX\"}`).Exec()\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\n\tvar (\n\t\tid    string\n\t\tage   int\n\t\tstate string\n\t)\n\n\terr = session.Query(fmt.Sprintf(\"SELECT id, age, state FROM %s WHERE id = ?\", table), \"user123\").Scan(&id, &age, &state)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif id != \"user123\" {\n\t\tt.Errorf(\"got id %q expected %q\", id, \"user123\")\n\t}\n\tif age != 42 {\n\t\tt.Errorf(\"got age %d expected %d\", age, 42)\n\t}\n\tif state != \"TX\" {\n\t\tt.Errorf(\"got state %q expected %q\", state, \"TX\")\n\t}\n}\n\nfunc TestUnmarshallNestedTypes(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (\n\t\t    id text PRIMARY KEY,\n\t\t    val list<frozen<map<text, text> > >\n\t\t)`, table)); err != nil {\n\n\t\tt.Fatal(err)\n\t}\n\n\tm := []map[string]string{\n\t\t{\"key1\": \"val1\"},\n\t\t{\"key2\": \"val2\"},\n\t}\n\n\tconst id = \"key\"\n\terr := session.Query(fmt.Sprintf(\"INSERT INTO %s(id, val) VALUES(?, ?)\", table), id, m).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar data []map[string]string\n\tif err := session.Query(fmt.Sprintf(\"SELECT val FROM %s WHERE id = ?\", table), id).Scan(&data); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !reflect.DeepEqual(data, m) {\n\t\tt.Fatalf(\"%+#v != %+#v\", data, m)\n\t}\n}\n\nfunc TestSchemaReset(t *testing.T) {\n\tt.Parallel()\n\n\tif flagCassVersion.Major == 0 || flagCassVersion.Before(2, 1, 3) {\n\t\tt.Skipf(\"skipping TestSchemaReset due to CASSANDRA-7910 in Cassandra <2.1.3 version=%v\", flagCassVersion)\n\t}\n\n\tcluster := createCluster()\n\tcluster.NumConns = 1\n\n\tsession := createSessionFromCluster(cluster, t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (\n\t\tid text PRIMARY KEY)`, table)); err != nil {\n\n\t\tt.Fatal(err)\n\t}\n\n\tconst key = \"test\"\n\n\terr := 
session.Query(fmt.Sprintf(\"INSERT INTO %s(id) VALUES(?)\", table), key).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar id string\n\terr = session.Query(fmt.Sprintf(\"SELECT * FROM %s WHERE id=?\", table), key).Scan(&id)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if id != key {\n\t\tt.Fatalf(\"expected to get id=%q got=%q\", key, id)\n\t}\n\n\tif err := createTable(session, fmt.Sprintf(`ALTER TABLE gocql_test.%s ADD val text`, table)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tconst expVal = \"test-val\"\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, val) VALUES(?, ?)\", table), key, expVal).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar val string\n\terr = session.Query(fmt.Sprintf(\"SELECT * FROM %s WHERE id=?\", table), key).Scan(&id, &val)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif id != key {\n\t\tt.Errorf(\"expected to get id=%q got=%q\", key, id)\n\t}\n\tif val != expVal {\n\t\tt.Errorf(\"expected to get val=%q got=%q\", expVal, val)\n\t}\n}\n\nfunc TestCreateSession_DontSwallowError(t *testing.T) {\n\tt.Parallel()\n\n\tt.Skip(\"This test is bad, and the resultant error from cassandra changes between versions\")\n\tcluster := createCluster()\n\tcluster.ProtoVersion = 0x100\n\tsession, err := cluster.CreateSession()\n\tif err == nil {\n\t\tsession.Close()\n\n\t\tt.Fatal(\"expected to get an error for unsupported protocol\")\n\t}\n\n\tif flagCassVersion.Major < 3 {\n\t\t// TODO: we should get a distinct error type here which include the underlying\n\t\t// cassandra error about the protocol version, for now check this here.\n\t\tif !strings.Contains(err.Error(), \"Invalid or unsupported protocol version\") {\n\t\t\tt.Fatalf(`expcted to get error \"unsupported protocol version\" got: %q`, err)\n\t\t}\n\t} else {\n\t\tif !strings.Contains(err.Error(), \"unsupported response version\") {\n\t\t\tt.Fatalf(`expcted to get error \"unsupported response version\" got: %q`, err)\n\t\t}\n\t}\n}\n\nfunc 
TestControl_DiscoverProtocol(t *testing.T) {\n\tt.Parallel()\n\n\tcluster := createCluster()\n\tcluster.ProtoVersion = 0\n\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer session.Close()\n\n\tif session.cfg.ProtoVersion == 0 {\n\t\tt.Fatal(\"did not discovery protocol\")\n\t}\n}\n\n// TestUnsetCol verify unset column will not replace an existing column\nfunc TestUnsetCol(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tif session.cfg.ProtoVersion < protoVersion4 {\n\t\tt.Skip(\"Unset Values are not supported in protocol < 4\")\n\t}\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, my_int int, my_text text, PRIMARY KEY (id))\", table)); err != nil {\n\t\tt.Fatalf(\"failed to create table with error '%v'\", err)\n\t}\n\tif err := session.Query(fmt.Sprintf(\"INSERT INTO %s (id,my_int,my_text) VALUES (?,?,?)\", table), 1, 2, \"3\").Exec(); err != nil {\n\t\tt.Fatalf(\"failed to insert with err: %v\", err)\n\t}\n\tif err := session.Query(fmt.Sprintf(\"INSERT INTO %s (id,my_int,my_text) VALUES (?,?,?)\", table), 1, UnsetValue, UnsetValue).Exec(); err != nil {\n\t\tt.Fatalf(\"failed to insert with err: %v\", err)\n\t}\n\n\tvar id, mInt int\n\tvar mText string\n\n\tif err := session.Query(fmt.Sprintf(\"SELECT id, my_int ,my_text FROM %s\", table)).Scan(&id, &mInt, &mText); err != nil {\n\t\tt.Fatalf(\"failed to select with err: %v\", err)\n\t} else if id != 1 || mInt != 2 || mText != \"3\" {\n\t\tt.Fatalf(\"Expected results: 1, 2, \\\"3\\\", got %v, %v, %v\", id, mInt, mText)\n\t}\n}\n\n// TestUnsetColBatch verify unset column will not replace a column in batch\nfunc TestUnsetColBatch(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tif session.cfg.ProtoVersion < protoVersion4 {\n\t\tt.Skip(\"Unset Values are not supported in protocol < 4\")\n\t}\n\n\ttable := 
testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, my_int int, my_text text, PRIMARY KEY (id))\", table)); err != nil {\n\t\tt.Fatalf(\"failed to create table with error '%v'\", err)\n\t}\n\n\tb := session.Batch(LoggedBatch)\n\tb.Query(fmt.Sprintf(\"INSERT INTO gocql_test.%s(id, my_int, my_text) VALUES (?,?,?)\", table), 1, 1, UnsetValue)\n\tb.Query(fmt.Sprintf(\"INSERT INTO gocql_test.%s(id, my_int, my_text) VALUES (?,?,?)\", table), 1, UnsetValue, \"\")\n\tb.Query(fmt.Sprintf(\"INSERT INTO gocql_test.%s(id, my_int, my_text) VALUES (?,?,?)\", table), 2, 2, UnsetValue)\n\n\tif err := session.ExecuteBatch(b); err != nil {\n\t\tt.Fatalf(\"query failed. %v\", err)\n\t} else {\n\t\tif b.Attempts() < 1 {\n\t\t\tt.Fatal(\"expected at least 1 attempt, but got 0\")\n\t\t}\n\t\tif b.Latency() <= 0 {\n\t\t\tt.Fatalf(\"expected latency to be greater than 0, but got %v instead.\", b.Latency())\n\t\t}\n\t}\n\tvar id, mInt, count int\n\tvar mText string\n\tif err := session.Query(fmt.Sprintf(\"SELECT count(*) FROM gocql_test.%s;\", table)).Scan(&count); err != nil {\n\t\tt.Fatalf(\"Failed to select with err: %v\", err)\n\t} else if count != 2 {\n\t\tt.Fatalf(\"Expected Batch Insert count 2, got %v\", count)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(\"SELECT id, my_int ,my_text FROM gocql_test.%s where id=1;\", table)).Scan(&id, &mInt, &mText); err != nil {\n\t\tt.Fatalf(\"failed to select with err: %v\", err)\n\t} else if id != mInt {\n\t\tt.Fatalf(\"expected id, my_int to be 1, got %v and %v\", id, mInt)\n\t}\n}\n\nfunc TestQuery_NamedValues(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s(id int, value text, PRIMARY KEY (id))\", table)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr := session.Query(fmt.Sprintf(\"INSERT INTO gocql_test.%s(id, value) VALUES(:id, :value)\", 
table), NamedValue(\"id\", 1), NamedValue(\"value\", \"i am a value\")).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar value string\n\tif err := session.Query(fmt.Sprintf(\"SELECT VALUE from gocql_test.%s WHERE id = :id\", table), NamedValue(\"id\", 1)).Scan(&value); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n// TestQuery_SetHostID ensures that queries are sent to the specified host only.\n// WARNING: This test must NOT use t.Parallel(). It calls pool.host.setState(NodeDown)\n// which mutates shared HostInfo state visible to all concurrent sessions.\n//\n//nolint:paralleltest // mutates shared HostInfo state via setState(NodeDown)\nfunc TestQuery_SetHostID(t *testing.T) {\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\thosts := session.GetHosts()\n\n\tconst iterations = 5\n\tfor _, expectedHost := range hosts {\n\t\tfor i := 0; i < iterations; i++ {\n\t\t\tvar actualHostID string\n\t\t\terr := session.Query(\"SELECT host_id FROM system.local\").\n\t\t\t\tSetHostID(expectedHost.HostID()).\n\t\t\t\tScan(&actualHostID)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tif expectedHost.HostID() != actualHostID {\n\t\t\t\tt.Fatalf(\"Expected query to be executed on host %s, but it was executed on %s\",\n\t\t\t\t\texpectedHost.HostID(),\n\t\t\t\t\tactualHostID,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n\n\t// ensuring properly handled invalid host id\n\terr := session.Query(\"SELECT host_id FROM system.local\").\n\t\tSetHostID(\"[invalid]\").\n\t\tExec()\n\tif !errors.Is(err, ErrNoPool) {\n\t\tt.Fatalf(\"Expected error to be: %v, but got %v\", ErrNoPool, err)\n\t}\n\n\t// ensuring that the driver properly handles the case\n\t// when specified host for the query is down\n\thost := hosts[0]\n\tpool, _ := session.pool.getPoolByHostID(host.HostID())\n\t// simulating specified host is down\n\tpool.host.setState(NodeDown)\n\terr = session.Query(\"SELECT host_id FROM system.local\").\n\t\tSetHostID(host.HostID()).\n\t\tExec()\n\tif !errors.Is(err, 
ErrHostDown) {\n\t\tt.Fatalf(\"Expected error to be: %v, but got %v\", ErrHostDown, err)\n\t}\n}\n"
  },
  {
    "path": "ci/clean-old-temporary-docker-images.py",
    "content": "import os\nimport requests\nfrom datetime import datetime, timedelta\n\nDOCKERHUB_USERNAME = os.environ[\"DOCKERHUB_USERNAME\"]\nDOCKERHUB_TOKEN = os.environ[\"DOCKERHUB_TOKEN\"]\nDELETE_AFTER_DAYS = os.environ[\"DELETE_AFTER_DAYS\"]\n\ndef get_docker_token(username, password):\n    url = \"https://hub.docker.com/v2/users/login/\"\n    headers = {\"Content-Type\": \"application/json\"}\n    data = {\"username\": username, \"password\": password}\n\n    response = requests.post(url, json=data, headers=headers)\n    if response.status_code == 200:\n        return response.json()[\"token\"]\n    else:\n        print(f\"Failed to login to DockerHub: {response.status_code}\")\n        return None\n\ndef get_repo_tags(token):\n    url = f\"https://hub.docker.com/v2/repositories/scylladb/gocql-extended-ci/tags/\"\n    headers = {\"Authorization\": f\"Bearer {token}\"}\n    response = requests.get(url, headers=headers)\n    if response.status_code != 200:\n        print(f\"Failed to get tags, Status Code: {response.status_code}, {response.text}\")\n        return None\n    return response.json()[\"results\"]\n\ndef delete_tag(tag, token):\n    url = f\"https://hub.docker.com/v2/repositories/scylladb/gocql-extended-ci/tags/{tag}/\"\n    headers = {\"Authorization\": f\"Bearer {token}\"}\n    response = requests.delete(url, headers=headers)\n    if response.status_code > 200 and response.status_code < 300:\n        print(f\"Deleted tag: {tag}\")\n        return True\n    print(f\"Failed to delete tag: {tag}, Status Code: {response.status_code}\")\n    return False\n\ndef clean_old_images():\n    token = get_docker_token(DOCKERHUB_USERNAME, DOCKERHUB_TOKEN)\n    if token is None:\n        return False\n    tags = get_repo_tags(token)\n    if tags is None:\n        return False\n    threshold_date = datetime.now() - timedelta(days=int(DELETE_AFTER_DAYS))\n    status = True\n    for tag in tags:\n        last_updated = datetime.strptime(tag[\"last_updated\"], 
\"%Y-%m-%dT%H:%M:%S.%fZ\")\n        if last_updated < threshold_date:\n            status = status and delete_tag(tag[\"name\"], token)\n    return status\n\nif __name__ == \"__main__\":\n    if not clean_old_images():\n        exit(1)\n    exit(0)\n"
  },
  {
    "path": "client_routes.go",
    "content": "package gocql\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/events\"\n\t\"github.com/gocql/gocql/internal/debug\"\n\t\"github.com/gocql/gocql/internal/eventbus\"\n)\n\ntype ClientRoutesEndpoint struct {\n\t// Scylla Cloud ConnectionID to read from `system.client_routes`\n\tConnectionID string\n\n\t// Ip Address or DNS name of the AWS endpoint\n\t// Could stay empty, in this case driver will pick it up from system.client_routes table\n\tConnectionAddr string\n}\n\nfunc (e ClientRoutesEndpoint) Validate() error {\n\tif e.ConnectionID == \"\" {\n\t\treturn errors.New(\"missing ConnectionID\")\n\t}\n\treturn nil\n}\n\ntype ClientRoutesEndpointList []ClientRoutesEndpoint\n\nfunc (l *ClientRoutesEndpointList) GetAllConnectionIDs() []string {\n\tvar ids []string\n\tfor _, endpoint := range *l {\n\t\tids = append(ids, endpoint.ConnectionID)\n\t}\n\treturn ids\n}\n\nfunc (l *ClientRoutesEndpointList) GetConnectionAddr(connectionID string) string {\n\tfor _, endpoint := range *l {\n\t\tif endpoint.ConnectionID == connectionID {\n\t\t\treturn endpoint.ConnectionAddr\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc (l *ClientRoutesEndpointList) Validate() error {\n\tfor id, endpoint := range *l {\n\t\tif err := endpoint.Validate(); err != nil {\n\t\t\treturn fmt.Errorf(\"endpoint #%d is invalid: %w\", id, err)\n\t\t}\n\t}\n\treturn nil\n}\n\ntype ClientRoutesConfig struct {\n\tTableName                    string\n\tEndpoints                    ClientRoutesEndpointList\n\tResolveHealthyEndpointPeriod time.Duration\n\tResolverCacheDuration        time.Duration\n\tMaxResolverConcurrency       int\n\n\t// Deprecated: BlockUnknownEndpoints no longer has any effect. Unknown\n\t// endpoints are always blocked. 
This field will be removed in a future\n\t// release.\n\tBlockUnknownEndpoints bool\n\n\t// EnableShardAwareness controls whether the driver should use shard-aware\n\t// connections when using ClientRoutes (PrivateLink).\n\t//\n\t// By default this is false because NAT typically breaks shard-awareness.\n\t// Shard-aware routing relies on the driver knowing the source port of connections,\n\t// which NAT devices modify, making it impossible for the server to route\n\t// requests to the correct shard.\n\t//\n\t// However, in some deployments shard-awareness can still work:\n\t//   - When using PROXY Protocol v2, the original source port is preserved\n\t//     in the protocol header. See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt\n\t//   - When using direct connections without NAT (e.g., VPC peering)\n\t//   - When the load balancer/proxy is shard-aware itself\n\t//\n\t// Set this to true only if your network setup preserves or correctly handles\n\t// the source port information needed for shard-aware routing.\n\tEnableShardAwareness bool\n}\n\nfunc (cfg *ClientRoutesConfig) Validate() error {\n\tif cfg == nil {\n\t\treturn nil\n\t}\n\tif len(cfg.Endpoints) == 0 {\n\t\treturn errors.New(\"no endpoints specified\")\n\t}\n\n\tif err := cfg.Endpoints.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"failed to validate endpoints: %w\", err)\n\t}\n\tif cfg.ResolveHealthyEndpointPeriod < 0 {\n\t\treturn errors.New(\"resolve healthy endpoint period must be >= 0\")\n\t}\n\tif cfg.MaxResolverConcurrency <= 0 {\n\t\treturn errors.New(\"max resolver concurrency must be > 0\")\n\t}\n\treturn nil\n}\n\ntype UnresolvedClientRoute struct {\n\tConnectionID  string\n\tHostID        string\n\tAddress       string\n\tCQLPort       uint16\n\tSecureCQLPort uint16\n}\n\n// Similar returns true if both records targets same host and connection id\nfunc (r UnresolvedClientRoute) Similar(o UnresolvedClientRoute) bool {\n\treturn r.ConnectionID == o.ConnectionID && r.HostID == 
o.HostID\n}\n\n// Equal returns true if both records are exactly the same\nfunc (r UnresolvedClientRoute) Equal(o UnresolvedClientRoute) bool {\n\treturn r == o\n}\n\nfunc (r UnresolvedClientRoute) String() string {\n\treturn fmt.Sprintf(\n\t\t\"UnresolvedClientRoute{ConnectionID=%s, HostID=%s, Address=%s, CQLPort=%d, SecureCQLPort=%d}\",\n\t\tr.ConnectionID,\n\t\tr.HostID,\n\t\tr.Address,\n\t\tr.CQLPort,\n\t\tr.SecureCQLPort,\n\t)\n}\n\ntype UnresolvedClientRouteList []UnresolvedClientRoute\n\nfunc (l *UnresolvedClientRouteList) Len() int {\n\treturn len(*l)\n}\n\ntype ResolvedClientRoute struct {\n\tupdateTime time.Time\n\tUnresolvedClientRoute\n\tallKnownIPs   []net.IP\n\tcurrentIP     net.IP\n\tforcedResolve bool\n}\n\nfunc (r ResolvedClientRoute) String() string {\n\tvar ip string\n\tif r.currentIP == nil {\n\t\tip = \"<nil>\"\n\t} else {\n\t\tip = r.currentIP.String()\n\t}\n\n\treturn fmt.Sprintf(\n\t\t\"ResolvedClientRoute{ConnectionID=%s, HostID=%s, Address=%s, CQLPort=%d, SecureCQLPort=%d, CurrentIP=%s}\",\n\t\tr.ConnectionID,\n\t\tr.HostID,\n\t\tr.Address,\n\t\tr.CQLPort,\n\t\tr.SecureCQLPort,\n\t\tip,\n\t)\n}\n\nfunc (r ResolvedClientRoute) Clone() ResolvedClientRoute {\n\tres := r\n\tif res.allKnownIPs != nil {\n\t\tres.allKnownIPs = make([]net.IP, 0, len(r.allKnownIPs))\n\t\tfor _, ip := range r.allKnownIPs {\n\t\t\tres.allKnownIPs = append(res.allKnownIPs, slices.Clone(ip))\n\t\t}\n\t}\n\tif len(res.currentIP) != 0 {\n\t\tcopy(res.currentIP, r.currentIP)\n\t\tres.currentIP = slices.Clone(res.currentIP)\n\t}\n\treturn res\n}\n\n// Newer returns true if o is newer than r\nfunc (r ResolvedClientRoute) Newer(o ResolvedClientRoute) bool {\n\tif len(r.currentIP) == 0 && len(o.currentIP) != 0 {\n\t\treturn true\n\t}\n\tif len(r.allKnownIPs) == 0 && len(o.allKnownIPs) != 0 {\n\t\treturn true\n\t}\n\treturn r.updateTime.Compare(o.updateTime) == -1\n}\n\n// Similar returns true if both records targets same host and connection id\nfunc (r ResolvedClientRoute) 
Similar(o ResolvedClientRoute) bool {\n\treturn r.ConnectionID == o.ConnectionID && r.HostID == o.HostID\n}\n\nfunc (r ResolvedClientRoute) NeedsUpdate() bool {\n\treturn r.currentIP == nil || len(r.allKnownIPs) == 0 || r.forcedResolve\n}\n\nfunc (r ResolvedClientRoute) GetCQLPort() uint16 {\n\tif r.SecureCQLPort != 0 {\n\t\treturn r.SecureCQLPort\n\t}\n\treturn r.CQLPort\n}\n\ntype ResolvedClientRouteList []ResolvedClientRoute\n\nfunc (l *ResolvedClientRouteList) Len() int {\n\treturn len(*l)\n}\n\nfunc (l *ResolvedClientRouteList) MergeWithUnresolved(unresolved UnresolvedClientRouteList) {\n\tfor _, unres := range unresolved {\n\t\tfound := false\n\t\tfor id, res := range *l {\n\t\t\tif res.UnresolvedClientRoute.Similar(unres) {\n\t\t\t\tfound = true\n\t\t\t\tif res.Equal(unres) {\n\t\t\t\t\t// Records are the same, no information has changed\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t// Records are not the same, add unresolved record\n\t\t\t\t// It will be picked up by resolver on very next iteration\n\t\t\t\t(*l)[id] = ResolvedClientRoute{\n\t\t\t\t\tUnresolvedClientRoute: unres,\n\t\t\t\t\tforcedResolve:         true,\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\t*l = append(*l, ResolvedClientRoute{\n\t\t\t\tUnresolvedClientRoute: unres,\n\t\t\t\tforcedResolve:         true,\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc (l *ResolvedClientRouteList) MergeWithResolved(o *ResolvedClientRouteList) {\n\tfor id, rec := range *l {\n\t\tfor _, otherRec := range *o {\n\t\t\tif rec.Similar(otherRec) {\n\t\t\t\tif rec.Newer(otherRec) {\n\t\t\t\t\t(*l)[id] = otherRec\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, otherRec := range *o {\n\t\tif !slices.ContainsFunc(*l, otherRec.Similar) {\n\t\t\t*l = append(*l, otherRec)\n\t\t}\n\t}\n}\n\nfunc (l *ResolvedClientRouteList) UpdateIfNewer(route ResolvedClientRoute) bool {\n\tfor id, r := range *l {\n\t\tif r.Similar(route) {\n\t\t\tif !r.Newer(route) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t(*l)[id] = 
route\n\t\t\treturn true\n\t\t}\n\t}\n\t*l = append(*l, route)\n\treturn true\n}\n\nfunc (l *ResolvedClientRouteList) FindByHostID(hostID string) *ResolvedClientRoute {\n\tfor i := range *l {\n\t\tif (*l)[i].HostID == hostID {\n\t\t\treturn &(*l)[i]\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (l *ResolvedClientRouteList) Clone() ResolvedClientRouteList {\n\tif len(*l) == 0 {\n\t\treturn make(ResolvedClientRouteList, 0)\n\t}\n\tcpy := make(ResolvedClientRouteList, len(*l))\n\tcopy(cpy, *l)\n\treturn cpy\n}\n\ntype ResolvedEndpoint struct {\n\tupdateTime    time.Time\n\tconnectionID  string\n\tdc            string\n\track          string\n\taddress       string\n\tallKnown      []net.IP\n\tcurrentIP     net.IP\n\tforcedResolve bool\n}\n\ntype ClientRoutesResolver interface {\n\tResolve(endpoint ResolvedClientRoute) ([]net.IP, net.IP, error)\n}\n\ntype resolvedCacheRecord struct {\n\tlastTimeResolved time.Time\n\tlastResult       []net.IP\n}\n\nfunc (r resolvedCacheRecord) WasResolvedLessThan(cachingTime time.Duration) bool {\n\treturn time.Now().UTC().Sub(r.lastTimeResolved) < cachingTime\n}\n\n// simpleClientRoutesResolver resolves endpoints using the provided lookup function while enforcing\n// a minimal period between successive resolutions of the same address.\ntype simpleClientRoutesResolver struct {\n\tresolver    DNSResolver\n\tcache       map[string]resolvedCacheRecord\n\tcachingTime time.Duration\n\tmu          sync.RWMutex\n}\n\nfunc newSimpleClientRoutesResolver(cachingTime time.Duration, resolver DNSResolver) *simpleClientRoutesResolver {\n\tif resolver == nil {\n\t\tresolver = defaultDnsResolver\n\t}\n\treturn &simpleClientRoutesResolver{\n\t\tresolver:    resolver,\n\t\tcachingTime: cachingTime,\n\t\tcache:       make(map[string]resolvedCacheRecord),\n\t}\n}\n\nfunc (r *simpleClientRoutesResolver) Resolve(endpoint ResolvedClientRoute) (allKnown []net.IP, current net.IP, err error) {\n\tr.mu.RLock()\n\tcache, ok := 
r.cache[endpoint.Address]\n\tr.mu.RUnlock()\n\tif ok && cache.WasResolvedLessThan(r.cachingTime) {\n\t\tallKnown = cache.lastResult\n\t}\n\n\tif len(allKnown) == 0 {\n\t\tallKnown, err = r.resolver.LookupIP(endpoint.Address)\n\t\tif err != nil {\n\t\t\treturn endpoint.allKnownIPs, endpoint.currentIP, err\n\t\t}\n\t\tif len(allKnown) == 0 {\n\t\t\treturn endpoint.allKnownIPs, endpoint.currentIP, fmt.Errorf(\"no addresses returned for %s\", endpoint.Address)\n\t\t}\n\t}\n\n\tfor _, addr := range allKnown {\n\t\tif endpoint.currentIP != nil && endpoint.currentIP.Equal(addr) {\n\t\t\tcurrent = addr\n\t\t\tbreak\n\t\t}\n\t}\n\tif current == nil {\n\t\tcurrent = allKnown[0]\n\t}\n\n\tr.mu.Lock()\n\tr.cache[endpoint.Address] = resolvedCacheRecord{\n\t\tlastTimeResolved: time.Now().UTC(),\n\t\tlastResult:       allKnown,\n\t}\n\tr.mu.Unlock()\n\treturn allKnown, current, nil\n}\n\ntype ClientRoutesHandler struct {\n\tlog               StdLogger\n\tc                 controlConnection\n\tresolver          ClientRoutesResolver\n\tsub               *eventbus.Subscriber[events.Event]\n\tresolvedEndpoints atomic.Pointer[ResolvedClientRouteList]\n\tupdateTasks       chan updateTask\n\tcloseChan         chan struct{}\n\tcfg               ClientRoutesConfig\n\tpickTLSPorts      bool\n\tinitialized       bool\n}\n\nvar _ AddressTranslatorV2 = (*ClientRoutesHandler)(nil)\n\n// Translate implements old AddressTranslator interface\n// should not be uses since driver prefer AddressTranslatorV2 API if it is implemented\nfunc (p *ClientRoutesHandler) Translate(addr net.IP, port int) (net.IP, int) {\n\tpanic(\"should never be called\")\n}\n\nfunc pickProperPort(pickTLSPorts bool, rec *ResolvedClientRoute) uint16 {\n\tif pickTLSPorts {\n\t\treturn rec.SecureCQLPort\n\t}\n\treturn rec.CQLPort\n}\n\n// TranslateWithHost implements AddressTranslatorV2 interface\nfunc (p *ClientRoutesHandler) TranslateHost(host AddressTranslatorHostInfo, addr AddressPort) (AddressPort, error) {\n\thostID := 
host.HostID()\n\tif hostID == \"\" {\n\t\treturn addr, nil\n\t}\n\n\tcurrent := p.resolvedEndpoints.Load()\n\trec := current.FindByHostID(hostID)\n\tif rec == nil {\n\t\treturn addr, fmt.Errorf(\"no address found for host %s\", hostID)\n\t}\n\n\tif rec.currentIP != nil {\n\t\tport := pickProperPort(p.pickTLSPorts, rec)\n\t\tif port == 0 {\n\t\t\treturn addr, fmt.Errorf(\"record %s/%s has target port empty\", rec.HostID, rec.ConnectionID)\n\t\t}\n\t\treturn AddressPort{\n\t\t\tAddress: rec.currentIP,\n\t\t\tPort:    port,\n\t\t}, nil\n\t}\n\n\tall, currentIP, err := p.resolver.Resolve(*rec)\n\tif err != nil {\n\t\treturn addr, fmt.Errorf(\"failed to resolve DNS resolver for host %s: %v\", hostID, err)\n\t}\n\trec.allKnownIPs = all\n\trec.currentIP = currentIP\n\n\tfor {\n\t\tupdated := current.Clone()\n\t\tif updated.UpdateIfNewer(*rec) {\n\t\t\tif p.resolvedEndpoints.CompareAndSwap(current, &updated) {\n\t\t\t\tport := pickProperPort(p.pickTLSPorts, rec)\n\t\t\t\tif port == 0 {\n\t\t\t\t\treturn addr, fmt.Errorf(\"record %s/%s has target port empty\", rec.HostID, rec.ConnectionID)\n\t\t\t\t}\n\t\t\t\treturn AddressPort{\n\t\t\t\t\tAddress: rec.currentIP,\n\t\t\t\t\tPort:    port,\n\t\t\t\t}, nil\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\trec = current.FindByHostID(hostID)\n\t\tif rec == nil {\n\t\t\treturn addr, fmt.Errorf(\"no address found for host %s\", hostID)\n\t\t}\n\t\tport := pickProperPort(p.pickTLSPorts, rec)\n\t\tif port == 0 {\n\t\t\treturn addr, fmt.Errorf(\"record %s/%s has target port empty\", rec.HostID, rec.ConnectionID)\n\t\t}\n\t\treturn AddressPort{\n\t\t\tAddress: rec.currentIP,\n\t\t\tPort:    port,\n\t\t}, nil\n\t}\n}\n\nvar never = time.Unix(1<<63-1, 0)\n\ntype updateTask struct {\n\tresult        chan error\n\tconnectionIDs []string\n\thostIDs       []string\n}\n\nfunc (p *ClientRoutesHandler) Initialize(s *Session) error {\n\tif p.initialized {\n\t\treturn errors.New(\"already initialized\")\n\t}\n\tconnectionIDs := make([]string, 0, 
len(p.cfg.Endpoints))\n\tfor _, ep := range p.cfg.Endpoints {\n\t\tif ep.ConnectionID != \"\" {\n\t\t\tconnectionIDs = append(connectionIDs, ep.ConnectionID)\n\t\t}\n\t}\n\tp.c = s.control\n\tp.sub = s.eventBus.Subscribe(\"port-mux\", 1024, func(event events.Event) bool {\n\t\tswitch event.Type() {\n\t\tcase events.SessionEventTypeControlConnectionRecreated, events.ClusterEventTypeClientRoutesChanged:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t})\n\tp.startUpdateWorker()\n\tp.startReadingEvents()\n\terr := p.updateHostPortMappingSync(connectionIDs, nil)\n\tif err != nil {\n\t\tp.log.Printf(\"error updating host ports: %v\\n\", err)\n\t}\n\treturn nil\n}\n\nfunc (p *ClientRoutesHandler) Stop() {\n\tif p.updateTasks != nil {\n\t\tclose(p.updateTasks)\n\t}\n\tif p.closeChan != nil {\n\t\tclose(p.closeChan)\n\t}\n\tif p.sub != nil {\n\t\tp.sub.Stop()\n\t}\n}\n\n// resolveAndUpdateInPlace updates provided list of resolved endpoint in place\n// If it can't resolve it keeps old record as is.\n// Logic to pick a single address from all available addresses is delegated to ClientRoutesResolver at p.endpointResolver\n// It does not resolve everything, it picks endpoints that are:\n// 1. Marked via forcedResolve=true,\n// 2. Have not resolved previously and have no ip address information\n// 3. 
Was resolved more than cfg.ResolveHealthyEndpointPeriod ago.\nfunc (p *ClientRoutesHandler) resolveAndUpdateInPlace(records ResolvedClientRouteList) error {\n\tif len(records) == 0 {\n\t\treturn nil\n\t}\n\n\terrs := make([]error, len(records))\n\ttasks := make(chan int, len(records))\n\n\tvar cutoffTimeForHealthy time.Time\n\tif p.cfg.ResolveHealthyEndpointPeriod == 0 {\n\t\tcutoffTimeForHealthy = never\n\t} else {\n\t\tcutoffTimeForHealthy = time.Now().UTC().Add(-p.cfg.ResolveHealthyEndpointPeriod)\n\t}\n\n\tscheduled := false\n\tfor id, endpoint := range records {\n\t\tif endpoint.currentIP == nil || len(endpoint.allKnownIPs) == 0 || endpoint.forcedResolve {\n\t\t\tscheduled = true\n\t\t\ttasks <- id\n\t\t} else if endpoint.updateTime.Before(cutoffTimeForHealthy) {\n\t\t\tscheduled = true\n\t\t\ttasks <- id\n\t\t}\n\t}\n\n\tif !scheduled {\n\t\treturn nil\n\t}\n\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < p.cfg.MaxResolverConcurrency; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tfor id := range tasks {\n\t\t\t\tall, currentIP, err := p.resolver.Resolve(records[id])\n\t\t\t\trecords[id].updateTime = time.Now().UTC()\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs[id] = fmt.Errorf(\"resolve %s failed: %w\", records[id].currentIP, err)\n\t\t\t\t\tcontinue\n\t\t\t\t} else if len(all) == 0 {\n\t\t\t\t\terrs[id] = fmt.Errorf(\"resolve %s: no addresses returned\", records[id].currentIP)\n\t\t\t\t} else if currentIP == nil {\n\t\t\t\t\terrs[id] = fmt.Errorf(\"resolve %s: no current addres has been set, should not happen, please report a bug\", records[id].currentIP)\n\t\t\t\t} else {\n\t\t\t\t\t// Reset forcedResolve is it was resolved successfully\n\t\t\t\t\trecords[id].forcedResolve = false\n\t\t\t\t}\n\t\t\t\trecords[id].allKnownIPs = all\n\t\t\t\trecords[id].currentIP = currentIP\n\t\t\t}\n\t\t}()\n\t}\n\n\tclose(tasks)\n\twg.Wait()\n\n\treturn errors.Join(errs...)\n}\n\nfunc (p *ClientRoutesHandler) updateHostPortMappingAsync(connectionIDs 
[]string, hostIDs []string) {\n\tp.updateTasks <- updateTask{\n\t\tconnectionIDs: connectionIDs,\n\t\thostIDs:       hostIDs,\n\t}\n}\n\nfunc (p *ClientRoutesHandler) updateHostPortMappingSync(connectionIDs []string, hostIDs []string) error {\n\tresult := make(chan error, 1)\n\tp.updateTasks <- updateTask{\n\t\tconnectionIDs: connectionIDs,\n\t\thostIDs:       hostIDs,\n\t\tresult:        result,\n\t}\n\treturn <-result\n}\n\nfunc (p *ClientRoutesHandler) startReadingEvents() {\n\tconnectionIDs := p.cfg.Endpoints.GetAllConnectionIDs()\n\n\tgo func() {\n\t\tfor event := range p.sub.Events() {\n\t\t\tswitch evt := event.(type) {\n\t\t\tcase *events.ClientRoutesChangedEvent:\n\t\t\t\tif debug.Enabled {\n\t\t\t\t\tif len(evt.ConnectionIDs) == 0 {\n\t\t\t\t\t\tp.log.Printf(\"got CLIENT_ROUTES_CHANGE event with no connection IDs\")\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif len(evt.HostIDs) == 0 {\n\t\t\t\t\t\tp.log.Printf(\"got CLIENT_ROUTES_CHANGE event with no host IDs\")\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tvar newConnectionIDs []string\n\t\t\t\tfor _, connectionID := range evt.ConnectionIDs {\n\t\t\t\t\tif connectionID == \"\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif slices.ContainsFunc(p.cfg.Endpoints, func(ep ClientRoutesEndpoint) bool {\n\t\t\t\t\t\treturn ep.ConnectionID == connectionID\n\t\t\t\t\t}) {\n\t\t\t\t\t\tnewConnectionIDs = append(newConnectionIDs, connectionID)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(newConnectionIDs) != 0 {\n\t\t\t\t\tp.updateHostPortMappingAsync(newConnectionIDs, evt.HostIDs)\n\t\t\t\t}\n\t\t\tcase *events.ControlConnectionRecreatedEvent:\n\t\t\t\tp.updateHostPortMappingAsync(connectionIDs, nil)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (p *ClientRoutesHandler) startUpdateWorker() {\n\tgo func() {\n\t\tfor task := range p.updateTasks {\n\t\t\terr := p.updateHostPortMapping(task.connectionIDs, task.hostIDs)\n\t\t\tif err != nil {\n\t\t\t\tif debug.Enabled {\n\t\t\t\t\tp.log.Printf(\"failed to update host 
port mapping: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif task.result != nil {\n\t\t\t\ttask.result <- err\n\t\t\t\tclose(task.result)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (p *ClientRoutesHandler) updateHostPortMapping(connectionIDs []string, hostIDs []string) error {\n\tunresolved, err := getHostPortMappingFromCluster(p.c, p.cfg.TableName, connectionIDs, hostIDs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcurrent := p.resolvedEndpoints.Load()\n\tupdated := slices.Clone(*current)\n\tupdated.MergeWithUnresolved(unresolved)\n\terr = p.resolveAndUpdateInPlace(updated)\n\tif err != nil {\n\t\tp.log.Printf(\"failed to resolve endpoints: %v\", err)\n\t\t// Despite an error it is better to save results, it should not corrupt existing and resolved records\n\t}\n\n\t// Try to update until it successes\n\t// 10 times is more than enough, if it fails\n\tfor range 10 {\n\t\tif p.resolvedEndpoints.CompareAndSwap(current, &updated) {\n\t\t\treturn nil\n\t\t}\n\n\t\tcurrent = p.resolvedEndpoints.Load()\n\t\tupdated.MergeWithResolved(current)\n\t}\n\tp.log.Printf(\"failed to update host port mapping due to collisions\")\n\n\treturn nil\n}\n\nfunc NewClientRoutesAddressTranslator(\n\tcfg ClientRoutesConfig,\n\tresolver DNSResolver,\n\tpickTLSPorts bool,\n\tlog StdLogger,\n) *ClientRoutesHandler {\n\tres := &ClientRoutesHandler{\n\t\tcfg:          cfg,\n\t\tlog:          log,\n\t\tpickTLSPorts: pickTLSPorts,\n\t\tcloseChan:    make(chan struct{}),\n\t\tupdateTasks:  make(chan updateTask, 1024),\n\t\tresolver:     newSimpleClientRoutesResolver(cfg.ResolverCacheDuration, resolver),\n\t}\n\tres.resolvedEndpoints.Store(&ResolvedClientRouteList{})\n\treturn res\n}\n\nvar _ AddressTranslator = &ClientRoutesHandler{}\n\nfunc getHostPortMappingFromCluster(c controlConnection, table string, connectionIDs []string, hostIDs []string) (UnresolvedClientRouteList, error) {\n\tvar res UnresolvedClientRouteList\n\n\tstmt := []string{fmt.Sprintf(\"select connection_id, host_id, address, port, tls_port 
from %s\", table)}\n\tvar bounds []any\n\tif len(connectionIDs) != 0 {\n\t\tvar inClause []string\n\t\tfor _, connectionID := range connectionIDs {\n\t\t\tbounds = append(bounds, connectionID)\n\t\t\tinClause = append(inClause, \"?\")\n\t\t}\n\t\tif len(stmt) == 1 {\n\t\t\tstmt = append(stmt, \"where\")\n\t\t}\n\t\tstmt = append(stmt, fmt.Sprintf(\"connection_id in (%s)\", strings.Join(inClause, \",\")))\n\t}\n\n\tif len(hostIDs) != 0 {\n\t\tvar inClause []string\n\t\tfor _, hostID := range hostIDs {\n\t\t\tbounds = append(bounds, hostID)\n\t\t\tinClause = append(inClause, \"?\")\n\t\t}\n\t\tif len(stmt) == 1 {\n\t\t\tstmt = append(stmt, \"where\")\n\t\t} else {\n\t\t\tstmt = append(stmt, \"and\")\n\t\t}\n\t\tstmt = append(stmt, fmt.Sprintf(\"host_id in (%s)\", strings.Join(inClause, \",\")))\n\t}\n\n\tisFullScan := len(hostIDs) == 0 || len(connectionIDs) == 0\n\tif isFullScan {\n\t\tstmt = append(stmt, \"allow filtering\")\n\t}\n\n\titer := c.query(strings.Join(stmt, \" \"), bounds...)\n\tvar rec UnresolvedClientRoute\n\tfor iter.Scan(&rec.ConnectionID, &rec.HostID, &rec.Address, &rec.CQLPort, &rec.SecureCQLPort) {\n\t\tres = append(res, rec)\n\t}\n\tif err := iter.Close(); err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading %s table: %v\", table, err)\n\t}\n\treturn res, nil\n}\n"
  },
  {
    "path": "client_routes_test.go",
    "content": "//go:build integration\n// +build integration\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"sort\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n)\n\nfunc TestGetHostPortMapping(t *testing.T) {\n\tt.Parallel()\n\n\tkeyspace := testKeyspaceName(t)\n\tcluster := createCluster()\n\tcreateKeyspace(t, cluster, keyspace, true)\n\n\tcluster.Keyspace = keyspace\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create session: %v\", err)\n\t}\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\tqualifiedTable := keyspace + \".\" + table\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE %s.%s (\n    connection_id uuid,\n    host_id uuid,\n    Address text,\n    port int,\n    tls_port int,\n    alternator_port int,\n    alternator_https_port int,\n    Datacenter text,\n    Rack text,\n    PRIMARY KEY (connection_id, host_id))`, keyspace, table)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar hostIDs []string\n\tfor i := 0; i < 3; i++ {\n\t\thostIDs = append(hostIDs, MustRandomUUID().String())\n\t}\n\tvar connectionIDs []string\n\tfor i := 0; i < 3; i++ {\n\t\tconnectionIDs = append(connectionIDs, MustRandomUUID().String())\n\t}\n\n\tracks := []string{\"rack1\", \"rack2\", \"rack3\"}\n\texpected := []UnresolvedClientRoute{}\n\tfor id, hostID := range hostIDs {\n\t\track := racks[id]\n\t\tip := net.ParseIP(fmt.Sprintf(\"127.0.0.%d\", id+1))\n\t\tfor _, connectionID := range connectionIDs {\n\t\t\terr := session.Query(\n\t\t\t\tfmt.Sprintf(`INSERT INTO %s (\n                                            connection_id, host_id, Address, port, tls_port, alternator_port, alternator_https_port, Datacenter, Rack) \n\t\t\t\t\t\tVALUES (?, ?, ?, 9042, 9142, 0, 0, 'dc1', ?);`, qualifiedTable),\n\t\t\t\tconnectionID, hostID, ip.String(), rack,\n\t\t\t).Exec()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unable to insert connection metadata: %s\", err.Error())\n\t\t\t}\n\t\t\texpected = append(expected, 
UnresolvedClientRoute{\n\t\t\t\tConnectionID:  connectionID,\n\t\t\t\tHostID:        hostID,\n\t\t\t\tAddress:       ip.String(),\n\t\t\t\tCQLPort:       9042,\n\t\t\t\tSecureCQLPort: 9142,\n\t\t\t})\n\t\t}\n\t}\n\n\tsortUnresolvedHostPorts(expected)\n\n\ttcases := []struct {\n\t\tname     string\n\t\tmethod   func(controlConnection) ([]UnresolvedClientRoute, error)\n\t\texpected []UnresolvedClientRoute\n\t}{\n\t\t{\n\t\t\tname: \"get-all\",\n\t\t\tmethod: func(controlConnection) ([]UnresolvedClientRoute, error) {\n\t\t\t\treturn getHostPortMappingFromCluster(session.control, qualifiedTable, nil, nil)\n\t\t\t},\n\t\t\texpected: expected,\n\t\t},\n\t\t{\n\t\t\tname: \"get-all-hosts\",\n\t\t\tmethod: func(controlConnection) ([]UnresolvedClientRoute, error) {\n\t\t\t\treturn getHostPortMappingFromCluster(session.control, qualifiedTable, connectionIDs, nil)\n\t\t\t},\n\t\t\texpected: expected,\n\t\t},\n\t\t{\n\t\t\tname: \"get-all-connections\",\n\t\t\tmethod: func(controlConnection) ([]UnresolvedClientRoute, error) {\n\t\t\t\treturn getHostPortMappingFromCluster(session.control, qualifiedTable, nil, hostIDs)\n\t\t\t},\n\t\t\texpected: expected,\n\t\t},\n\t\t{\n\t\t\tname: \"get-concrete\",\n\t\t\tmethod: func(controlConnection) ([]UnresolvedClientRoute, error) {\n\t\t\t\treturn getHostPortMappingFromCluster(session.control, qualifiedTable, connectionIDs, hostIDs)\n\t\t\t},\n\t\t\texpected: expected,\n\t\t},\n\t\t{\n\t\t\tname: \"get-concrete-host\",\n\t\t\tmethod: func(controlConnection) ([]UnresolvedClientRoute, error) {\n\t\t\t\treturn getHostPortMappingFromCluster(session.control, qualifiedTable, connectionIDs, hostIDs)\n\t\t\t},\n\t\t\texpected: expected,\n\t\t},\n\t}\n\n\tfor _, tc := range tcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tgot, err := tc.method(session.control)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tsortUnresolvedHostPorts(got)\n\n\t\t\tif diff := cmp.Diff(got, tc.expected); diff != \"\" {\n\t\t\t\tt.Errorf(\"got 
unexpected result: %s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc sortUnresolvedHostPorts(xs []UnresolvedClientRoute) {\n\tsort.Slice(xs, func(i, j int) bool {\n\t\ta, b := xs[i], xs[j]\n\n\t\tif a.ConnectionID != b.ConnectionID {\n\t\t\treturn a.ConnectionID < b.ConnectionID // or bytes.Compare if raw [16]byte\n\t\t}\n\t\treturn a.HostID < b.HostID\n\t})\n}\n"
  },
  {
    "path": "client_routes_unit_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage gocql\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n)\n\ntype dnsResolverFunc func(string) ([]net.IP, error)\n\n// LookupIP implements DNSResolver for dnsResolverFunc.\nfunc (f dnsResolverFunc) LookupIP(host string) ([]net.IP, error) { return f(host) }\n\ntype clientRoutesResolverFunc func(endpoint ResolvedClientRoute) ([]net.IP, net.IP, error)\n\nfunc (f clientRoutesResolverFunc) Resolve(endpoint ResolvedClientRoute) ([]net.IP, net.IP, error) {\n\treturn f(endpoint)\n}\n\ntype fakeControlConn struct {\n\tstatement string\n\tvalues    []any\n}\n\nfunc (f *fakeControlConn) getConn() *connHost          { return nil }\nfunc (f *fakeControlConn) awaitSchemaAgreement() error { return nil }\nfunc (f *fakeControlConn) query(statement string, values ...any) *Iter {\n\tf.statement = statement\n\tf.values = values\n\treturn &Iter{}\n}\nfunc (f *fakeControlConn) querySystem(statement string, values ...any) *Iter {\n\treturn &Iter{}\n}\nfunc (f *fakeControlConn) discoverProtocol(hosts []*HostInfo) (int, error) { return 0, nil }\nfunc (f *fakeControlConn) connect(hosts []*HostInfo) error                 { return nil }\nfunc (f *fakeControlConn) close()                                          {}\nfunc (f *fakeControlConn) getSession() *Session                            { return nil }\nfunc (f *fakeControlConn) reconnect() error                                { return nil }\n\ntype testHostInfo struct {\n\thostID string\n}\n\nfunc (t testHostInfo) HostID() string                     { return t.hostID }\nfunc (t testHostInfo) Rack() string                       { return \"\" }\nfunc (t testHostInfo) DataCenter() string                 { return \"\" }\nfunc (t testHostInfo) BroadcastAddress() net.IP           { return nil }\nfunc (t testHostInfo) ListenAddress() net.IP              { return nil }\nfunc (t testHostInfo) RPCAddress() net.IP                 { return nil 
}\nfunc (t testHostInfo) PreferredIP() net.IP                { return nil }\nfunc (t testHostInfo) Peer() net.IP                       { return nil }\nfunc (t testHostInfo) UntranslatedConnectAddress() net.IP { return nil }\nfunc (t testHostInfo) Port() int                          { return 0 }\nfunc (t testHostInfo) Partitioner() string                { return \"\" }\nfunc (t testHostInfo) ClusterName() string                { return \"\" }\nfunc (t testHostInfo) ScyllaShardAwarePort() uint16       { return 0 }\nfunc (t testHostInfo) ScyllaShardAwarePortTLS() uint16    { return 0 }\nfunc (t testHostInfo) ScyllaShardCount() int              { return 0 }\n\nfunc TestResolvedClientRouteCloneNewerNeedsUpdate(t *testing.T) {\n\tip1 := net.ParseIP(\"127.0.0.1\")\n\tip2 := net.ParseIP(\"127.0.0.2\")\n\tbase := ResolvedClientRoute{\n\t\tUnresolvedClientRoute: UnresolvedClientRoute{\n\t\t\tConnectionID: \"c1\",\n\t\t\tHostID:       \"h1\",\n\t\t\tAddress:      \"host\",\n\t\t\tCQLPort:      9042,\n\t\t},\n\t\tallKnownIPs: []net.IP{ip1},\n\t\tcurrentIP:   ip1,\n\t\tupdateTime:  time.Unix(10, 0),\n\t}\n\n\tclone := base.Clone()\n\tclone.allKnownIPs[0][0] = 8\n\tclone.currentIP[0] = 9\n\n\tif base.allKnownIPs[0][0] == 8 {\n\t\tt.Fatalf(\"Clone should not share allKnownIPs slices\")\n\t}\n\tif base.currentIP[0] == 9 {\n\t\tt.Fatalf(\"Clone should not share currentIP slices\")\n\t}\n\n\tnewerIP := ResolvedClientRoute{currentIP: ip2}\n\tif !(ResolvedClientRoute{}).Newer(newerIP) {\n\t\tt.Fatalf(\"expected Newer to prefer non-nil currentIP\")\n\t}\n\n\tnewerTime := ResolvedClientRoute{updateTime: time.Unix(20, 0)}\n\tif !base.Newer(newerTime) {\n\t\tt.Fatalf(\"expected Newer to prefer newer updateTime\")\n\t}\n\n\tif !(ResolvedClientRoute{currentIP: nil}).NeedsUpdate() {\n\t\tt.Fatalf(\"expected NeedsUpdate for missing currentIP\")\n\t}\n\tif !(ResolvedClientRoute{currentIP: ip1}).NeedsUpdate() {\n\t\tt.Fatalf(\"expected NeedsUpdate for missing allKnownIPs\")\n\t}\n\tif 
!(ResolvedClientRoute{currentIP: ip1, allKnownIPs: []net.IP{ip1}, forcedResolve: true}).NeedsUpdate() {\n\t\tt.Fatalf(\"expected NeedsUpdate when forcedResolve is set\")\n\t}\n\tif (ResolvedClientRoute{currentIP: ip1, allKnownIPs: []net.IP{ip1}}).NeedsUpdate() {\n\t\tt.Fatalf(\"did not expect NeedsUpdate for fully resolved route\")\n\t}\n}\n\nfunc TestResolvedClientRouteListMergeWithUnresolved(t *testing.T) {\n\tlist := ResolvedClientRouteList{\n\t\t{\n\t\t\tUnresolvedClientRoute: UnresolvedClientRoute{\n\t\t\t\tConnectionID: \"c1\",\n\t\t\t\tHostID:       \"h1\",\n\t\t\t\tAddress:      \"a1\",\n\t\t\t\tCQLPort:      9042,\n\t\t\t},\n\t\t\tforcedResolve: false,\n\t\t},\n\t}\n\n\tlist.MergeWithUnresolved(UnresolvedClientRouteList{\n\t\t{\n\t\t\tConnectionID: \"c1\",\n\t\t\tHostID:       \"h1\",\n\t\t\tAddress:      \"a1\",\n\t\t\tCQLPort:      9042,\n\t\t},\n\t})\n\tif len(list) != 1 || list[0].forcedResolve {\n\t\tt.Fatalf(\"expected unchanged record when unresolved is equal\")\n\t}\n\n\tlist.MergeWithUnresolved(UnresolvedClientRouteList{\n\t\t{\n\t\t\tConnectionID: \"c1\",\n\t\t\tHostID:       \"h1\",\n\t\t\tAddress:      \"a2\",\n\t\t\tCQLPort:      9043,\n\t\t},\n\t})\n\tif list[0].Address != \"a2\" || list[0].CQLPort != 9043 || !list[0].forcedResolve {\n\t\tt.Fatalf(\"expected record to update and force resolve\")\n\t}\n\n\tlist = ResolvedClientRouteList{}\n\tlist.MergeWithUnresolved(UnresolvedClientRouteList{\n\t\t{\n\t\t\tConnectionID: \"c2\",\n\t\t\tHostID:       \"h2\",\n\t\t\tAddress:      \"a3\",\n\t\t\tCQLPort:      9044,\n\t\t},\n\t})\n\tif len(list) != 1 || !list[0].forcedResolve {\n\t\tt.Fatalf(\"expected new record to be appended with forcedResolve\")\n\t}\n}\n\nfunc TestResolvedClientRouteListMergeWithResolved(t *testing.T) {\n\tolder := ResolvedClientRoute{\n\t\tUnresolvedClientRoute: UnresolvedClientRoute{ConnectionID: \"c1\", HostID: \"h1\"},\n\t\tupdateTime:            time.Unix(10, 0),\n\t}\n\tnewer := 
ResolvedClientRoute{\n\t\tUnresolvedClientRoute: UnresolvedClientRoute{ConnectionID: \"c1\", HostID: \"h1\"},\n\t\tupdateTime:            time.Unix(20, 0),\n\t\tcurrentIP:             net.ParseIP(\"10.0.0.1\"),\n\t}\n\n\tlist := ResolvedClientRouteList{older}\n\tother := ResolvedClientRouteList{newer, {UnresolvedClientRoute: UnresolvedClientRoute{ConnectionID: \"c2\", HostID: \"h2\"}}}\n\tlist.MergeWithResolved(&other)\n\n\tif list[0].updateTime != newer.updateTime || list[0].currentIP == nil {\n\t\tt.Fatalf(\"expected newer record to replace older one\")\n\t}\n\tif len(list) != 2 {\n\t\tt.Fatalf(\"expected new record to be appended\")\n\t}\n\n\tlist = ResolvedClientRouteList{newer}\n\tstale := ResolvedClientRouteList{older}\n\tlist.MergeWithResolved(&stale)\n\tif list[0].updateTime != newer.updateTime {\n\t\tt.Fatalf(\"expected newer record to be preserved when other is stale\")\n\t}\n}\n\nfunc TestResolvedClientRouteListUpdateIfNewerAndFindByHostID(t *testing.T) {\n\tlist := ResolvedClientRouteList{{\n\t\tUnresolvedClientRoute: UnresolvedClientRoute{ConnectionID: \"c1\", HostID: \"h1\"},\n\t\tupdateTime:            time.Unix(10, 0),\n\t}}\n\n\tolder := ResolvedClientRoute{UnresolvedClientRoute: UnresolvedClientRoute{ConnectionID: \"c1\", HostID: \"h1\"}, updateTime: time.Unix(5, 0)}\n\tif list.UpdateIfNewer(older) {\n\t\tt.Fatalf(\"expected UpdateIfNewer to ignore older record\")\n\t}\n\n\tnewer := ResolvedClientRoute{UnresolvedClientRoute: UnresolvedClientRoute{ConnectionID: \"c1\", HostID: \"h1\"}, updateTime: time.Unix(15, 0)}\n\tif !list.UpdateIfNewer(newer) {\n\t\tt.Fatalf(\"expected UpdateIfNewer to accept newer record\")\n\t}\n\n\trec := list.FindByHostID(\"h1\")\n\tif rec == nil {\n\t\tt.Fatalf(\"expected FindByHostID to locate record\")\n\t}\n\trec.ConnectionID = \"updated\"\n\tif list[0].ConnectionID != \"updated\" {\n\t\tt.Fatalf(\"expected FindByHostID to return pointer to list element\")\n\t}\n}\n\nfunc TestSimpleClientRoutesResolverResolve(t 
*testing.T) {\n\tcalls := 0\n\tresolver := dnsResolverFunc(func(host string) ([]net.IP, error) {\n\t\tcalls++\n\t\treturn []net.IP{net.ParseIP(\"10.0.0.1\"), net.ParseIP(\"10.0.0.2\")}, nil\n\t})\n\n\tres := newSimpleClientRoutesResolver(time.Hour, resolver)\n\tendpoint := ResolvedClientRoute{\n\t\tUnresolvedClientRoute: UnresolvedClientRoute{Address: \"example\"},\n\t\tcurrentIP:             net.ParseIP(\"10.0.0.2\"),\n\t}\n\n\tall, current, err := res.Resolve(endpoint)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif calls != 1 {\n\t\tt.Fatalf(\"expected resolver to be called once, got %d\", calls)\n\t}\n\tif current == nil || !current.Equal(endpoint.currentIP) {\n\t\tt.Fatalf(\"expected currentIP to be preserved when present\")\n\t}\n\tif len(all) != 2 {\n\t\tt.Fatalf(\"expected allKnownIPs to be returned\")\n\t}\n\n\t_, _, err = res.Resolve(endpoint)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error from cached resolve: %v\", err)\n\t}\n\tif calls != 1 {\n\t\tt.Fatalf(\"expected cached resolve to avoid LookupIP, got %d\", calls)\n\t}\n\n\tresolveErr := errors.New(\"resolve failed\")\n\terrorResolver := dnsResolverFunc(func(host string) ([]net.IP, error) {\n\t\treturn nil, resolveErr\n\t})\n\terrorRes := newSimpleClientRoutesResolver(0, errorResolver)\n\tendpoint.allKnownIPs = []net.IP{net.ParseIP(\"10.0.0.9\")}\n\tall, current, err = errorRes.Resolve(endpoint)\n\tif !errors.Is(err, resolveErr) {\n\t\tt.Fatalf(\"expected resolver error to propagate\")\n\t}\n\tif len(all) != 1 || current == nil || !current.Equal(endpoint.currentIP) {\n\t\tt.Fatalf(\"expected existing values to be returned on error\")\n\t}\n\n\temptyResolver := dnsResolverFunc(func(host string) ([]net.IP, error) {\n\t\treturn []net.IP{}, nil\n\t})\n\temptyRes := newSimpleClientRoutesResolver(0, emptyResolver)\n\t_, _, err = emptyRes.Resolve(ResolvedClientRoute{UnresolvedClientRoute: UnresolvedClientRoute{Address: \"example\"}})\n\tif err == nil 
{\n\t\tt.Fatalf(\"expected error when resolver returns empty list\")\n\t}\n}\n\nfunc TestClientRoutesHandlerTranslateHost(t *testing.T) {\n\taddr := AddressPort{Address: net.ParseIP(\"1.1.1.1\"), Port: 9042}\n\tnoHost := testHostInfo{hostID: \"\"}\n\tmissingHost := testHostInfo{hostID: \"missing\"}\n\n\thandler := &ClientRoutesHandler{}\n\thandler.resolvedEndpoints.Store(&ResolvedClientRouteList{})\n\n\tres, err := handler.TranslateHost(noHost, addr)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error for empty hostID: %v\", err)\n\t}\n\tif !res.Equal(addr) {\n\t\tt.Fatalf(\"expected address to pass through when hostID is empty\")\n\t}\n\n\t_, err = handler.TranslateHost(missingHost, addr)\n\tif err == nil {\n\t\tt.Fatalf(\"expected error for missing host entry\")\n\t}\n\n\tresolvedList := ResolvedClientRouteList{\n\t\t{\n\t\t\tUnresolvedClientRoute: UnresolvedClientRoute{ConnectionID: \"c1\", HostID: \"h1\", CQLPort: 9042, SecureCQLPort: 9142},\n\t\t\tcurrentIP:             net.ParseIP(\"10.0.0.1\"),\n\t\t},\n\t}\n\n\thandler.pickTLSPorts = false\n\thandler.resolvedEndpoints.Store(&resolvedList)\n\tres, err = handler.TranslateHost(testHostInfo{hostID: \"h1\"}, addr)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif res.Port != 9042 {\n\t\tt.Fatalf(\"expected non-TLS port, got %d\", res.Port)\n\t}\n\n\thandler.pickTLSPorts = true\n\tres, err = handler.TranslateHost(testHostInfo{hostID: \"h1\"}, addr)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif res.Port != 9142 {\n\t\tt.Fatalf(\"expected TLS port, got %d\", res.Port)\n\t}\n\n\terrorHandler := &ClientRoutesHandler{\n\t\tresolver: clientRoutesResolverFunc(func(endpoint ResolvedClientRoute) ([]net.IP, net.IP, error) {\n\t\t\treturn nil, nil, errors.New(\"lookup failed\")\n\t\t}),\n\t}\n\terrorList := ResolvedClientRouteList{{UnresolvedClientRoute: UnresolvedClientRoute{ConnectionID: \"c2\", HostID: \"h2\", Address: 
\"host\"}}}\n\terrorHandler.resolvedEndpoints.Store(&errorList)\n\t_, err = errorHandler.TranslateHost(testHostInfo{hostID: \"h2\"}, addr)\n\tif err == nil {\n\t\tt.Fatalf(\"expected resolver error to bubble up\")\n\t}\n}\n\nfunc TestClientRoutesHandlerTranslateHost_CASCollision(t *testing.T) {\n\taddr := AddressPort{Address: net.ParseIP(\"1.1.1.1\"), Port: 9042}\n\tresolverStarted := make(chan struct{})\n\treleaseResolver := make(chan struct{})\n\tresolver := clientRoutesResolverFunc(func(endpoint ResolvedClientRoute) ([]net.IP, net.IP, error) {\n\t\tclose(resolverStarted)\n\t\t<-releaseResolver\n\t\tip := net.ParseIP(\"10.0.0.1\")\n\t\treturn []net.IP{ip}, ip, nil\n\t})\n\n\thandler := &ClientRoutesHandler{resolver: resolver, pickTLSPorts: false}\n\torigList := ResolvedClientRouteList{{UnresolvedClientRoute: UnresolvedClientRoute{ConnectionID: \"c1\", HostID: \"h1\", Address: \"host\", CQLPort: 9042}}}\n\thandler.resolvedEndpoints.Store(&origList)\n\n\tdone := make(chan error, 1)\n\tgo func() {\n\t\t_, err := handler.TranslateHost(testHostInfo{hostID: \"h1\"}, addr)\n\t\tdone <- err\n\t}()\n\n\t<-resolverStarted\n\taltList := ResolvedClientRouteList{}\n\thandler.resolvedEndpoints.Store(&altList)\n\tclose(releaseResolver)\n\ttime.Sleep(10 * time.Millisecond)\n\thandler.resolvedEndpoints.Store(&origList)\n\n\tselect {\n\tcase err := <-done:\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error after CAS collision: %v\", err)\n\t\t}\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatal(\"TranslateHost timed out after CAS collision\")\n\t}\n}\n\nfunc TestClientRoutesHandlerResolveAndUpdateInPlace(t *testing.T) {\n\tvar inFlight int32\n\tvar maxInFlight int32\n\tcalled := make(chan string, 4)\n\tresolveErr := errors.New(\"resolve error\")\n\n\tresolver := clientRoutesResolverFunc(func(endpoint ResolvedClientRoute) ([]net.IP, net.IP, error) {\n\t\tcurr := atomic.AddInt32(&inFlight, 1)\n\t\tfor {\n\t\t\tprev := atomic.LoadInt32(&maxInFlight)\n\t\t\tif curr > prev && 
atomic.CompareAndSwapInt32(&maxInFlight, prev, curr) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif curr <= prev {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tdefer atomic.AddInt32(&inFlight, -1)\n\n\t\tcalled <- endpoint.Address\n\t\ttime.Sleep(10 * time.Millisecond)\n\t\tif endpoint.Address == \"err\" {\n\t\t\treturn nil, nil, resolveErr\n\t\t}\n\t\tip := net.ParseIP(\"10.0.0.1\")\n\t\treturn []net.IP{ip}, ip, nil\n\t})\n\n\thandler := &ClientRoutesHandler{\n\t\tresolver: resolver,\n\t\tcfg: ClientRoutesConfig{\n\t\t\tMaxResolverConcurrency:       2,\n\t\t\tResolveHealthyEndpointPeriod: time.Hour,\n\t\t},\n\t}\n\n\tnow := time.Now().UTC()\n\tip := net.ParseIP(\"10.0.0.2\")\n\trecords := ResolvedClientRouteList{\n\t\t{\n\t\t\tUnresolvedClientRoute: UnresolvedClientRoute{ConnectionID: \"c1\", HostID: \"h1\", Address: \"healthy\"},\n\t\t\tcurrentIP:             ip,\n\t\t\tallKnownIPs:           []net.IP{ip},\n\t\t\tupdateTime:            now,\n\t\t},\n\t\t{\n\t\t\tUnresolvedClientRoute: UnresolvedClientRoute{ConnectionID: \"c2\", HostID: \"h2\", Address: \"forced\"},\n\t\t\tforcedResolve:         true,\n\t\t},\n\t\t{\n\t\t\tUnresolvedClientRoute: UnresolvedClientRoute{ConnectionID: \"c3\", HostID: \"h3\", Address: \"empty\"},\n\t\t},\n\t\t{\n\t\t\tUnresolvedClientRoute: UnresolvedClientRoute{ConnectionID: \"c4\", HostID: \"h4\", Address: \"stale\"},\n\t\t\tcurrentIP:             ip,\n\t\t\tallKnownIPs:           []net.IP{ip},\n\t\t\tupdateTime:            now.Add(-2 * time.Hour),\n\t\t},\n\t\t{\n\t\t\tUnresolvedClientRoute: UnresolvedClientRoute{ConnectionID: \"c5\", HostID: \"h5\", Address: \"err\"},\n\t\t},\n\t}\n\n\terr := handler.resolveAndUpdateInPlace(records)\n\tif err == nil || !errors.Is(err, resolveErr) {\n\t\tt.Fatalf(\"expected aggregated error to include resolver error\")\n\t}\n\tclose(called)\n\n\tcalledMap := map[string]bool{}\n\tfor addr := range called {\n\t\tcalledMap[addr] = true\n\t}\n\n\tif calledMap[\"healthy\"] {\n\t\tt.Fatalf(\"did not expect healthy endpoint 
to be resolved\")\n\t}\n\tfor _, addr := range []string{\"forced\", \"empty\", \"stale\", \"err\"} {\n\t\tif !calledMap[addr] {\n\t\t\tt.Fatalf(\"expected resolver to be called for %s\", addr)\n\t\t}\n\t}\n\n\tif atomic.LoadInt32(&maxInFlight) > int32(handler.cfg.MaxResolverConcurrency) {\n\t\tt.Fatalf(\"expected max concurrency <= %d, got %d\", handler.cfg.MaxResolverConcurrency, maxInFlight)\n\t}\n\n\tif records[1].currentIP == nil || len(records[1].allKnownIPs) == 0 || records[1].forcedResolve {\n\t\tt.Fatalf(\"expected forced endpoint to be resolved and forcedResolve cleared\")\n\t}\n}\n\nfunc TestGetHostPortMappingFromClusterQuery(t *testing.T) {\n\ttcases := []struct {\n\t\tname          string\n\t\tconnectionIDs []string\n\t\thostIDs       []string\n\t\texpectedStmt  string\n\t\texpectedVals  []any\n\t}{\n\t\t{\n\t\t\tname:         \"all\",\n\t\t\texpectedStmt: \"select connection_id, host_id, address, port, tls_port from system.client_routes allow filtering\",\n\t\t},\n\t\t{\n\t\t\tname:          \"connections-only\",\n\t\t\tconnectionIDs: []string{\"c1\", \"c2\"},\n\t\t\texpectedStmt:  \"select connection_id, host_id, address, port, tls_port from system.client_routes where connection_id in (?,?) allow filtering\",\n\t\t\texpectedVals:  []any{\"c1\", \"c2\"},\n\t\t},\n\t\t{\n\t\t\tname:         \"hosts-only\",\n\t\t\thostIDs:      []string{\"h1\"},\n\t\t\texpectedStmt: \"select connection_id, host_id, address, port, tls_port from system.client_routes where host_id in (?) allow filtering\",\n\t\t\texpectedVals: []any{\"h1\"},\n\t\t},\n\t\t{\n\t\t\tname:          \"connections-and-hosts\",\n\t\t\tconnectionIDs: []string{\"c1\"},\n\t\t\thostIDs:       []string{\"h1\", \"h2\"},\n\t\t\texpectedStmt:  \"select connection_id, host_id, address, port, tls_port from system.client_routes where connection_id in (?) 
and host_id in (?,?)\",\n\t\t\texpectedVals:  []any{\"c1\", \"h1\", \"h2\"},\n\t\t},\n\t\t{\n\t\t\tname:          \"empty-slices\",\n\t\t\tconnectionIDs: []string{},\n\t\t\thostIDs:       []string{},\n\t\t\texpectedStmt:  \"select connection_id, host_id, address, port, tls_port from system.client_routes allow filtering\",\n\t\t},\n\t}\n\n\tfor _, tc := range tcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tctrl := &fakeControlConn{}\n\t\t\t_, err := getHostPortMappingFromCluster(ctrl, \"system.client_routes\", tc.connectionIDs, tc.hostIDs)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t\t}\n\t\t\tif ctrl.statement != tc.expectedStmt {\n\t\t\t\tt.Fatalf(\"statement mismatch: got %q want %q\", ctrl.statement, tc.expectedStmt)\n\t\t\t}\n\t\t\tif fmt.Sprint(ctrl.values) != fmt.Sprint(tc.expectedVals) {\n\t\t\t\tt.Fatalf(\"values mismatch: got %v want %v\", ctrl.values, tc.expectedVals)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "cloud_cluster_test.go",
    "content": "//go:build integration\n// +build integration\n\npackage gocql_test\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"sigs.k8s.io/yaml\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/scyllacloud\"\n)\n\nfunc TestCloudConnection(t *testing.T) {\n\tt.Parallel()\n\n\tif !*gocql.FlagRunSslTest {\n\t\tt.Skip(\"Skipping because SSL is not enabled on cluster\")\n\t}\n\n\tif *gocql.FlagDistribution != \"scylla\" {\n\t\tt.Skip(\"Skipping because it is designed for scylla, but running on something else\")\n\t}\n\n\tconst (\n\t\tsslPort        = 9142\n\t\tdatacenterName = \"datacenter1\"\n\t)\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\thosts := map[string]string{}\n\n\tcluster := gocql.CreateCluster(func(config *gocql.ClusterConfig) {\n\t\tconfig.Port = sslPort\n\t})\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar localAddress string\n\tvar localHostID gocql.UUID\n\tscanner := session.Query(\"SELECT broadcast_address, host_id FROM system.local WHERE key='local'\").Iter().Scanner()\n\tif scanner.Next() {\n\t\tif err := scanner.Scan(&localAddress, &localHostID); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\thosts[localHostID.String()] = net.JoinHostPort(localAddress, fmt.Sprintf(\"%d\", sslPort))\n\t}\n\n\tvar peerAddress string\n\tvar peerHostID gocql.UUID\n\tscanner = session.Query(\"SELECT peer, host_id FROM system.peers\").Iter().Scanner()\n\tfor scanner.Next() {\n\t\tif err := scanner.Scan(&peerAddress, &peerHostID); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\thosts[peerHostID.String()] = net.JoinHostPort(peerAddress, fmt.Sprintf(\"%d\", sslPort))\n\t}\n\n\tsession.Close()\n\n\tlogger := gocql.TestLogger\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tos.Stdout.WriteString(logger.String())\n\t\t}\n\t}()\n\n\tproxy := &sniProxy{\n\t\thosts:          
hosts,\n\t\tdefaultBackend: net.JoinHostPort(localAddress, fmt.Sprintf(\"%d\", sslPort)),\n\t\tlogger:         logger,\n\t}\n\n\tproxyAddress, err := proxy.Run(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer proxy.Close()\n\n\tcc := &scyllacloud.ConnectionConfig{\n\t\tDatacenters: map[string]*scyllacloud.Datacenter{\n\t\t\tdatacenterName: {\n\t\t\t\tCertificateAuthorityPath: \"testdata/pki/ca.crt\",\n\t\t\t\tServer:                   proxyAddress,\n\t\t\t\tTLSServerName:            \"any\",\n\t\t\t\tNodeDomain:               \"cloud.scylladb.com\",\n\t\t\t\tInsecureSkipTLSVerify:    true,\n\t\t\t},\n\t\t},\n\t\tAuthInfos: map[string]*scyllacloud.AuthInfo{\n\t\t\t\"ai-1\": {\n\t\t\t\tUsername:              \"username\",\n\t\t\t\tPassword:              \"password\",\n\t\t\t\tClientKeyPath:         \"testdata/pki/gocql.key\",\n\t\t\t\tClientCertificatePath: \"testdata/pki/gocql.crt\",\n\t\t\t},\n\t\t},\n\t\tContexts: map[string]*scyllacloud.Context{\n\t\t\t\"default-context\": {\n\t\t\t\tAuthInfoName:   \"ai-1\",\n\t\t\t\tDatacenterName: datacenterName,\n\t\t\t},\n\t\t},\n\t\tCurrentContext: \"default-context\",\n\t}\n\n\tconfigPath, err := writeYamlToTempFile(cc)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(configPath)\n\n\tcluster, err = scyllacloud.NewCloudCluster(configPath)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Forward connections directed to node domain to our test sni proxy.\n\tcluster.Dialer = dialerContextFunc(func(ctx context.Context, network, addr string) (net.Conn, error) {\n\t\tif strings.Contains(addr, cc.Datacenters[datacenterName].NodeDomain) {\n\t\t\taddr = cc.Datacenters[datacenterName].Server\n\t\t}\n\t\treturn net.Dial(network, addr)\n\t})\n\n\tsession, err = cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := gocql.WaitUntilPoolsStopFilling(ctx, session, 10*time.Second); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tringHosts := gocql.GetRingAllHosts(session)\n\tif len(ringHosts) != 
len(hosts) {\n\t\tt.Errorf(\"expected %d hosts in ring, got %d\", len(hosts), len(ringHosts))\n\t}\n\n\tsnisCount := map[string]int{}\n\tevents := proxy.GetEvents()\n\tfor _, event := range events {\n\t\tsnisCount[event]++\n\t}\n\n\tfor hostID := range hosts {\n\t\tsni := fmt.Sprintf(\"%s.%s\", hostID, cc.Datacenters[datacenterName].NodeDomain)\n\t\tcount, ok := snisCount[sni]\n\t\tif !ok {\n\t\t\tt.Errorf(\"not found connection to host %q\", hostID)\n\t\t}\n\t\tif count != cluster.NumConns {\n\t\t\tt.Errorf(\"expected %d connections to host %q, got %d\", cluster.NumConns, sni, count)\n\t\t}\n\t}\n}\n\nfunc writeYamlToTempFile(obj any) (string, error) {\n\tf, err := os.CreateTemp(os.TempDir(), \"gocql-cloud\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"create temp file: %w\", err)\n\t}\n\tif err := f.Close(); err != nil {\n\t\treturn \"\", fmt.Errorf(\"close temp file: %w\", err)\n\t}\n\n\tbuf, err := yaml.Marshal(obj)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"marshal yaml: %w\", err)\n\t}\n\tif err := os.WriteFile(f.Name(), buf, 0600); err != nil {\n\t\treturn \"\", fmt.Errorf(\"write to file %q: %w\", f.Name(), err)\n\t}\n\n\treturn f.Name(), nil\n}\n\ntype dialerContextFunc func(ctx context.Context, network, addr string) (net.Conn, error)\n\nfunc (d dialerContextFunc) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {\n\treturn d(ctx, network, addr)\n}\n\ntype sniProxy struct {\n\thosts          map[string]string\n\tdefaultBackend string\n\tlogger         gocql.StdLogger\n\n\tlistener net.Listener\n\tevents   []string\n\tmu       sync.Mutex\n}\n\nfunc (p *sniProxy) Run(ctx context.Context) (string, error) {\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to listen: %w\", err)\n\t}\n\n\tp.listener = listener\n\n\tgo func() {\n\t\tfor {\n\t\t\tconn, err := p.listener.Accept()\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Println(\"failed to accept connection\", 
err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tgo p.handleConnection(conn)\n\t\t}\n\n\t}()\n\n\treturn listener.Addr().String(), nil\n}\n\nfunc (p *sniProxy) handleConnection(conn net.Conn) {\n\tdefer conn.Close()\n\n\tvar hello *tls.ClientHelloInfo\n\n\tpeekedBytes := &bytes.Buffer{}\n\t// Ignore error because TLS library returns it when nil TLSConfig is returned.\n\t_ = tls.Server(readOnlyConn{reader: io.TeeReader(conn, peekedBytes)}, &tls.Config{\n\t\tGetConfigForClient: func(argHello *tls.ClientHelloInfo) (*tls.Config, error) {\n\t\t\thello = &tls.ClientHelloInfo{}\n\t\t\t*hello = *argHello\n\t\t\treturn nil, nil\n\t\t},\n\t}).Handshake()\n\n\tif hello == nil {\n\t\tp.logger.Println(\"client hello not sent\")\n\t\treturn\n\t}\n\n\tp.mu.Lock()\n\tp.events = append(p.events, hello.ServerName)\n\tp.mu.Unlock()\n\n\tbackend, ok := p.hosts[hello.ServerName]\n\tif !ok {\n\t\tbackend = p.defaultBackend\n\t}\n\n\tp.logger.Println(\"Dialing backend\", backend, \"SNI\", hello.ServerName)\n\tbackendConn, err := net.Dial(\"tcp\", backend)\n\tif err != nil {\n\t\tp.logger.Println(\"failed to dial backend\", backend, err)\n\t\treturn\n\t}\n\tdefer backendConn.Close()\n\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tgo func() {\n\t\t_, _ = io.Copy(conn, backendConn)\n\t\twg.Done()\n\t}()\n\tgo func() {\n\t\t_, _ = io.Copy(backendConn, peekedBytes)\n\t\t_, _ = io.Copy(backendConn, conn)\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n}\n\nfunc (p *sniProxy) Close() error {\n\treturn p.listener.Close()\n}\n\nfunc (p *sniProxy) GetEvents() []string {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tevents := make([]string, 0, len(p.events))\n\tfor _, e := range p.events {\n\t\tevents = append(events, e)\n\t}\n\treturn events\n}\n\ntype readOnlyConn struct {\n\treader io.Reader\n}\n\nvar _ net.Conn = readOnlyConn{}\n\nfunc (conn readOnlyConn) Read(p []byte) (int, error)         { return conn.reader.Read(p) }\nfunc (conn readOnlyConn) Write(p []byte) (int, error)        { return 0, io.ErrClosedPipe }\nfunc 
(conn readOnlyConn) Close() error                       { return nil }\nfunc (conn readOnlyConn) LocalAddr() net.Addr                { return nil }\nfunc (conn readOnlyConn) RemoteAddr() net.Addr               { return nil }\nfunc (conn readOnlyConn) SetDeadline(t time.Time) error      { return nil }\nfunc (conn readOnlyConn) SetReadDeadline(t time.Time) error  { return nil }\nfunc (conn readOnlyConn) SetWriteDeadline(t time.Time) error { return nil }\n"
  },
  {
    "path": "cluster.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2012, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/internal/eventbus\"\n)\n\nconst defaultDriverName = \"ScyllaDB GoCQL Driver\"\n\n// PoolConfig configures the connection pool used by the driver, it defaults to\n// using a round-robin host selection policy and a round-robin connection selection\n// policy for each host.\ntype PoolConfig struct {\n\t// HostSelectionPolicy sets the policy for selecting which host to use for a\n\t// given query (default: RoundRobinHostPolicy())\n\t// It is not supported to use a single HostSelectionPolicy in multiple sessions\n\t// (even if you close the old session before using in a new session).\n\tHostSelectionPolicy HostSelectionPolicy\n}\n\nfunc (p PoolConfig) buildPool(session *Session) *policyConnPool {\n\treturn 
newPolicyConnPool(session)\n}\n\n// ClusterConfig is a struct to configure the default cluster implementation\n// of gocql. It has a variety of attributes that can be used to modify the\n// behavior to fit the most common use cases. Applications that require a\n// different setup must implement their own cluster.\ntype ClusterConfig struct {\n\t// BatchObserver will set the provided batch observer on all queries created from this session.\n\t// Use it to collect metrics / stats from batch queries by providing an implementation of BatchObserver.\n\tBatchObserver BatchObserver\n\t// Dialer will be used to establish all connections created for this Cluster.\n\t// If not provided, a default dialer configured with ConnectTimeout will be used.\n\t// Dialer is ignored if HostDialer is provided.\n\tDialer Dialer\n\t// ApplicationInfo reports application information to the server by inserting it into options of the STARTUP frame\n\tApplicationInfo ApplicationInfo\n\t// DNSResolver Resolves DNS names to IP addresses\n\tDNSResolver DNSResolver\n\t// Logger for this ClusterConfig.\n\t// If not specified, defaults to the gocql.defaultLogger.\n\tLogger StdLogger\n\t// HostDialer will be used to establish all connections for this Cluster.\n\t// Unlike Dialer, HostDialer is responsible for setting up the entire connection, including the TLS session.\n\t// To support shard-aware port, HostDialer should implement ShardDialer.\n\t// If not provided, Dialer will be used instead.\n\tHostDialer HostDialer\n\t// StreamObserver will be notified of stream state changes.\n\t// This can be used to track in-flight protocol requests and responses.\n\tStreamObserver StreamObserver\n\t// FrameHeaderObserver will set the provided frame header observer on all frames' headers created from this session.\n\t// Use it to collect metrics / stats from frames by providing an implementation of FrameHeaderObserver.\n\tFrameHeaderObserver FrameHeaderObserver\n\t// ConnectObserver will set the provided 
connect observer on all queries\n\t// created from this session.\n\tConnectObserver ConnectObserver\n\t// QueryObserver will set the provided query observer on all queries created from this session.\n\t// Use it to collect metrics / stats from queries by providing an implementation of QueryObserver.\n\tQueryObserver QueryObserver\n\t// AddressTranslator will translate addresses found on peer discovery and/or\n\t// node change events.\n\tAddressTranslator AddressTranslator\n\t// HostFilter will filter all incoming events for host, any which don't pass\n\t// the filter will be ignored. If set will take precedence over any options set\n\t// via Discovery\n\tHostFilter HostFilter\n\t// Compression algorithm.\n\t// Default: nil\n\tCompressor Compressor\n\t// Default: nil\n\tAuthenticator Authenticator\n\tactualSslOpts atomic.Value\n\t// PoolConfig configures the underlying connection pool, allowing the\n\t// configuration of host selection and connection selection policies.\n\tPoolConfig PoolConfig\n\t// Default retry policy to use for queries.\n\t// Default: SimpleRetryPolicy{NumRetries: 3}.\n\tRetryPolicy RetryPolicy\n\t// ConvictionPolicy decides whether to mark host as down based on the error and host info.\n\t// Default: SimpleConvictionPolicy\n\tConvictionPolicy ConvictionPolicy\n\t// Default reconnection policy to use for reconnecting before trying to mark host as down.\n\tReconnectionPolicy ReconnectionPolicy\n\t// A reconnection policy to use for reconnecting when connecting to the cluster first time.\n\tInitialReconnectionPolicy ReconnectionPolicy\n\tWarningsHandlerBuilder    WarningHandlerBuilder\n\t// SslOpts configures TLS use when HostDialer is not set.\n\t// SslOpts is ignored if HostDialer is set.\n\tSslOpts *SslOptions\n\t// An Authenticator factory. 
Can be used to create alternative authenticators.\n\t// Default: nil\n\tAuthProvider       func(h *HostInfo) (Authenticator, error)\n\tClientRoutesConfig *ClientRoutesConfig\n\t// The version of the driver that is going to be reported to the server.\n\t// Defaulted to current library version\n\tDriverVersion string\n\t// The name of the driver that is going to be reported to the server.\n\t// Default: \"ScyllaDB GoCQL Driver\"\n\tDriverName string\n\t// Initial keyspace. Optional.\n\tKeyspace string\n\t// CQL version (default: 3.0.0)\n\tCQLVersion string\n\t// addresses for the initial connections. It is recommended to use the value set in\n\t// the Cassandra config for broadcast_address or listen_address, an IP address not\n\t// a domain name. This is because events from Cassandra will use the configured IP\n\t// address, which is used to index connected hosts. If the domain name specified\n\t// resolves to more than 1 IP address then the driver may connect multiple times to\n\t// the same host, and will not mark the node being down or up from events.\n\tHosts []string\n\t// The time to wait for frames before flushing the frames connection to Cassandra.\n\t// Can help reduce syscall overhead by making less calls to write. 
Set to 0 to\n\t// disable.\n\t//\n\t// (default: 200 microseconds)\n\tWriteCoalesceWaitTime time.Duration\n\t// WriteTimeout limits the time the driver waits to write a request to a network connection.\n\t// WriteTimeout should be lower than or equal to Timeout.\n\t// WriteTimeout defaults to the value of Timeout.\n\tWriteTimeout time.Duration\n\t// The keepalive period to use, enabled if > 0 (default: 15 seconds)\n\t// SocketKeepalive is used to set up the default dialer and is ignored if Dialer or HostDialer is provided.\n\tSocketKeepalive time.Duration\n\t// If not zero, gocql attempt to reconnect known DOWN nodes in every ReconnectInterval.\n\tReconnectInterval time.Duration\n\t// The maximum amount of time to wait for schema agreement in a cluster after\n\t// receiving a schema change frame. (default: 60s)\n\tMaxWaitSchemaAgreement time.Duration\n\t// ProtoVersion sets the version of the native protocol to use, this will\n\t// enable features in the driver for specific protocol versions, generally this\n\t// should be set to a known version (2,3,4) for the cluster being connected to.\n\t//\n\t// If it is 0 or unset (the default) then the driver will attempt to discover the\n\t// highest supported protocol for the cluster. In clusters with nodes of different\n\t// versions the protocol selected is not defined (ie, it can be any of the supported in the cluster)\n\tProtoVersion int\n\t// Maximum number of inflight requests allowed per connection.\n\t// Default: 32768 for CQL v3 and newer\n\t// Default: 128 for older CQL versions\n\tMaxRequestsPerConn int\n\t// Timeout defines the maximum time to wait for a single server response.\n\t// The default is 11 seconds, which is slightly higher than the default\n\t// server-side timeout for most query types.\n\t//\n\t// When a session creates a Query or Batch, it inherits this timeout as\n\t// the request timeout.\n\t//\n\t// Important notes:\n\t// 1. 
This value should be greater than the server timeout for all queries\n\t//    you execute. Otherwise, you risk creating retry storms: the server\n\t//    may still be processing the request while the client times out and retries.\n\t// 2. This timeout does not apply during initial connection setup.\n\t//    For that, see ConnectTimeout.\n\tTimeout time.Duration\n\t// The timeout for the requests to the schema tables. (default: 60s)\n\tMetadataSchemaRequestTimeout time.Duration\n\t// ConnectTimeout limits the time spent during connection setup.\n\t// During initial connection setup, internal queries, AUTH requests will return an error if the client\n\t// does not receive a response within the ConnectTimeout period.\n\t// ConnectTimeout is applied to the connection setup queries independently.\n\t// ConnectTimeout also limits the duration of dialing a new TCP connection\n\t// in case there is no Dialer nor HostDialer configured.\n\t// ConnectTimeout has a default value of 60 seconds.\n\tConnectTimeout time.Duration\n\t// Port used when dialing.\n\t// Default: 9042\n\tPort int\n\t// The size of the connection pool for each host.\n\t// The pool filling runs in separate goroutine during the session initialization phase.\n\t// gocql will always try to get 1 connection on each host pool\n\t// during session initialization AND it will attempt\n\t// to fill each pool afterward asynchronously if NumConns > 1.\n\t// Notice: There is no guarantee that pool filling will be finished in the initialization phase.\n\t// Also, it describes a maximum number of connections at the same time.\n\t// Default: 2\n\tNumConns int\n\t// The gocql driver may hold excess shard connections to reuse them when existing connections are dropped.\n\t// This configuration variable defines the limit for such excess connections. 
Once the limit is reached,\n\t// gocql starts dropping any additional excess connections.\n\t// The limit is computed as `MaxExcessShardConnectionsRate` * <number_of_shards>.\n\tMaxExcessShardConnectionsRate float32\n\t// Maximum cache size for prepared statements globally for gocql.\n\t// Default: 1000\n\tMaxPreparedStmts int\n\t// Default page size to use for created sessions.\n\t// Default: 5000\n\tPageSize int\n\t// Maximum cache size for query info about statements for each session.\n\t// Default: 1000\n\tMaxRoutingKeyInfo int\n\t// ReadTimeout limits the time the driver waits for data from the connection.\n\t// It has only one purpose, identify faulty connection early and drop it.\n\t// Default: 11 Seconds\n\tReadTimeout time.Duration\n\t// Consistency for the serial part of queries, values can be either SERIAL or LOCAL_SERIAL.\n\t// Default: unset\n\tSerialConsistency Consistency\n\t// Default consistency level.\n\t// Default: Quorum\n\tConsistency Consistency\n\t// Configure events the driver will register for\n\tEvents struct {\n\t\t// disable registering for status events (node up/down)\n\t\tDisableNodeStatusEvents bool\n\t\t// disable registering for topology events (node added/removed/moved)\n\t\tDisableTopologyEvents bool\n\t\t// disable registering for schema events (keyspace/table/function removed/created/updated)\n\t\tDisableSchemaEvents bool\n\t}\n\t// Default idempotence for queries\n\tDefaultIdempotence bool\n\t// Sends a client side timestamp for all requests which overrides the timestamp at which it arrives at the server.\n\t// Default: true, only enabled for protocol 3 and above.\n\tDefaultTimestamp bool\n\t// DisableSkipMetadata will override the internal result metadata cache so that the driver does not\n\t// send skip_metadata for queries, this means that the result will always contain\n\t// the metadata to parse the rows and will not reuse the metadata from the prepared\n\t// statement.\n\t//\n\t// See 
https://issues.apache.org/jira/browse/CASSANDRA-10786\n\t// See https://github.com/scylladb/scylladb/issues/20860\n\t//\n\t// Default: true\n\tDisableSkipMetadata bool\n\t// DisableShardAwarePort will prevent the driver from connecting to Scylla's shard-aware port,\n\t// even if there are nodes in the cluster that support it.\n\t//\n\t// It is generally recommended to leave this option turned off because gocql can use\n\t// the shard-aware port to make the process of establishing more robust.\n\t// However, if you have a cluster with nodes which expose shard-aware port\n\t// but the port is unreachable due to network configuration issues, you can use\n\t// this option to work around the issue. Set it to true only if you neither can fix\n\t// your network nor disable shard-aware port on your nodes.\n\tDisableShardAwarePort bool\n\t// If DisableInitialHostLookup then the driver will not attempt to get host info\n\t// from the system.peers table, this will mean that the driver will connect to\n\t// hosts supplied and will not attempt to lookup the hosts information, this will\n\t// mean that data_center, rack and token information will not be available and as\n\t// such host filtering and token aware query routing will not be available.\n\tDisableInitialHostLookup bool\n\t// internal config for testing\n\tdisableControlConn bool\n\tdisableInit        bool\n\t// If IgnorePeerAddr is true and the address in system.peers does not match\n\t// the supplied host by either initial hosts or discovered via events then the\n\t// host will be replaced with the supplied address.\n\t//\n\t// For example if an event comes in with host=10.0.0.1 but when looking up that\n\t// address in system.local or system.peers returns 127.0.0.1, the peer will be\n\t// set to 10.0.0.1 which is what will be used to connect to.\n\tIgnorePeerAddr bool\n\t// An event bus configuration\n\tEventBusConfig eventbus.EventBusConfig\n}\n\ntype DNSResolver interface {\n\tLookupIP(host string) ([]net.IP, 
error)\n}\n\ntype ApplicationInfo interface {\n\tUpdateStartupOptions(map[string]string)\n}\n\ntype StaticApplicationInfo struct {\n\tapplicationName    string\n\tapplicationVersion string\n\tclientID           string\n}\n\nfunc NewStaticApplicationInfo(name, version, clientID string) *StaticApplicationInfo {\n\treturn &StaticApplicationInfo{\n\t\tapplicationName:    name,\n\t\tapplicationVersion: version,\n\t\tclientID:           clientID,\n\t}\n}\n\nfunc (i *StaticApplicationInfo) UpdateStartupOptions(opts map[string]string) {\n\tif i.applicationName != \"\" {\n\t\topts[\"APPLICATION_NAME\"] = i.applicationName\n\t}\n\tif i.applicationVersion != \"\" {\n\t\topts[\"APPLICATION_VERSION\"] = i.applicationVersion\n\t}\n\tif i.clientID != \"\" {\n\t\topts[\"CLIENT_ID\"] = i.clientID\n\t}\n}\n\ntype SimpleDNSResolver struct {\n\thostLookupPreferV4 bool\n}\n\nfunc NewSimpleDNSResolver(hostLookupPreferV4 bool) *SimpleDNSResolver {\n\treturn &SimpleDNSResolver{\n\t\thostLookupPreferV4,\n\t}\n}\n\nfunc (r SimpleDNSResolver) LookupIP(host string) ([]net.IP, error) {\n\tips, err := net.LookupIP(host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Filter to v4 addresses if any present\n\tif r.hostLookupPreferV4 {\n\t\tvar preferredIPs []net.IP\n\t\tfor _, v := range ips {\n\t\t\tif v4 := v.To4(); v4 != nil {\n\t\t\t\tpreferredIPs = append(preferredIPs, v4)\n\t\t\t}\n\t\t}\n\t\tif len(preferredIPs) != 0 {\n\t\t\tips = preferredIPs\n\t\t}\n\t}\n\treturn ips, nil\n}\n\nvar defaultDnsResolver = NewSimpleDNSResolver(os.Getenv(\"GOCQL_HOST_LOOKUP_PREFER_V4\") == \"true\")\n\ntype Dialer interface {\n\tDialContext(ctx context.Context, network, addr string) (net.Conn, error)\n}\n\n// NewCluster generates a new config for the default cluster implementation.\n//\n// The supplied hosts are used to initially connect to the cluster then the rest of\n// the ring will be automatically discovered. 
It is recommended to use the value set in\n// the Cassandra config for broadcast_address or listen_address, an IP address not\n// a domain name. This is because events from Cassandra will use the configured IP\n// address, which is used to index connected hosts. If the domain name specified\n// resolves to more than 1 IP address then the driver may connect multiple times to\n// the same host, and will not mark the node being down or up from events.\nfunc NewCluster(hosts ...string) *ClusterConfig {\n\tlogger := &defaultLogger{}\n\tcfg := &ClusterConfig{\n\t\tHosts:                         hosts,\n\t\tCQLVersion:                    \"3.0.0\",\n\t\tTimeout:                       11 * time.Second,\n\t\tConnectTimeout:                60 * time.Second,\n\t\tReadTimeout:                   11 * time.Second,\n\t\tWriteTimeout:                  11 * time.Second,\n\t\tPort:                          9042,\n\t\tMaxExcessShardConnectionsRate: 2,\n\t\tNumConns:                      2,\n\t\tConsistency:                   Quorum,\n\t\tMaxPreparedStmts:              defaultMaxPreparedStmts,\n\t\tMaxRoutingKeyInfo:             1000,\n\t\tPageSize:                      5000,\n\t\tDefaultTimestamp:              true,\n\t\tDriverName:                    defaultDriverName,\n\t\tDriverVersion:                 defaultDriverVersion,\n\t\tMaxWaitSchemaAgreement:        60 * time.Second,\n\t\tReconnectInterval:             60 * time.Second,\n\t\tConvictionPolicy:              &SimpleConvictionPolicy{},\n\t\tReconnectionPolicy:            &ConstantReconnectionPolicy{MaxRetries: 3, Interval: 1 * time.Second},\n\t\tInitialReconnectionPolicy:     &NoReconnectionPolicy{},\n\t\tSocketKeepalive:               15 * time.Second,\n\t\tWriteCoalesceWaitTime:         200 * time.Microsecond,\n\t\tMetadataSchemaRequestTimeout:  60 * time.Second,\n\t\tDisableSkipMetadata:           true,\n\t\tWarningsHandlerBuilder:        DefaultWarningHandlerBuilder,\n\t\tLogger:                        
logger,\n\t\tDNSResolver:                   defaultDnsResolver,\n\t\tEventBusConfig: eventbus.EventBusConfig{\n\t\t\tInputEventsQueueSize: 10240,\n\t\t},\n\t}\n\n\treturn cfg\n}\n\nfunc (cfg *ClusterConfig) logger() StdLogger {\n\tif cfg.Logger == nil {\n\t\treturn &defaultLogger{}\n\t}\n\treturn cfg.Logger\n}\n\n// CreateSession initializes the cluster based on this config and returns a\n// session object that can be used to interact with the database.\nfunc (cfg *ClusterConfig) CreateSession() (*Session, error) {\n\treturn NewSession(*cfg)\n}\n\nfunc (cfg *ClusterConfig) CreateSessionNonBlocking() (*Session, error) {\n\treturn NewSessionNonBlocking(*cfg)\n}\n\nfunc (cfg *ClusterConfig) filterHost(host *HostInfo) bool {\n\treturn !(cfg.HostFilter == nil || cfg.HostFilter.Accept(host))\n}\n\nfunc (cfg *ClusterConfig) ValidateAndInitSSL() error {\n\tif cfg.SslOpts == nil {\n\t\treturn nil\n\t}\n\tactualTLSConfig, err := setupTLSConfig(cfg.SslOpts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to initialize ssl configuration: %s\", err.Error())\n\t}\n\n\tcfg.actualSslOpts.Store(actualTLSConfig)\n\treturn nil\n}\n\nfunc (cfg *ClusterConfig) getActualTLSConfig() *tls.Config {\n\tval, ok := cfg.actualSslOpts.Load().(*tls.Config)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn val.Clone()\n}\n\ntype ClusterOption func(*ClusterConfig)\n\nfunc (cfg *ClusterConfig) WithOptions(opts ...ClusterOption) *ClusterConfig {\n\tfor _, opt := range opts {\n\t\topt(cfg)\n\t}\n\treturn cfg\n}\n\ntype ClientRoutesOption func(*ClientRoutesConfig)\n\nfunc WithMaxResolverConcurrency(val int) func(*ClientRoutesConfig) {\n\treturn func(cfg *ClientRoutesConfig) {\n\t\tcfg.MaxResolverConcurrency = val\n\t}\n}\n\nfunc WithResolveHealthyEndpointPeriod(val time.Duration) func(*ClientRoutesConfig) {\n\treturn func(cfg *ClientRoutesConfig) {\n\t\tcfg.ResolveHealthyEndpointPeriod = val\n\t}\n}\n\nfunc WithEndpoints(endpoints ...ClientRoutesEndpoint) func(*ClientRoutesConfig) {\n\treturn func(cfg 
*ClientRoutesConfig) {\n\t\tcfg.Endpoints = endpoints\n\t}\n}\n\nfunc WithTable(tableName string) func(*ClientRoutesConfig) {\n\treturn func(cfg *ClientRoutesConfig) {\n\t\tcfg.TableName = tableName\n\t}\n}\n\nfunc WithClientRoutes(opts ...ClientRoutesOption) func(*ClusterConfig) {\n\tpmCfg := ClientRoutesConfig{\n\t\t// Don't resolve healthy nodes by default\n\t\tResolveHealthyEndpointPeriod: 0,\n\t\tMaxResolverConcurrency:       1,\n\t\tTableName:                    \"system.client_routes\",\n\t\tResolverCacheDuration:        time.Millisecond * 500,\n\t}\n\tfor _, opt := range opts {\n\t\topt(&pmCfg)\n\t}\n\treturn func(cfg *ClusterConfig) {\n\t\tcfg.ClientRoutesConfig = &pmCfg\n\t\tif len(cfg.Hosts) == 0 {\n\t\t\tfor _, ep := range pmCfg.Endpoints {\n\t\t\t\tif ep.ConnectionAddr != \"\" {\n\t\t\t\t\tcfg.Hosts = append(cfg.Hosts, ep.ConnectionAddr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// TODO: cfg.ControlConnectionOnlyToInitialNodes\n\t}\n}\n\nfunc (cfg *ClusterConfig) Validate() error {\n\tif len(cfg.Hosts) == 0 {\n\t\treturn ErrNoHosts\n\t}\n\n\tif cfg.Authenticator != nil && cfg.AuthProvider != nil {\n\t\treturn errors.New(\"Can't use both Authenticator and AuthProvider in cluster config.\")\n\t}\n\n\tif cfg.InitialReconnectionPolicy == nil {\n\t\treturn errors.New(\"InitialReconnectionPolicy is nil\")\n\t}\n\n\tif cfg.InitialReconnectionPolicy.GetMaxRetries() <= 0 {\n\t\treturn errors.New(\"InitialReconnectionPolicy.GetMaxRetries returns negative number\")\n\t}\n\n\tif cfg.ReconnectionPolicy == nil {\n\t\treturn errors.New(\"ReconnectionPolicy is nil\")\n\t}\n\n\tif cfg.ReconnectionPolicy.GetMaxRetries() <= 0 {\n\t\treturn errors.New(\"ReconnectionPolicy.GetMaxRetries returns negative number\")\n\t}\n\n\tif cfg.PageSize < 0 {\n\t\treturn errors.New(\"PageSize should be positive number or zero\")\n\t}\n\n\tif cfg.MaxRoutingKeyInfo < 0 {\n\t\treturn errors.New(\"MaxRoutingKeyInfo should be positive number or zero\")\n\t}\n\n\tif cfg.MaxPreparedStmts < 0 
{\n\t\treturn errors.New(\"MaxPreparedStmts should be positive number or zero\")\n\t}\n\n\tif cfg.SocketKeepalive < 0 {\n\t\treturn errors.New(\"SocketKeepalive should be positive time.Duration or zero\")\n\t}\n\n\tif cfg.MaxRequestsPerConn < 0 {\n\t\treturn errors.New(\"MaxRequestsPerConn should be positive number or zero\")\n\t}\n\n\tif cfg.NumConns < 0 {\n\t\treturn errors.New(\"NumConns should be positive non-zero number or zero\")\n\t}\n\n\tif cfg.Port <= 0 || cfg.Port > 65535 {\n\t\treturn errors.New(\"Port should be a valid port number: a number between 1 and 65535\")\n\t}\n\n\tif cfg.WriteTimeout < 0 {\n\t\treturn errors.New(\"WriteTimeout should be positive time.Duration or zero\")\n\t}\n\n\tif cfg.Timeout < 0 {\n\t\treturn errors.New(\"Timeout should be positive time.Duration or zero\")\n\t}\n\n\tif cfg.ConnectTimeout < 0 {\n\t\treturn errors.New(\"ConnectTimeout should be positive time.Duration or zero\")\n\t}\n\n\tif cfg.MetadataSchemaRequestTimeout < 0 {\n\t\treturn errors.New(\"MetadataSchemaRequestTimeout should be positive time.Duration or zero\")\n\t}\n\n\tif cfg.WriteCoalesceWaitTime < 0 {\n\t\treturn errors.New(\"WriteCoalesceWaitTime should be positive time.Duration or zero\")\n\t}\n\n\tif cfg.ReconnectInterval < 0 {\n\t\treturn errors.New(\"ReconnectInterval should be positive time.Duration or zero\")\n\t}\n\n\tif cfg.MaxWaitSchemaAgreement < 0 {\n\t\treturn errors.New(\"MaxWaitSchemaAgreement should be positive time.Duration or zero\")\n\t}\n\n\tif cfg.ProtoVersion < 0 {\n\t\treturn errors.New(\"ProtoVersion should be positive number or zero\")\n\t}\n\n\tif !cfg.DisableSkipMetadata {\n\t\tcfg.Logger.Println(\"warning: enabling skipping metadata can lead to unpredictable results when executing query and altering columns involved in the query.\")\n\t}\n\n\tif cfg.SerialConsistency > 0 && !cfg.SerialConsistency.IsSerial() {\n\t\treturn fmt.Errorf(\"the default SerialConsistency level is not allowed to be anything else but SERIAL or LOCAL_SERIAL. 
Recived value: %v\", cfg.SerialConsistency)\n\t}\n\n\tif cfg.DNSResolver == nil {\n\t\treturn fmt.Errorf(\"DNSResolver is empty\")\n\t}\n\n\tif cfg.MaxExcessShardConnectionsRate < 0 {\n\t\treturn fmt.Errorf(\"MaxExcessShardConnectionsRate should be positive number or zero\")\n\t}\n\n\tif cfg.ClientRoutesConfig != nil {\n\t\tif cfg.AddressTranslator != nil {\n\t\t\treturn fmt.Errorf(\"AddressTranslator and ClientRoutesConfig should not be set at the same time\")\n\t\t}\n\t\tif err := cfg.ClientRoutesConfig.Validate(); err != nil {\n\t\t\treturn fmt.Errorf(\"ClientRoutesConfig is invalid: %v\", err)\n\t\t}\n\t}\n\n\treturn cfg.ValidateAndInitSSL()\n}\n\nvar (\n\tErrNoHosts              = errors.New(\"no hosts provided\")\n\tErrNoConnectionsStarted = errors.New(\"no connections were made when creating the session\")\n\tErrHostQueryFailed      = errors.New(\"unable to populate Hosts\")\n)\n\nfunc setupTLSConfig(sslOpts *SslOptions) (*tls.Config, error) {\n\t//  Config.InsecureSkipVerify | EnableHostVerification | Result\n\t//  Config is nil             | true                   | verify host\n\t//  Config is nil             | false                  | do not verify host\n\t//  false                     | false                  | verify host\n\t//  true                      | false                  | do not verify host\n\t//  false                     | true                   | verify host\n\t//  true                      | true                   | verify host\n\tvar tlsConfig *tls.Config\n\tif sslOpts.Config == nil {\n\t\ttlsConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: !sslOpts.EnableHostVerification,\n\t\t\t// Ticket max size is 16371 bytes, so it can grow up to 16mb max.\n\t\t\tClientSessionCache: tls.NewLRUClientSessionCache(1024),\n\t\t}\n\t} else {\n\t\t// use clone to avoid race.\n\t\ttlsConfig = sslOpts.Config.Clone()\n\t}\n\n\tif tlsConfig.InsecureSkipVerify && sslOpts.EnableHostVerification {\n\t\ttlsConfig.InsecureSkipVerify = false\n\t}\n\n\t// ca cert is 
optional\n\tif sslOpts.CaPath != \"\" {\n\t\tif tlsConfig.RootCAs == nil {\n\t\t\ttlsConfig.RootCAs = x509.NewCertPool()\n\t\t}\n\n\t\tpem, err := os.ReadFile(sslOpts.CaPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to open CA certs: %v\", err)\n\t\t}\n\n\t\tif !tlsConfig.RootCAs.AppendCertsFromPEM(pem) {\n\t\t\treturn nil, errors.New(\"failed parsing or CA certs\")\n\t\t}\n\t}\n\n\tif sslOpts.CertPath != \"\" || sslOpts.KeyPath != \"\" {\n\t\tmycert, err := tls.LoadX509KeyPair(sslOpts.CertPath, sslOpts.KeyPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to load X509 key pair: %v\", err)\n\t\t}\n\t\ttlsConfig.Certificates = append(tlsConfig.Certificates, mycert)\n\t}\n\n\treturn tlsConfig, nil\n}\n"
  },
  {
    "path": "cluster_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"net\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/internal/tests\"\n)\n\nfunc TestNewCluster_Defaults(t *testing.T) {\n\tt.Parallel()\n\n\tcfg := NewCluster()\n\ttests.AssertEqual(t, \"cluster config cql version\", \"3.0.0\", cfg.CQLVersion)\n\ttests.AssertEqual(t, \"cluster config timeout\", 11*time.Second, cfg.Timeout)\n\ttests.AssertEqual(t, \"cluster config port\", 9042, cfg.Port)\n\ttests.AssertEqual(t, \"cluster config num-conns\", 2, cfg.NumConns)\n\ttests.AssertEqual(t, \"cluster config consistency\", Quorum, cfg.Consistency)\n\ttests.AssertEqual(t, \"cluster config max prepared statements\", defaultMaxPreparedStmts, cfg.MaxPreparedStmts)\n\ttests.AssertEqual(t, \"cluster config max routing key info\", 1000, cfg.MaxRoutingKeyInfo)\n\ttests.AssertEqual(t, \"cluster config page-size\", 5000, 
cfg.PageSize)\n\ttests.AssertEqual(t, \"cluster config default timestamp\", true, cfg.DefaultTimestamp)\n\ttests.AssertEqual(t, \"cluster config max wait schema agreement\", 60*time.Second, cfg.MaxWaitSchemaAgreement)\n\ttests.AssertEqual(t, \"cluster config reconnect interval\", 60*time.Second, cfg.ReconnectInterval)\n\ttests.AssertTrue(t, \"cluster config conviction policy\",\n\t\treflect.DeepEqual(&SimpleConvictionPolicy{}, cfg.ConvictionPolicy))\n\ttests.AssertTrue(t, \"cluster config reconnection policy\",\n\t\treflect.DeepEqual(&ConstantReconnectionPolicy{MaxRetries: 3, Interval: 1 * time.Second}, cfg.ReconnectionPolicy))\n}\n\nfunc TestNewCluster_WithHosts(t *testing.T) {\n\tt.Parallel()\n\n\tcfg := NewCluster(\"addr1\", \"addr2\")\n\ttests.AssertEqual(t, \"cluster config hosts length\", 2, len(cfg.Hosts))\n\ttests.AssertEqual(t, \"cluster config host 0\", \"addr1\", cfg.Hosts[0])\n\ttests.AssertEqual(t, \"cluster config host 1\", \"addr2\", cfg.Hosts[1])\n}\n\nfunc TestClusterConfig_translateAddressAndPort_NilTranslator(t *testing.T) {\n\tt.Parallel()\n\thh := HostInfoBuilder{\n\t\tConnectAddress: net.ParseIP(\"10.0.0.1\"),\n\t\tPort:           1234,\n\t}.Build()\n\tnewAddr, err := translateAddressPort(nil, &hh, AddressPort{\n\t\tAddress: hh.UntranslatedConnectAddress(),\n\t\tPort:    uint16(hh.Port()),\n\t}, nil)\n\ttests.AssertNil(t, \"should return no error\", err)\n\ttests.AssertTrue(t, \"same address as provided\", net.ParseIP(\"10.0.0.1\").Equal(newAddr.Address))\n\ttests.AssertEqual(t, \"translated host and port\", uint16(1234), newAddr.Port)\n}\n\nfunc TestClusterConfig_translateAddressAndPort_EmptyAddr(t *testing.T) {\n\tt.Parallel()\n\n\ttranslator := staticAddressTranslator(net.ParseIP(\"10.10.10.10\"), 5432)\n\thh := HostInfoBuilder{\n\t\tConnectAddress: []byte{},\n\t\tPort:           0,\n\t}.Build()\n\tnewAddr, err := translateAddressPort(translator, &hh, AddressPort{\n\t\tAddress: hh.UntranslatedConnectAddress(),\n\t\tPort:    
uint16(hh.Port()),\n\t}, nil)\n\ttests.AssertNil(t, \"should return no error\", err)\n\ttests.AssertTrue(t, \"translated address is still empty\", len(newAddr.Address) == 0)\n\ttests.AssertEqual(t, \"translated port\", uint16(0), newAddr.Port)\n}\n\nfunc TestClusterConfig_translateAddressAndPort_Success(t *testing.T) {\n\tt.Parallel()\n\n\ttranslator := staticAddressTranslator(net.ParseIP(\"10.10.10.10\"), 5432)\n\thh := HostInfoBuilder{\n\t\tConnectAddress: net.ParseIP(\"10.0.0.1\"),\n\t\tPort:           2345,\n\t}.Build()\n\tnewAddr, err := translateAddressPort(translator, &hh, AddressPort{\n\t\tAddress: hh.UntranslatedConnectAddress(),\n\t\tPort:    uint16(hh.Port()),\n\t}, nil)\n\ttests.AssertNil(t, \"should return no error\", err)\n\ttests.AssertTrue(t, \"translated address\", net.ParseIP(\"10.10.10.10\").Equal(newAddr.Address))\n\ttests.AssertEqual(t, \"translated port\", uint16(5432), newAddr.Port)\n}\n"
  },
  {
    "path": "common_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"hash/fnv\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar (\n\tflagCluster       = flag.String(\"cluster\", \"127.0.0.1\", \"a comma-separated list of host:port tuples\")\n\tflagProto         = flag.Int(\"proto\", 0, \"protocol version\")\n\tflagCQL           = flag.String(\"cql\", \"3.0.0\", \"CQL version\")\n\tflagRF            = flag.Int(\"rf\", 1, \"replication factor for test keyspace\")\n\tclusterSize       = flag.Int(\"clusterSize\", 1, \"the expected size of the cluster\")\n\tflagRetry         = flag.Int(\"retries\", 5, \"number of times to retry queries\")\n\tflagAutoWait      = flag.Duration(\"autowait\", 1000*time.Millisecond, \"time to wait for autodiscovery to fill the hosts pool\")\n\tflagRunSslTest    = flag.Bool(\"runssl\", false, \"Set to true to run ssl test\")\n\tflagRunAuthTest 
  = flag.Bool(\"runauth\", false, \"Set to true to run authentication test\")\n\tflagCompressTest  = flag.String(\"compressor\", \"\", \"compressor to use\")\n\tflagTimeout       = flag.Duration(\"gocql.timeout\", 5*time.Second, \"sets the connection `timeout` for all operations\")\n\tflagClusterSocket = flag.String(\"cluster-socket\", \"\", \"nodes socket files separated by comma\")\n\tflagDistribution  = flag.String(\"distribution\", \"scylla\", \"database distribution - scylla or cassandra\")\n\tflagCassVersion   cassVersion\n)\n\n// integrationTestSetup is set by an init() in an integration-tagged file to run\n// one-time setup (e.g. tablet probes) before any test executes.\nvar integrationTestSetup func()\n\nfunc init() {\n\tflag.Var(&flagCassVersion, \"gocql.cversion\", \"the cassandra version being tested against\")\n\n\tlog.SetFlags(log.Lshortfile | log.LstdFlags)\n}\n\nfunc TestMain(m *testing.M) {\n\tflag.Parse()\n\tif integrationTestSetup != nil {\n\t\tintegrationTestSetup()\n\t}\n\tos.Exit(m.Run())\n}\n\nfunc getClusterHosts() []string {\n\treturn strings.Split(*flagCluster, \",\")\n}\n\nfunc addSslOptions(cluster *ClusterConfig) *ClusterConfig {\n\tif *flagRunSslTest {\n\t\tif *flagDistribution == \"cassandra\" {\n\t\t\tcluster.Port = 9042\n\t\t} else {\n\t\t\tcluster.Port = 9142\n\t\t}\n\t\tcluster.SslOpts = &SslOptions{\n\t\t\tCertPath:               \"testdata/pki/gocql.crt\",\n\t\t\tKeyPath:                \"testdata/pki/gocql.key\",\n\t\t\tCaPath:                 \"testdata/pki/ca.crt\",\n\t\t\tEnableHostVerification: false,\n\t\t}\n\t}\n\treturn cluster\n}\n\ntype OnceManager struct {\n\tkeyspaces map[string]*sync.Once\n\tmu        sync.Mutex\n}\n\nfunc NewOnceManager() *OnceManager {\n\treturn &OnceManager{\n\t\tkeyspaces: make(map[string]*sync.Once),\n\t}\n}\n\nfunc (o *OnceManager) GetOnce(key string) *sync.Once {\n\to.mu.Lock()\n\tdefer o.mu.Unlock()\n\n\tif once, exists := o.keyspaces[key]; exists {\n\t\treturn once\n\t}\n\to.keyspaces[key] 
= &sync.Once{}\n\treturn o.keyspaces[key]\n}\n\nvar initKeyspaceOnce = NewOnceManager()\n\nvar isTabletsSupportedFlag *bool\nvar isTabletsSupportedOnce sync.Once\n\nfunc isTabletsSupported() bool {\n\tisTabletsSupportedOnce.Do(probeTabletsSupported)\n\tif isTabletsSupportedFlag == nil {\n\t\treturn false\n\t}\n\treturn *isTabletsSupportedFlag\n}\n\nfunc probeTabletsSupported() {\n\ts, err := createCluster().CreateSession()\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"failed to create session: %v\", err))\n\t}\n\tdefer s.Close()\n\n\tres := make(map[string]any)\n\terr = s.Query(\"select * from system.local\").MapScan(res)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"failed to read system.local: %v\", err))\n\t}\n\n\tfeatures, _ := res[\"supported_features\"]\n\tfeaturesCasted, _ := features.(string)\n\tfor _, feature := range strings.Split(featuresCasted, \",\") {\n\t\tif feature == \"TABLETS\" {\n\t\t\tresult := true\n\t\t\tisTabletsSupportedFlag = &result\n\t\t\treturn\n\t\t}\n\t}\n\tresult := false\n\tisTabletsSupportedFlag = &result\n}\n\nvar isTabletsAutoEnabledFlag *bool\nvar isTabletsAutoEnabledOnce sync.Once\n\nfunc isTabletsAutoEnabled() bool {\n\tisTabletsAutoEnabledOnce.Do(probeTabletsAutoEnabled)\n\tif isTabletsAutoEnabledFlag == nil {\n\t\treturn false\n\t}\n\treturn *isTabletsAutoEnabledFlag\n}\n\nfunc probeTabletsAutoEnabled() {\n\ts, err := createCluster().CreateSession()\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"failed to create session: %v\", err))\n\t}\n\tdefer s.Close()\n\n\terr = s.Query(\"DROP KEYSPACE IF EXISTS gocql_check_tablets_enabled\").Exec()\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"failed to delete keyspace: %v\", err))\n\t}\n\terr = s.Query(\"CREATE KEYSPACE gocql_check_tablets_enabled WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': '1'}\").Exec()\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"failed to create keyspace: %v\", err))\n\t}\n\n\tres := make(map[string]any)\n\terr = s.Query(\"describe keyspace 
gocql_check_tablets_enabled\").MapScan(res)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"failed to describe keyspace: %v\", err))\n\t}\n\n\terr = s.Query(\"DROP KEYSPACE IF EXISTS gocql_check_tablets_enabled\").Exec()\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"failed to drop probe keyspace: %v\", err))\n\t}\n\n\tcreateStmt, _ := res[\"create_statement\"]\n\tcreateStmtCasted, _ := createStmt.(string)\n\tresult := strings.Contains(strings.ToLower(createStmtCasted), \"and tablets\")\n\tisTabletsAutoEnabledFlag = &result\n}\n\n// initTabletProbes runs the tablet-support and tablet-auto-enabled probes eagerly.\n// Called from TestMain before any tests run to avoid races with parallel test startup.\nfunc initTabletProbes() {\n\tprobeTabletsSupported()\n\tif isTabletsSupportedFlag != nil && *isTabletsSupportedFlag {\n\t\tprobeTabletsAutoEnabled()\n\t}\n}\n\nfunc createTable(s *Session, table string) error {\n\tif err := s.Query(table).RetryPolicy(&SimpleRetryPolicy{NumRetries: 3}).Idempotent(true).Exec(); err != nil {\n\t\tlog.Printf(\"error creating table table=%q err=%v\\n\", table, err)\n\t\treturn err\n\t}\n\n\tif err := s.control.awaitSchemaAgreement(); err != nil {\n\t\tlog.Printf(\"error waiting for schema agreement post create table=%q err=%v\\n\", table, err)\n\t\treturn err\n\t}\n\n\t// Invalidate schema cache to avoid races with debounced schema events.\n\t// Use per-table invalidation when possible (cheaper than keyspace-wide)\n\t// to reduce cache thrashing when parallel tests all perform DDL on the\n\t// same shared keyspace. Falls back to keyspace-wide invalidation for\n\t// non-TABLE DDL (e.g. 
DROP KEYSPACE, CREATE TYPE).\n\tks, tbl := extractKeyspaceTableFromDDL(table)\n\tif ks == \"\" {\n\t\tks = s.cfg.Keyspace\n\t}\n\tif ks != \"\" && tbl != \"\" {\n\t\ts.metadataDescriber.invalidateTableSchema(ks, tbl)\n\t} else if ks != \"\" {\n\t\ts.metadataDescriber.invalidateKeyspaceSchema(ks)\n\t}\n\n\treturn nil\n}\n\n// createTables executes multiple DDL statements with a single\n// awaitSchemaAgreement call at the end, reducing the serialization bottleneck\n// when parallel tests all need schema agreement. Each statement is still\n// executed and cache-invalidated individually.\nfunc createTables(s *Session, ddls ...string) error {\n\tfor _, ddl := range ddls {\n\t\tif err := s.Query(ddl).RetryPolicy(&SimpleRetryPolicy{NumRetries: 3}).Idempotent(true).Exec(); err != nil {\n\t\t\tlog.Printf(\"error creating table table=%q err=%v\\n\", ddl, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := s.control.awaitSchemaAgreement(); err != nil {\n\t\tlog.Printf(\"error waiting for schema agreement after batch DDL err=%v\\n\", err)\n\t\treturn err\n\t}\n\n\t// Invalidate caches for all affected tables/keyspaces.\n\tfor _, ddl := range ddls {\n\t\tks, tbl := extractKeyspaceTableFromDDL(ddl)\n\t\tif ks == \"\" {\n\t\t\tks = s.cfg.Keyspace\n\t\t}\n\t\tif ks != \"\" && tbl != \"\" {\n\t\t\ts.metadataDescriber.invalidateTableSchema(ks, tbl)\n\t\t} else if ks != \"\" {\n\t\t\ts.metadataDescriber.invalidateKeyspaceSchema(ks)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// extractKeyspaceTableFromDDL extracts the keyspace and table names from a DDL\n// statement like \"CREATE TABLE gocql_test.table_name (...)\".\n// Returns (\"\", \"\") for non-TABLE DDL or when keyspace is not qualified.\nfunc extractKeyspaceTableFromDDL(ddl string) (keyspace, table string) {\n\tupper := strings.ToUpper(ddl)\n\tidx := strings.Index(upper, \"TABLE\")\n\tif idx < 0 {\n\t\treturn \"\", \"\"\n\t}\n\trest := strings.TrimSpace(ddl[idx+len(\"TABLE\"):])\n\t// Skip optional \"IF [NOT] EXISTS\" between TABLE and 
the name.\n\tupperRest := strings.ToUpper(rest)\n\tif strings.HasPrefix(upperRest, \"IF NOT EXISTS\") {\n\t\trest = strings.TrimSpace(rest[len(\"IF NOT EXISTS\"):])\n\t} else if strings.HasPrefix(upperRest, \"IF EXISTS\") {\n\t\trest = strings.TrimSpace(rest[len(\"IF EXISTS\"):])\n\t}\n\t// Extract keyspace.table\n\tdot := strings.Index(rest, \".\")\n\tif dot < 0 {\n\t\treturn \"\", \"\"\n\t}\n\tks := rest[:dot]\n\t// Extract table name: everything after the dot until whitespace or '('\n\tnameRest := rest[dot+1:]\n\tend := strings.IndexAny(nameRest, \" \\t\\n(\")\n\tif end < 0 {\n\t\treturn ks, nameRest\n\t}\n\treturn ks, nameRest[:end]\n}\n\nfunc createCluster(opts ...func(*ClusterConfig)) *ClusterConfig {\n\tclusterHosts := getClusterHosts()\n\tcluster := NewCluster(clusterHosts...)\n\tcluster.ProtoVersion = *flagProto\n\tcluster.CQLVersion = *flagCQL\n\tcluster.Timeout = *flagTimeout\n\tcluster.Consistency = Quorum\n\tcluster.MaxWaitSchemaAgreement = 2 * time.Minute // travis might be slow\n\tif *flagRetry > 0 {\n\t\tcluster.RetryPolicy = &SimpleRetryPolicy{NumRetries: *flagRetry}\n\t}\n\n\tswitch *flagCompressTest {\n\tcase \"snappy\":\n\t\tcluster.Compressor = &SnappyCompressor{}\n\tcase \"\":\n\tdefault:\n\t\tpanic(\"invalid compressor: \" + *flagCompressTest)\n\t}\n\n\tcluster = addSslOptions(cluster)\n\n\tfor _, opt := range opts {\n\t\topt(cluster)\n\t}\n\n\treturn cluster\n}\n\nfunc createKeyspace(tb testing.TB, cluster *ClusterConfig, keyspace string, disableTablets bool) {\n\ttb.Helper()\n\n\tc := *cluster\n\tc.Keyspace = \"system\"\n\tc.Timeout = 30 * time.Second\n\t// Create a fresh policy to avoid sharing the policy instance with the caller.\n\t// Shallow copy of cluster config shares the HostSelectionPolicy pointer, which\n\t// would cause \"sharing token aware host selection policy between sessions\" panic\n\t// when both createKeyspace's session and the caller's session try to Init() it.\n\tc.PoolConfig.HostSelectionPolicy = nil\n\tsession, err := 
c.CreateSession()\n\tif err != nil {\n\t\ttb.Fatalf(\"failed to create session: %v\", err)\n\t}\n\tdefer session.Close()\n\n\terr = createTable(session, `DROP KEYSPACE IF EXISTS `+keyspace)\n\tif err != nil {\n\t\ttb.Fatalf(\"unable to drop keyspace: %v\", err)\n\t}\n\n\tquery := fmt.Sprintf(`CREATE KEYSPACE %s\n\tWITH replication = {\n\t\t'class' : 'NetworkTopologyStrategy',\n\t\t'replication_factor' : %d\n\t}`, keyspace, *flagRF)\n\n\tif isTabletsSupported() {\n\t\tif disableTablets {\n\t\t\tquery += \" AND tablets = {'enabled': false}\"\n\t\t} else if !isTabletsAutoEnabled() {\n\t\t\tquery += \" AND tablets = {'enabled': true};\"\n\t\t}\n\t}\n\n\terr = createTable(session, query)\n\tif err != nil {\n\t\ttb.Fatalf(\"unable to create table: %v\", err)\n\t}\n}\n\ntype testKeyspaceOpts struct {\n\ttabletsDisabled bool\n}\n\nfunc (o *testKeyspaceOpts) KeyspaceName() string {\n\tif o.tabletsDisabled {\n\t\treturn \"gocql_test_tablets_disabled\"\n\t}\n\treturn \"gocql_test\"\n}\n\nfunc createSessionFromClusterHelper(cluster *ClusterConfig, tb testing.TB, opts testKeyspaceOpts) *Session {\n\t// Drop and re-create the keyspace once. 
Different tests should use their own\n\t// individual tables, but can assume that the table does not exist before.\n\tinitKeyspaceOnce.GetOnce(opts.KeyspaceName()).Do(func() {\n\t\tcreateKeyspace(tb, cluster, opts.KeyspaceName(), opts.tabletsDisabled)\n\t})\n\n\tcluster.Keyspace = opts.KeyspaceName()\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\ttb.Fatalf(\"failed to create session: %v\", err)\n\t}\n\n\tif err := session.control.awaitSchemaAgreement(); err != nil {\n\t\ttb.Fatalf(\"failed to wait on schema agreement: %v\", err)\n\t}\n\n\treturn session\n}\n\nfunc getClusterSocketFile() []string {\n\tvar res []string\n\tfor _, socketFile := range strings.Split(*flagClusterSocket, \",\") {\n\t\tif socketFile != \"\" {\n\t\t\tres = append(res, socketFile)\n\t\t}\n\t}\n\treturn res\n}\n\nfunc createSessionFromClusterTabletsDisabled(cluster *ClusterConfig, tb testing.TB) *Session {\n\treturn createSessionFromClusterHelper(cluster, tb, testKeyspaceOpts{tabletsDisabled: true})\n}\n\nfunc createSessionFromCluster(cluster *ClusterConfig, tb testing.TB) *Session {\n\treturn createSessionFromClusterHelper(cluster, tb, testKeyspaceOpts{tabletsDisabled: false})\n}\n\nfunc createSession(tb testing.TB, opts ...func(config *ClusterConfig)) *Session {\n\tcluster := createCluster(opts...)\n\treturn createSessionFromCluster(cluster, tb)\n}\n\nfunc createViews(t *testing.T, session *Session) {\n\tif err := session.Query(`\n\t\tCREATE TYPE IF NOT EXISTS gocql_test.basicView (\n\t\tbirthday timestamp,\n\t\tnationality text,\n\t\tweight text,\n\t\theight text);\t`).Exec(); err != nil {\n\t\tt.Fatalf(\"failed to create view with err: %v\", err)\n\t}\n}\n\nfunc createMaterializedViews(t *testing.T, session *Session) {\n\tif flagCassVersion.Before(3, 0, 0) {\n\t\treturn\n\t}\n\ttable1 := testTableName(t, \"1\")\n\ttable2 := testTableName(t, \"2\")\n\tview1 := testTableName(t, \"view1\")\n\tview2 := testTableName(t, \"view2\")\n\tif err := 
session.Query(fmt.Sprintf(`CREATE TABLE IF NOT EXISTS gocql_test.%s (\n\t\t    userid text,\n\t\t    year int,\n\t\t    month int,\n    \t\t    PRIMARY KEY (userid));`, table1)).Exec(); err != nil {\n\t\tt.Fatalf(\"failed to create materialized view with err: %v\", err)\n\t}\n\tif err := session.Query(fmt.Sprintf(`CREATE TABLE IF NOT EXISTS gocql_test.%s (\n\t\t    userid text,\n\t\t    year int,\n\t\t    month int,\n    \t\t    PRIMARY KEY (userid));`, table2)).Exec(); err != nil {\n\t\tt.Fatalf(\"failed to create materialized view with err: %v\", err)\n\t}\n\tif err := session.Query(fmt.Sprintf(`CREATE MATERIALIZED VIEW IF NOT EXISTS gocql_test.%s AS\n\t\t   SELECT year, month, userid\n\t\t   FROM gocql_test.%s\n\t\t   WHERE year IS NOT NULL AND month IS NOT NULL AND userid IS NOT NULL\n\t\t   PRIMARY KEY (userid, year);`, view1, table1)).Exec(); err != nil {\n\t\tt.Fatalf(\"failed to create materialized view with err: %v\", err)\n\t}\n\tif err := session.Query(fmt.Sprintf(`CREATE MATERIALIZED VIEW IF NOT EXISTS gocql_test.%s AS\n\t\t   SELECT year, month, userid\n\t\t   FROM gocql_test.%s\n\t\t   WHERE year IS NOT NULL AND month IS NOT NULL AND userid IS NOT NULL\n\t\t   PRIMARY KEY (userid, year);`, view2, table2)).Exec(); err != nil {\n\t\tt.Fatalf(\"failed to create materialized view with err: %v\", err)\n\t}\n}\n\nfunc createFunctions(t *testing.T, session *Session) {\n\tfnState := testTableName(t, \"avgstate\")\n\tfnFinal := testTableName(t, \"avgfinal\")\n\tif err := session.Query(fmt.Sprintf(`\n\t\tCREATE OR REPLACE FUNCTION gocql_test.%s ( state tuple<int,bigint>, val int )\n\t\tCALLED ON NULL INPUT\n\t\tRETURNS tuple<int,bigint>\n\t\tLANGUAGE java AS\n\t\t$$if (val !=null) {state.setInt(0, state.getInt(0)+1); state.setLong(1, state.getLong(1)+val.intValue());}return state;$$;\t`, fnState)).Exec(); err != nil {\n\t\tt.Fatalf(\"failed to create function with err: %v\", err)\n\t}\n\tif err := session.Query(fmt.Sprintf(`\n\t\tCREATE OR REPLACE FUNCTION 
gocql_test.%s ( state tuple<int,bigint> )\n\t\tCALLED ON NULL INPUT\n\t\tRETURNS double\n\t\tLANGUAGE java AS\n\t\t$$double r = 0; if (state.getInt(0) == 0) return null; r = state.getLong(1); r/= state.getInt(0); return Double.valueOf(r);$$\n\t`, fnFinal)).Exec(); err != nil {\n\t\tt.Fatalf(\"failed to create function with err: %v\", err)\n\t}\n}\n\nfunc createAggregate(t *testing.T, session *Session) {\n\tfnState := testTableName(t, \"avgstate\")\n\tfnFinal := testTableName(t, \"avgfinal\")\n\taggName := testTableName(t, \"average\")\n\taggName2 := testTableName(t, \"average2\")\n\tcreateFunctions(t, session)\n\tif err := session.Query(fmt.Sprintf(`\n\t\tCREATE OR REPLACE AGGREGATE gocql_test.%s(int)\n\t\tSFUNC %s\n\t\tSTYPE tuple<int,bigint>\n\t\tFINALFUNC %s\n\t\tINITCOND (0,0);\n\t`, aggName, fnState, fnFinal)).Exec(); err != nil {\n\t\tt.Fatalf(\"failed to create aggregate with err: %v\", err)\n\t}\n\tif err := session.Query(fmt.Sprintf(`\n\t\tCREATE OR REPLACE AGGREGATE gocql_test.%s(int)\n\t\tSFUNC %s\n\t\tSTYPE tuple<int,bigint>\n\t\tFINALFUNC %s\n\t\tINITCOND (0,0);\n\t`, aggName2, fnState, fnFinal)).Exec(); err != nil {\n\t\tt.Fatalf(\"failed to create aggregate with err: %v\", err)\n\t}\n}\n\nconst maxCQLIdentifierLen = 48\nconst testTableNameHashLen = 16\n\n// testTableName builds a CQL-safe table name from t.Name() and optional parts.\n// Truncates to 48 chars (CQL limit) using <first-n>_<fnv64a hash>_<last-n>\n// when needed.\nfunc testTableName(t testing.TB, parts ...string) string {\n\tname := strings.ToLower(t.Name())\n\tfor _, p := range parts {\n\t\tname += \"_\" + strings.ToLower(p)\n\t}\n\n\tvar b strings.Builder\n\tprevUnderscore := false\n\tfor _, r := range name {\n\t\tif (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') {\n\t\t\tb.WriteRune(r)\n\t\t\tprevUnderscore = false\n\t\t} else if !prevUnderscore {\n\t\t\tb.WriteByte('_')\n\t\t\tprevUnderscore = true\n\t\t}\n\t}\n\tname = strings.Trim(b.String(), \"_\")\n\n\tif len(name) > 
maxCQLIdentifierLen {\n\t\th := fnv.New64a()\n\t\th.Write([]byte(name))\n\t\thash := fmt.Sprintf(\"%016x\", h.Sum64()) // 16 hex chars for better collision resistance\n\t\tremaining := maxCQLIdentifierLen - testTableNameHashLen - 2\n\t\tprefixLen := remaining / 2\n\t\tsuffixLen := remaining - prefixLen\n\t\tname = name[:prefixLen] + \"_\" + hash + \"_\" + name[len(name)-suffixLen:]\n\t}\n\treturn name\n}\n\n// testTypeName builds a CQL-safe UDT type name from t.Name() and optional parts.\n// Analogous to testTableName but intended for CREATE TYPE / frozen<type> references.\nfunc testTypeName(t testing.TB, parts ...string) string {\n\treturn testTableName(t, parts...)\n}\n\n// testKeyspaceName builds a CQL-safe keyspace name from t.Name() and optional parts.\n// Analogous to testTableName but intended for CREATE/DROP KEYSPACE statements.\nfunc testKeyspaceName(t testing.TB, parts ...string) string {\n\treturn testTableName(t, parts...)\n}\n\nfunc staticAddressTranslator(newAddr net.IP, newPort int) AddressTranslator {\n\treturn AddressTranslatorFunc(func(addr net.IP, port int) (net.IP, int) {\n\t\treturn newAddr, newPort\n\t})\n}\n"
  },
  {
    "path": "compressor.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"github.com/klauspost/compress/s2\"\n)\n\ntype Compressor interface {\n\tName() string\n\tEncode(data []byte) ([]byte, error)\n\tDecode(data []byte) ([]byte, error)\n}\n\n// SnappyCompressor implements the Compressor interface and can be used to\n// compress incoming and outgoing frames. It uses S2 compression algorithm\n// that is compatible with snappy and aims for high throughput, which is why\n// it features concurrent compression for bigger payloads.\ntype SnappyCompressor struct{}\n\nfunc (s SnappyCompressor) Name() string {\n\treturn \"snappy\"\n}\n\nfunc (s SnappyCompressor) Encode(data []byte) ([]byte, error) {\n\treturn s2.EncodeSnappy(nil, data), nil\n}\n\nfunc (s SnappyCompressor) Decode(data []byte) ([]byte, error) {\n\treturn s2.Decode(nil, data)\n}\n"
  },
  {
    "path": "compressor_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql_test\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/klauspost/compress/s2\"\n\n\t\"github.com/gocql/gocql\"\n)\n\ntype frameExample struct {\n\tName     string\n\tFrame    []byte\n\tFilePath string\n}\n\nvar frameExamples = struct {\n\tRequests  []frameExample\n\tResponses []frameExample\n}{\n\tRequests: []frameExample{\n\t\t{\n\t\t\tName:     \"Small query request\",\n\t\t\tFilePath: \"testdata/frames/small_query_request.bin\",\n\t\t},\n\t\t{\n\t\t\tName:     \"Medium query request\",\n\t\t\tFilePath: \"testdata/frames/medium_query_request.bin\",\n\t\t},\n\t\t{\n\t\t\tName:     \"Big query request\",\n\t\t\tFilePath: \"testdata/frames/big_query_request.bin\",\n\t\t},\n\t\t{\n\t\t\tName:     \"Prepare statement request\",\n\t\t\tFilePath: \"testdata/frames/prepare_statement_request.bin\",\n\t\t},\n\t},\n\tResponses: 
[]frameExample{\n\t\t{\n\t\t\tName:     \"Small query response\",\n\t\t\tFilePath: \"testdata/frames/small_query_response.bin\",\n\t\t},\n\t\t{\n\t\t\tName:     \"Medium query response\",\n\t\t\tFilePath: \"testdata/frames/medium_query_response.bin\",\n\t\t},\n\t\t{\n\t\t\tName:     \"Big query response\",\n\t\t\tFilePath: \"testdata/frames/big_query_response.bin\",\n\t\t},\n\t\t{\n\t\t\tName:     \"Prepare statement response\",\n\t\t\tFilePath: \"testdata/frames/prepare_statement_response.bin\",\n\t\t},\n\t},\n}\n\nfunc TestSnappyCompressor(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"basic\", func(t *testing.T) {\n\t\tc := gocql.SnappyCompressor{}\n\t\tif c.Name() != \"snappy\" {\n\t\t\tt.Fatalf(\"expected name to be 'snappy', got %v\", c.Name())\n\t\t}\n\n\t\tstr := \"My Test String\"\n\t\t//Test Encoding with S2 library, Snappy compatible encoding.\n\t\texpected := s2.EncodeSnappy(nil, []byte(str))\n\t\tif res, err := c.Encode([]byte(str)); err != nil {\n\t\t\tt.Fatalf(\"failed to encode '%v' with error %v\", str, err)\n\t\t} else if bytes.Compare(expected, res) != 0 {\n\t\t\tt.Fatal(\"failed to match the expected encoded value with the result encoded value.\")\n\t\t}\n\n\t\tval, err := c.Encode([]byte(str))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to encode '%v' with error '%v'\", str, err)\n\t\t}\n\n\t\t//Test Decoding with S2 library, Snappy compatible encoding.\n\t\tif expected, err := s2.Decode(nil, val); err != nil {\n\t\t\tt.Fatalf(\"failed to decode '%v' with error %v\", val, err)\n\t\t} else if res, err := c.Decode(val); err != nil {\n\t\t\tt.Fatalf(\"failed to decode '%v' with error %v\", val, err)\n\t\t} else if bytes.Compare(expected, res) != 0 {\n\t\t\tt.Fatal(\"failed to match the expected decoded value with the result decoded value.\")\n\t\t}\n\t})\n\n\tt.Run(\"frame-examples\", func(t *testing.T) {\n\t\tc := gocql.SnappyCompressor{}\n\n\t\tt.Run(\"Encode\", func(t *testing.T) {\n\t\t\tfor id := range frameExamples.Requests {\n\t\t\t\tframe 
:= frameExamples.Requests[id]\n\t\t\t\tt.Run(frame.Name, func(t *testing.T) {\n\t\t\t\t\tt.Parallel()\n\n\t\t\t\t\tencoded, err := c.Encode(frame.Frame)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatalf(\"failed to encode frame %s\", frame.Name)\n\t\t\t\t\t}\n\t\t\t\t\tdecoded, err := c.Decode(encoded)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatalf(\"failed to decode frame %s\", frame.Name)\n\t\t\t\t\t}\n\n\t\t\t\t\tif bytes.Compare(decoded, frame.Frame) != 0 {\n\t\t\t\t\t\tt.Fatalf(\"failed to match the decoded value with the original value\")\n\t\t\t\t\t}\n\t\t\t\t\tt.Logf(\"Compression rate %f\", float64(len(encoded))/float64(len(frame.Frame)))\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"Decode\", func(t *testing.T) {\n\t\t\tfor id := range frameExamples.Responses {\n\t\t\t\tframe := frameExamples.Responses[id]\n\t\t\t\tt.Run(frame.Name, func(t *testing.T) {\n\t\t\t\t\tt.Parallel()\n\n\t\t\t\t\tdecoded, err := c.Decode(frame.Frame)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatalf(\"failed to decode frame %s\", frame.Name)\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(decoded) == 0 {\n\t\t\t\t\t\tt.Fatalf(\"frame was decoded to empty slice\")\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc BenchmarkSnappyCompressor(b *testing.B) {\n\tc := gocql.SnappyCompressor{}\n\tb.Run(\"Decode\", func(b *testing.B) {\n\t\tfor _, frame := range frameExamples.Responses {\n\t\t\tb.Run(frame.Name, func(b *testing.B) {\n\t\t\t\tfor x := 0; x < b.N; x++ {\n\t\t\t\t\t_, _ = c.Decode(frame.Frame)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tb.Run(\"Encode\", func(b *testing.B) {\n\t\tfor _, frame := range frameExamples.Requests {\n\t\t\tb.Run(frame.Name, func(b *testing.B) {\n\t\t\t\tfor x := 0; x < b.N; x++ {\n\t\t\t\t\t_, _ = c.Encode(frame.Frame)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc init() {\n\tvar err error\n\tfor id, def := range frameExamples.Requests {\n\t\tframeExamples.Requests[id].Frame, err = os.ReadFile(def.FilePath)\n\t\tif err != nil {\n\t\t\tpanic(\"can't read file \" 
+ def.FilePath)\n\t\t}\n\t}\n\tfor id, def := range frameExamples.Responses {\n\t\tframeExamples.Responses[id].Frame, err = os.ReadFile(def.FilePath)\n\t\tif err != nil {\n\t\t\tpanic(\"can't read file \" + def.FilePath)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "conn.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2012, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n\t\"github.com/gocql/gocql/tablets\"\n\n\t\"github.com/gocql/gocql/internal/lru\"\n\t\"github.com/gocql/gocql/internal/streams\"\n)\n\n// approve the authenticator with the list of allowed authenticators. 
If the provided list is empty,\n// the given authenticator is allowed.\nfunc approve(authenticator string, approvedAuthenticators []string) bool {\n\tif len(approvedAuthenticators) == 0 {\n\t\treturn true\n\t}\n\tfor _, s := range approvedAuthenticators {\n\t\tif authenticator == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\ntype Authenticator interface {\n\tChallenge(req []byte) (resp []byte, auth Authenticator, err error)\n\tSuccess(data []byte) error\n}\n\ntype WarningHandlerBuilder func(session *Session) WarningHandler\n\ntype WarningHandler interface {\n\tHandleWarnings(qry ExecutableQuery, host *HostInfo, warnings []string)\n}\n\n// PasswordAuthenticator specifies credentials to be used when authenticating.\n// It can be configured with an \"allow list\" of authenticator class names to avoid\n// attempting to authenticate with Cassandra if it doesn't provide an expected authenticator.\ntype PasswordAuthenticator struct {\n\tUsername string\n\tPassword string\n\t// Setting this to nil or empty will allow authenticating with any authenticator\n\t// provided by the server.  This is the default behavior of most other driver\n\t// implementations.\n\tAllowedAuthenticators []string\n}\n\nfunc (p PasswordAuthenticator) Challenge(req []byte) ([]byte, Authenticator, error) {\n\tif !approve(string(req), p.AllowedAuthenticators) {\n\t\treturn nil, nil, fmt.Errorf(\"unexpected authenticator %q\", req)\n\t}\n\tresp := make([]byte, 2+len(p.Username)+len(p.Password))\n\tresp[0] = 0\n\tcopy(resp[1:], p.Username)\n\tresp[len(p.Username)+1] = 0\n\tcopy(resp[2+len(p.Username):], p.Password)\n\treturn resp, nil, nil\n}\n\nfunc (p PasswordAuthenticator) Success(data []byte) error {\n\treturn nil\n}\n\n// SslOptions configures TLS use.\n//\n// Warning: Due to historical reasons, the SslOptions is insecure by default, so you need to set EnableHostVerification\n// to true if no Config is set. 
// Most users should set SslOptions.Config to a *tls.Config.
// SslOptions and Config.InsecureSkipVerify interact as follows:
//
//	Config.InsecureSkipVerify | EnableHostVerification | Result
//	Config is nil             | false                  | do not verify host
//	Config is nil             | true                   | verify host
//	false                     | false                  | verify host
//	true                      | false                  | do not verify host
//	false                     | true                   | verify host
//	true                      | true                   | verify host
type SslOptions struct {
	*tls.Config

	// CertPath and KeyPath are optional depending on server
	// config, but both fields must be omitted to avoid using a
	// client certificate
	CertPath string
	KeyPath  string
	CaPath   string // optional depending on server config
	// If you want to verify the hostname and server cert (like a wildcard for cass cluster) then you should turn this
	// on.
	// This option is basically the inverse of tls.Config.InsecureSkipVerify.
	// See InsecureSkipVerify in http://golang.org/pkg/crypto/tls/ for more info.
	//
	// See SslOptions documentation to see how EnableHostVerification interacts with the provided tls.Config.
	EnableHostVerification bool
}

// ConnConfig bundles the per-connection settings used when dialing a node.
type ConnConfig struct {
	Dialer          Dialer
	Logger          StdLogger
	Authenticator   Authenticator
	Compressor      Compressor
	HostDialer      HostDialer
	AuthProvider    func(h *HostInfo) (Authenticator, error)
	tlsConfig       *tls.Config
	CQLVersion      string
	ConnectTimeout  time.Duration
	ReadTimeout     time.Duration
	WriteTimeout    time.Duration
	ProtoVersion    int
	Keepalive       time.Duration
	disableCoalesce bool
}

// logger returns the configured logger, falling back to a defaultLogger when unset.
func (c *ConnConfig) logger() StdLogger {
	if c.Logger == nil {
		return &defaultLogger{}
	}
	return c.Logger
}

type ConnErrorHandler interface {
	HandleError(conn *Conn, err error, closed bool)
}

type connErrorHandlerFn func(conn *Conn, err error, closed bool)

// HandleError lets a plain function satisfy ConnErrorHandler.
func (fn connErrorHandlerFn) HandleError(conn *Conn, err error, closed bool) {
	fn(conn, err, closed)
}

type ConnInterface interface {
	Close()
	exec(ctx context.Context, req frameBuilder, tracer Tracer, requestTimeout time.Duration) (*framer, error)
	awaitSchemaAgreement(ctx context.Context) error
	executeQuery(ctx context.Context, qry *Query) *Iter
	querySystem(ctx context.Context, query string, values ...any) *Iter
	getIsSchemaV2() bool
	setSchemaV2(s bool)
	getScyllaSupported() ScyllaConnectionFeatures
}

// Conn is a single connection to a Cassandra node. It can be used to execute
// queries, but users are usually advised to use a more reliable, higher
// level API.
type Conn struct {
	auth           Authenticator
	streamObserver StreamObserver
	w              contextWriter
	logger         StdLogger
	frameObserver  FrameHeaderObserver
	ctx            context.Context
	errorHandler   ConnErrorHandler
	compressor     Compressor
	conn           net.Conn
	cfg            *ConnConfig
	supported      map[string][]string
	streams        *streams.IDGenerator
	host           *HostInfo
	// calls stores a map from stream ID to callReq.
	// This map is protected by mu.
	// calls should not be used when closed is true, calls is set to nil when closed=true.
	calls                map[int]*callReq
	r                    *bufio.Reader
	session              *Session
	framers              connFramers
	cancel               context.CancelFunc
	addr                 string
	usingTimeoutClause   string
	currentKeyspace      string
	cqlProtoExts         []cqlProtocolExtension
	scyllaSupported      ScyllaConnectionFeatures
	systemRequestTimeout time.Duration
	writeTimeout         atomic.Int64
	readTimeout          atomic.Int64
	mu                   sync.Mutex
	tabletsRoutingV1     int32
	headerBuf            [headSize]byte
	isShardAware         bool
	// true if connection close process for the connection started.
	// closed is protected by mu.
	closed     bool
	isSchemaV2 bool
	version    uint8
}

// getIsSchemaV2 reports whether this connection queries system.peers_v2.
func (c *Conn) getIsSchemaV2() bool {
	return c.isSchemaV2
}

// setSchemaV2 toggles use of system.peers_v2 for this connection.
func (c *Conn) setSchemaV2(s bool) {
	c.isSchemaV2 = s
}

// setSystemRequestTimeout records the timeout used for system-table requests
// and refreshes the derived USING TIMEOUT clause.
func (c *Conn) setSystemRequestTimeout(t time.Duration) {
	c.systemRequestTimeout = t
	c.recalculateSystemRequestTimeout()
}

// recalculateSystemRequestTimeout rebuilds the " USING TIMEOUT ...ms" clause
// for Scylla connections.
// NOTE(review): the clause is never cleared when the timeout is zero — confirm
// callers never reset systemRequestTimeout back to 0 after it was non-zero.
func (c *Conn) recalculateSystemRequestTimeout() {
	if c.systemRequestTimeout > time.Duration(0) && c.isScyllaConn() {
		c.usingTimeoutClause = " USING TIMEOUT " + strconv.FormatInt(c.systemRequestTimeout.Milliseconds(), 10) + "ms"
	}
}

func (c *Conn) finalizeConnection() {
	// When connection just created all timeouts are set to `cfg.ConnectTimeout`
	// It is done to make sure that connection is easy to establish when users set very low `WriteTimeout` and/or `Timeout`
	// This method sets timeouts to `operational` values after connection successfully created
	c.writeTimeout.Store(int64(c.cfg.WriteTimeout))
	c.readTimeout.Store(int64(c.cfg.ReadTimeout))
	c.setSystemRequestTimeout(c.session.cfg.MetadataSchemaRequestTimeout)
	c.w.setWriteTimeout(c.cfg.WriteTimeout)
}

// getScyllaSupported returns the Scylla-specific features parsed from the
// server's SUPPORTED response.
func (c *Conn) getScyllaSupported() ScyllaConnectionFeatures {
	return c.scyllaSupported
}

// connect establishes a connection to a Cassandra node using session's connection config.
// note: every connection needs to get `conn.finalizeConnection` called on it when initialization process is done
func (s *Session) connect(ctx context.Context, host *HostInfo, errorHandler ConnErrorHandler) (*Conn, error) {
	return s.dial(ctx, host, s.connCfg, errorHandler)
}

// connectShard establishes a connection to a shard.
// If nrShards is zero, shard-aware dialing is disabled.
// note: every connection needs to get `conn.finalizeConnection` 
// called on it when initialization process is done
func (s *Session) connectShard(ctx context.Context, host *HostInfo, errorHandler ConnErrorHandler,
	shardID, nrShards int) (*Conn, error) {
	return s.dialShard(ctx, host, s.connCfg, errorHandler, shardID, nrShards)
}

// dial establishes a connection to a Cassandra node and notifies the session's connectObserver.
// note: every connection needs to get `conn.finalizeConnection` called on it when initialization process is done
func (s *Session) dial(ctx context.Context, host *HostInfo, connConfig *ConnConfig, errorHandler ConnErrorHandler) (*Conn, error) {
	return s.dialShard(ctx, host, connConfig, errorHandler, 0, 0)
}

// translateHostAddresses runs the configured AddressTranslator over the host's
// CQL address and, when the host advertises them, its shard-aware plain and
// TLS ports.
func translateHostAddresses(addressTranslator AddressTranslator, host *HostInfo, logger StdLogger) (translatedAddresses, error) {
	addr, err := translateAddressPort(addressTranslator, host, AddressPort{
		Address: host.UntranslatedConnectAddress(),
		Port:    uint16(host.Port()),
	}, logger)
	if err != nil {
		return translatedAddresses{}, fmt.Errorf("unable to translate regular cql address: %w", err)
	}
	resultedInfo := translatedAddresses{
		CQL: addr,
	}

	scyllaFeatures := host.ScyllaFeatures()
	if port := scyllaFeatures.ShardAwarePort(); port != 0 {
		addr, err = translateAddressPort(addressTranslator, host,
			AddressPort{
				Address: host.UntranslatedConnectAddress(),
				Port:    port,
			}, logger)
		if err != nil {
			return translatedAddresses{}, fmt.Errorf("unable to translate shard aware address: %w", err)
		}
		resultedInfo.ShardAware = addr
	}
	if port := scyllaFeatures.ShardAwarePortTLS(); port != 0 {
		addr, err = translateAddressPort(addressTranslator, host,
			AddressPort{
				Address: host.UntranslatedConnectAddress(),
				Port:    port,
			}, logger)
		if err != nil {
			return translatedAddresses{}, fmt.Errorf("unable to translate shard aware tls address: %w", err)
		}
		resultedInfo.ShardAwareTLS = addr
	}
	return resultedInfo, nil
}

// dialShard establishes a connection to a host/shard and notifies the session's connectObserver.
// If nrShards is zero, shard-aware dialing is disabled.
// note: every connection needs to get `conn.finalizeConnection` called on it when initialization process is done
func (s *Session) dialShard(ctx context.Context, host *HostInfo, connConfig *ConnConfig, errorHandler ConnErrorHandler,
	shardID, nrShards int) (*Conn, error) {
	var obs ObservedConnect

	// Refresh the cached translated addresses if translation changed.
	current := host.getTranslatedConnectionInfo()
	updated, err := translateHostAddresses(s.addressTranslator, host, s.logger)
	if err != nil {
		return nil, err
	}
	if current == nil || !updated.Equal(current) {
		host.setTranslatedConnectionInfo(updated)
	}

	if s.connectObserver != nil {
		obs.Host = host
		obs.Start = time.Now()
	}

	conn, err := s.dialWithoutObserver(ctx, host, connConfig, errorHandler, shardID, nrShards)

	if s.connectObserver != nil {
		obs.End = time.Now()
		obs.Err = err
		s.connectObserver.ObserveConnect(obs)
	}

	return conn, err
}

// dialWithoutObserver establishes connection to a Cassandra node.
//
// dialWithoutObserver does not notify the connection observer, so you most probably want to call dial() instead.
//
// If nrShards is zero, shard-aware dialing is disabled.
func (s *Session) dialWithoutObserver(ctx context.Context, host *HostInfo, cfg *ConnConfig, errorHandler ConnErrorHandler,
	shardID, nrShards int) (*Conn, error) {

	shardDialer, ok := cfg.HostDialer.(ShardDialer)
	var (
		dialedHost *DialedHost
		err        error
	)

	// Use the shard-aware dial path only when the dialer supports it and the
	// shard count is known.
	isShardAware := false
	if ok && nrShards > 0 {
		isShardAware = true
		dialedHost, err = shardDialer.DialShard(ctx, host, shardID, nrShards)
	} else {
		dialedHost, err = cfg.HostDialer.DialHost(ctx, host)
	}

	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithCancel(ctx)
	c := &Conn{
		conn:          dialedHost.Conn,
		r:             bufio.NewReader(dialedHost.Conn),
		cfg:           cfg,
		calls:         make(map[int]*callReq),
		version:       uint8(cfg.ProtoVersion),
		isShardAware:  isShardAware,
		addr:          dialedHost.Conn.RemoteAddr().String(),
		errorHandler:  errorHandler,
		compressor:    cfg.Compressor,
		session:       s,
		streams:       s.streamIDGenerator(),
		host:          host,
		isSchemaV2:    true, // Try using "system.peers_v2" until proven otherwise
		frameObserver: s.frameObserver,
		w: &deadlineContextWriter{
			w:         dialedHost.Conn,
			semaphore: make(chan struct{}, 1),
			quit:      make(chan struct{}),
		},
		ctx:                  ctx,
		cancel:               cancel,
		logger:               cfg.logger(),
		streamObserver:       s.streamObserver,
		systemRequestTimeout: cfg.ConnectTimeout,
	}

	if err := c.init(ctx, dialedHost); err != nil {
		cancel()
		c.Close()
		return nil, err
	}

	return c, nil
}

// streamIDGenerator returns a limited generator when MaxRequestsPerConn caps
// concurrent requests per connection, otherwise an unbounded one.
func (s *Session) streamIDGenerator() *streams.IDGenerator {
	if s.cfg.MaxRequestsPerConn > 0 {
		return streams.NewLimited(s.cfg.MaxRequestsPerConn)
	}
	return streams.New()
}

// init performs the startup handshake on a freshly dialed connection and
// starts its serve/heartbeat goroutines.
func (c *Conn) init(ctx context.Context, dialedHost *DialedHost) error {
	// Until the handshake completes, all timeouts are the connect timeout;
	// finalizeConnection later installs the operational values.
	c.readTimeout.Store(int64(c.cfg.ConnectTimeout))
	c.writeTimeout.Store(int64(c.cfg.ConnectTimeout))
	c.w.setWriteTimeout(c.cfg.ConnectTimeout)

	// NOTE(review): the condition checks c.session.cfg.AuthProvider but the
	// call uses c.cfg.AuthProvider — confirm both are always set together.
	if c.session.cfg.AuthProvider != nil {
		var err error
		c.auth, err = c.cfg.AuthProvider(c.host)
		if err != nil {
			return err
		}
	} else {
		c.auth = c.cfg.Authenticator
	}

	startup := &startupCoordinator{
		frameTicker: make(chan struct{}),
		conn:        c,
	}

	if err := startup.setupConn(ctx); err != nil {
		return err
	}

	// don't coalesce startup frames
	if c.session.cfg.WriteCoalesceWaitTime > 0 &&
		!c.cfg.disableCoalesce && !dialedHost.DisableCoalesce {
		c.w = newWriteCoalescer(c.conn, c.cfg.ConnectTimeout, c.session.cfg.WriteCoalesceWaitTime, ctx.Done())
	}

	if c.isScyllaConn() { // ScyllaDB does not support system.peers_v2
		c.setSchemaV2(false)
	}

	go c.serve(ctx)
	go c.heartBeat(ctx)

	return nil
}

// Write sends p on the connection without a caller-supplied deadline.
func (c *Conn) Write(p []byte) (n int, err error) {
	return c.w.writeContext(context.Background(), p)
}

// Read fills p from the buffered connection, applying the connection read
// deadline before each attempt and retrying temporary network errors up to
// maxAttempts times.
func (c *Conn) Read(p []byte) (n int, err error) {
	const maxAttempts = 5
	timeout := c.readTimeout.Load()

	for i := 0; i < maxAttempts; i++ {
		var nn int
		if timeout > 0 {
			err = c.conn.SetReadDeadline(time.Now().Add(time.Duration(timeout)))
			if err != nil {
				return 0, err
			}
		}

		nn, err = io.ReadFull(c.r, p[n:])
		n += nn
		if err == nil {
			break
		}

		// NOTE(review): net.Error.Temporary is deprecated in modern Go —
		// consider an explicit retry policy here.
		if verr, ok := err.(net.Error); !ok || !verr.Temporary() {
			break
		}
	}

	return
}

// startupCoordinator drives the OPTIONS/STARTUP/auth handshake for a new connection.
type startupCoordinator struct {
	conn        *Conn
	frameTicker chan struct{}
}

// setupConn runs the handshake under the connect timeout, pumping recv() once
// per outgoing startup frame via frameTicker.
func (s *startupCoordinator) setupConn(ctx context.Context) error {
	var cancel context.CancelFunc
	if s.conn.cfg.ConnectTimeout > 0 {
		ctx, cancel = context.WithTimeout(ctx, s.conn.cfg.ConnectTimeout)
	} else {
		ctx, cancel = context.WithCancel(ctx)
	}
	defer cancel()

	startupErr := make(chan error)
	go func() {
		// One recv() per frame sent by write(); stops when frameTicker closes.
		for range s.frameTicker {
			err := s.conn.recv(ctx)
			if err != nil {
				select {
				case startupErr <- err:
				case <-ctx.Done():
				}

				return
			}
		}
	}()

	go func() {
		defer close(s.frameTicker)
		err := s.options(ctx)
		select {
		case startupErr <- err:
		case <-ctx.Done():
		}
	}()

	select {
	case err := <-startupErr:
		if err != nil {
			return err
		}
	case <-ctx.Done():
		return errors.New("gocql: no response to connection startup within timeout")
	}

	return nil
}

// write sends the given frame on the connection during startup and returns
// the parsed response frame.
//
// NOTE: The returned frame must not retain any byte-slice references to the
// framer's read buffer, because the framer is released back to the pool
// immediately after parseFrame returns (via defer). Frame types that use
// readBytesCopy (e.g. SupportedFrame, AuthChallengeFrame, AuthSuccessFrame)
// are safe; frame types that use readBytes and expose []byte fields would not
// be safe and must not be returned from this function.
func (s *startupCoordinator) write(ctx context.Context, frame frameBuilder) (frame, error) {
	select {
	case s.frameTicker <- struct{}{}:
	case <-ctx.Done():
		return nil, ctx.Err()
	}

	framer, err := s.conn.exec(ctx, frame, nil, s.conn.cfg.ConnectTimeout)
	if err != nil {
		return nil, err
	}
	defer framer.Release()

	return framer.parseFrame()
}

// options sends an OPTIONS frame and records the server's SUPPORTED reply
// before continuing with startup().
func (s *startupCoordinator) options(ctx context.Context) error {
	frame, err := s.write(ctx, &writeOptionsFrame{})
	if err != nil {
		return err
	}

	v, ok := frame.(*frm.SupportedFrame)
	if !ok {
		return NewErrProtocol("Unknown type of response to startup frame: %T", frame)
	}
	// Keep raw supported multimap for debug purposes
	s.conn.supported = v.Supported
	s.conn.scyllaSupported = parseSupported(s.conn.supported, s.conn.logger)
	s.conn.recalculateSystemRequestTimeout()
	if current := s.conn.host.ScyllaFeatures(); current != s.conn.scyllaSupported.ScyllaHostFeatures {
		s.conn.host.setScyllaFeatures(s.conn.scyllaSupported.ScyllaHostFeatures)
	}
	s.conn.cqlProtoExts = parseCQLProtocolExtensions(s.conn.supported, s.conn.logger)

	// initFramerCache must be called after startup(), because startup() may
	// nil out c.compressor if the server does not support the requested
	// compression algorithm. 
	// Calling it before would snapshot a stale
	// compressor and set FlagCompress, causing protocol errors.
	err = s.startup(ctx)
	if err != nil {
		return err
	}
	s.conn.initFramerCache()
	return nil
}

// startup sends the STARTUP request (CQL version, driver info, negotiated
// compression, protocol extensions) and dispatches on the reply.
func (s *startupCoordinator) startup(ctx context.Context) error {
	m := map[string]string{}

	if s.conn.session.cfg.ApplicationInfo != nil {
		s.conn.session.cfg.ApplicationInfo.UpdateStartupOptions(m)
	}

	m["CQL_VERSION"] = s.conn.cfg.CQLVersion
	m["DRIVER_NAME"] = s.conn.session.cfg.DriverName
	m["DRIVER_VERSION"] = s.conn.session.cfg.DriverVersion

	if s.conn.compressor != nil {
		comp := s.conn.supported["COMPRESSION"]
		name := s.conn.compressor.Name()
		for _, compressor := range comp {
			if compressor == name {
				m["COMPRESSION"] = compressor
				break
			}
		}

		// Server does not support our compressor; disable compression.
		if _, ok := m["COMPRESSION"]; !ok {
			s.conn.compressor = nil
		}
	}

	for _, ext := range s.conn.cqlProtoExts {
		serialized := ext.serialize()
		for k, v := range serialized {
			m[k] = v
		}
	}

	frame, err := s.write(ctx, &writeStartupFrame{opts: m})
	if err != nil {
		return err
	}

	switch v := frame.(type) {
	case error:
		return v
	case *frm.ReadyFrame:
		return nil
	case *frm.AuthenticateFrame:
		return s.authenticateHandshake(ctx, v)
	default:
		return NewErrProtocol("Unknown type of response to startup frame: %s", v)
	}
}

// authenticateHandshake runs the challenge/response loop with the configured
// Authenticator until the server reports success or an error.
func (s *startupCoordinator) authenticateHandshake(ctx context.Context, authFrame *frm.AuthenticateFrame) error {
	if s.conn.auth == nil {
		return fmt.Errorf("authentication required (using %q)", authFrame.Class)
	}

	resp, challenger, err := s.conn.auth.Challenge([]byte(authFrame.Class))
	if err != nil {
		return err
	}

	req := &writeAuthResponseFrame{data: resp}
	for {
		frame, err := s.write(ctx, req)
		if err != nil {
			return err
		}

		switch v := frame.(type) {
		case error:
			return v
		case *frm.AuthSuccessFrame:
			if challenger != nil {
				return challenger.Success(v.Data)
			}
			return nil
		case *frm.AuthChallengeFrame:
			// NOTE(review): challenger may be nil here if the initial
			// Challenge returned a nil Authenticator — confirm the server
			// never sends AUTH_CHALLENGE in that case.
			resp, challenger, err = challenger.Challenge(v.Data)
			if err != nil {
				return err
			}

			req = &writeAuthResponseFrame{
				data: resp,
			}
		default:
			return fmt.Errorf("unknown frame response during authentication: %v", v)
		}
	}
}

// closeWithError marks the connection closed, drains all in-flight calls
// (propagating err to their waiters when non-nil), and notifies the error
// handler. A nil err indicates a graceful close.
func (c *Conn) closeWithError(err error) {
	if c == nil {
		return
	}

	c.mu.Lock()
	if c.closed {
		c.mu.Unlock()
		return
	}
	c.closed = true
	callsToClose := c.calls
	// It is safe to change c.calls to nil. Nobody should use it after c.closed is set to true.
	c.calls = nil
	c.mu.Unlock()

	var cerr error
	if err == nil {
		// Graceful closes do not inject an error into call.resp, so cancel the
		// connection first to unblock any exec() calls before waiting for them.
		c.cancel()
		cerr = c.close()
	}

	for _, req := range callsToClose {
		if err != nil {
			// We need to send the error to all waiting queries.
			select {
			case req.resp <- callResp{err: err}:
				// exec() received the error. Wait for it to finish touching the callReq
				// before recycling it.
			case <-req.timeout:
				// exec() already timed out and returned.
			}
		}
		req.waitExecDone("closeWithError")
		if req.streamObserverContext != nil {
			req.streamObserverEndOnce.Do(func() {
				req.streamObserverContext.StreamAbandoned(ObservedStream{
					Host: c.host,
				})
			})
		}
		putCallReq(req)
	}

	// Allow GC of pooled framers. Safe to do after the drain loop above has
	// resolved all in-flight exec() calls. Any event goroutines still running
	// will see pool==nil in releaseFramer and simply drop the framer.
	c.framers.close()

	if err != nil {
		c.cancel()
		cerr = c.close()
	}

	if err != nil {
		c.errorHandler.HandleError(c, err, true)
	} else if cerr != nil {
		// TODO(zariel): is it a good idea to do this?
		c.errorHandler.HandleError(c, cerr, true)
	}
}

// isTabletSupported reports whether tablets routing v1 is active on this connection.
func (c *Conn) isTabletSupported() bool {
	return atomic.LoadInt32(&c.tabletsRoutingV1) == 1
}

// setTabletSupported records whether tablets routing v1 is active.
func (c *Conn) setTabletSupported(val bool) {
	intVal := int32(0)
	if val {
		intVal = 1
	}
	atomic.StoreInt32(&c.tabletsRoutingV1, intVal)
}

// close tears down the underlying network connection.
func (c *Conn) close() error {
	return c.conn.Close()
}

// Close gracefully shuts the connection down.
func (c *Conn) Close() {
	c.closeWithError(nil)
}

// Serve starts the stream multiplexer for this connection, which is required
// to execute any queries. This method runs as long as the connection is
// open and is therefore usually called in a separate goroutine.
func (c *Conn) serve(ctx context.Context) {
	var err error
	for err == nil {
		err = c.recv(ctx)
	}

	c.closeWithError(err)
}

// discardFrame reads and throws away a frame body that has no handler.
func (c *Conn) discardFrame(head frm.FrameHeader) error {
	_, err := io.CopyN(io.Discard, c, int64(head.Length))
	if err != nil {
		return err
	}
	return nil
}

// protocolError wraps a frame received on an unexpected stream.
type protocolError struct {
	frame frame
}

func (p *protocolError) Error() string {
	if err, ok := p.frame.(error); ok {
		return err.Error()
	}
	return fmt.Sprintf("gocql: received unexpected frame on stream %d: %v", p.frame.Header().Stream, p.frame)
}

// heartBeat periodically sends OPTIONS frames on the connection, closing it
// after more than 5 consecutive failures.
func (c *Conn) heartBeat(ctx context.Context) {
	sleepTime := 1 * time.Second
	timer := time.NewTimer(sleepTime)
	defer timer.Stop()

	var failures int

	for {
		if failures > 5 {
			c.closeWithError(fmt.Errorf("gocql: heartbeat failed"))
			return
		}

		timer.Reset(sleepTime)

		select {
		case <-ctx.Done():
			return
		case <-timer.C:
		}

		framer, err :=
			c.exec(context.Background(), &writeOptionsFrame{}, nil, c.cfg.ConnectTimeout)
		if err != nil {
			failures++
			continue
		}

		resp, err := framer.parseFrame()
		framer.Release()
		if err != nil {
			// invalid frame
			failures++
			continue
		}

		switch resp.(type) {
		case *frm.SupportedFrame:
			// Everything ok
			sleepTime = 30 * time.Second
			failures = 0
		case error:
			// TODO: should we do something here?
		default:
			panic(fmt.Sprintf("gocql: unknown frame in response to options: %T", resp))
		}
	}
}

// recv reads one frame off the connection and routes it: events and reserved
// streams are handled inline, everything else is delivered to the waiting call.
func (c *Conn) recv(ctx context.Context) error {
	// not safe for concurrent reads

	// read a full header, ignore timeouts, as this is being run in a loop
	// TODO: TCP level deadlines? or just query level deadlines?
	if c.readTimeout.Load() > 0 {
		c.conn.SetReadDeadline(time.Time{})
	}

	var headStartTime time.Time
	if c.frameObserver != nil {
		headStartTime = time.Now()
	}
	// we're just reading headers over and over and copy bodies
	head, err := readHeader(c.r, c.headerBuf[:])
	if err != nil {
		return err
	}

	if c.frameObserver != nil {
		headEndTime := time.Now()
		c.frameObserver.ObserveFrameHeader(context.Background(), ObservedFrameHeader{
			Version: head.Version,
			Flags:   head.Flags,
			Stream:  int16(head.Stream),
			Opcode:  head.Op,
			Length:  int32(head.Length),
			Start:   headStartTime,
			End:     headEndTime,
			Host:    c.host,
		})
	}

	if head.Stream > c.streams.NumStreams {
		return fmt.Errorf("gocql: frame header stream is beyond call expected bounds: %d", head.Stream)
	} else if head.Stream <= 0 {
		// reserved stream that we don't use, probably due to a protocol error
		// or a bug in Cassandra, this should be an error, parse it and return.
		framer, err := c.readFrameIntoFramer(head)
		if err != nil {
			return err
		}

		frame, err := framer.parseFrame()
		// NOTE: Safe to release the framer here because all error frame types
		// (from parseErrorFrame) contain only strings, scalars, and defensively-
		// copied []byte fields. None retain sub-slices of the framer's read buffer.
		c.releaseReadFramer(framer)
		if err != nil {
			if head.Stream == -1 {
				// Event frame parse errors should not close the connection.
				c.logger.Printf("gocql: unable to parse event frame: %v\n", err)
				return nil
			}
			return err
		}

		if head.Stream == -1 { // reserved stream for events
			if c.session != nil {
				go c.session.handleEvent(frame)
			}
			return nil
		}

		return &protocolError{
			frame: frame,
		}
	}

	c.mu.Lock()
	if c.closed {
		c.mu.Unlock()
		return ErrConnectionClosed
	}
	call, ok := c.calls[head.Stream]
	delete(c.calls, head.Stream)
	c.mu.Unlock()
	if call == nil || !ok {
		c.logger.Printf("gocql: received response for stream which has no handler: header=%v\n", head)
		return c.discardFrame(head)
	} else if head.Stream != call.streamID {
		panic(fmt.Sprintf("call has incorrect streamID: got %d expected %d", call.streamID, head.Stream))
	}

	framer := c.getReadFramer()

	err = framer.readFrame(c, &head)
	if err != nil {
		// only net errors should cause the connection to be closed. Though
		// cassandra returning corrupt frames will be returned here as well.
		if _, ok := err.(net.Error); ok {
			c.releaseReadFramer(framer)
			return err
		}
	}

	// we either, return a response to the caller, the caller timed out, or the
	// connection has closed. Either way we should never block indefinitely here
	select {
	case call.resp <- callResp{framer: framer, err: err}:
		// Framer ownership transferred to caller
	case <-call.timeout:
		c.abandonRecvCall(call, framer)
	case <-ctx.Done():
		c.abandonRecvCall(call, framer)
	}

	return nil
}

// readFrameIntoFramer reads the body for head into a pooled read framer,
// releasing the framer again on failure.
func (c *Conn) readFrameIntoFramer(head frm.FrameHeader) (*framer, error) {
	framer := c.getReadFramer()
	if err := framer.readFrame(c, &head); err != nil {
		c.releaseReadFramer(framer)
		return nil, err
	}
	return framer, nil
}

// abandonRecvCall cleans up a call whose waiter is gone: returns the framer
// and stream, waits for exec() to finish with the callReq, then recycles it.
func (c *Conn) abandonRecvCall(call *callReq, framer *framer) {
	c.releaseReadFramer(framer)
	c.releaseStream(call)
	call.waitExecDone("abandonRecvCall")
	putCallReq(call)
}

// releaseStream returns the call's stream ID to the generator and notifies
// the stream observer (at most once) that the stream finished.
func (c *Conn) releaseStream(call *callReq) {
	c.streams.Clear(call.streamID)

	if call.streamObserverContext != nil {
		call.streamObserverEndOnce.Do(func() {
			call.streamObserverContext.StreamFinished(ObservedStream{
				Host: c.host,
			})
		})
	}
}

type callReq struct {
	// streamObserverContext is notified about events regarding this stream
	streamObserverContext StreamObserverContext
	// resp will receive the frame that was sent as a response to this stream.
	resp     chan callResp
	timeout  chan struct{} // indicates to recv() that a call has timed out
	timer    *time.Timer
	streamID int // current stream in use
	// streamObserverEndOnce ensures that either StreamAbandoned or StreamFinished is called,
	// but not both.
	streamObserverEndOnce sync.Once
	done                  sync.WaitGroup
}

var callReqPool = sync.Pool{
	New: func() any {
		return &callReq{
			resp: make(chan callResp),
		}
	},
}

// getCallReq takes a callReq from the pool and resets it for streamID.
func getCallReq(streamID int) *callReq {
	call := callReqPool.Get().(*callReq)
	call.timeout = make(chan struct{})
	call.streamID = streamID
	call.streamObserverContext = nil
	call.streamObserverEndOnce = sync.Once{}
	call.done =
sync.WaitGroup{}\n\tcall.done.Add(1)\n\treturn call\n}\n\nfunc putCallReq(call *callReq) {\n\tif call.timer != nil {\n\t\tif !call.timer.Stop() {\n\t\t\tselect {\n\t\t\tcase <-call.timer.C:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}\n\tcall.streamObserverContext = nil\n\tcall.streamObserverEndOnce = sync.Once{}\n\tcall.streamID = 0\n\tcall.timeout = nil\n\tcallReqPool.Put(call)\n}\n\nfunc (call *callReq) finishExec() {\n\tcall.done.Done()\n}\n\nfunc (call *callReq) waitExecDone(where string) {\n\twaitCallReqDone(call, where)\n}\n\n// removeCallIfOpen removes a call from c.calls only if exec() still owns its\n// cleanup. Once the connection has started closing, closeWithError() becomes\n// responsible for draining and recycling detached callReqs.\nfunc (c *Conn) removeCallIfOpen(streamID int) bool {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif c.closed || c.calls == nil {\n\t\treturn false\n\t}\n\n\tdelete(c.calls, streamID)\n\treturn true\n}\n\ntype callResp struct {\n\t// framer is the response frame.\n\t// May be nil if err is not nil.\n\tframer *framer\n\t// err is error encountered, if any.\n\terr error\n}\n\n// contextWriter is like io.Writer, but takes context as well.\ntype contextWriter interface {\n\t// writeContext writes p to the connection.\n\t//\n\t// If ctx is canceled before we start writing p (e.g. during waiting while another write is currently in progress),\n\t// p is not written and ctx.Err() is returned. Context is ignored after we start writing p (i.e. we don't interrupt\n\t// blocked writes that are in progress) so that we always either write the full frame or not write it at all.\n\t//\n\t// It returns the number of bytes written from p (0 <= n <= len(p)) and any error that caused the write to stop\n\t// early. writeContext must return a non-nil error if it returns n < len(p). 
writeContext must not modify the\n\t// data in p, even temporarily.\n\twriteContext(ctx context.Context, p []byte) (n int, err error)\n\n\tsetWriteTimeout(timeout time.Duration)\n}\n\ntype deadlineWriter interface {\n\tSetWriteDeadline(time.Time) error\n\tio.Writer\n}\n\ntype deadlineContextWriter struct {\n\tw deadlineWriter\n\t// semaphore protects critical section for SetWriteDeadline/Write.\n\t// It is a channel with capacity 1.\n\tsemaphore chan struct{}\n\t// quit closed once the connection is closed.\n\tquit    chan struct{}\n\ttimeout atomic.Int64\n}\n\nfunc (c *deadlineContextWriter) setWriteTimeout(timeout time.Duration) {\n\tc.timeout.Store(int64(timeout))\n}\n\n// writeContext implements contextWriter.\nfunc (c *deadlineContextWriter) writeContext(ctx context.Context, p []byte) (int, error) {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn 0, ctx.Err()\n\tcase <-c.quit:\n\t\treturn 0, ErrConnectionClosed\n\tcase c.semaphore <- struct{}{}:\n\t\t// acquired\n\t}\n\n\tdefer func() {\n\t\t// release\n\t\t<-c.semaphore\n\t}()\n\n\ttimeout := c.timeout.Load()\n\tif timeout > 0 {\n\t\terr := c.w.SetWriteDeadline(time.Now().Add(time.Duration(timeout)))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn c.w.Write(p)\n}\n\nfunc newWriteCoalescer(conn deadlineWriter, writeTimeout, coalesceDuration time.Duration,\n\tquit <-chan struct{}) *writeCoalescer {\n\twc := &writeCoalescer{\n\t\twriteCh: make(chan writeRequest),\n\t\tc:       conn,\n\t\tquit:    quit,\n\t}\n\twc.setWriteTimeout(writeTimeout)\n\tgo wc.writeFlusher(coalesceDuration)\n\treturn wc\n}\n\ntype writeCoalescer struct {\n\tc                deadlineWriter\n\tquit             <-chan struct{}\n\twriteCh          chan writeRequest\n\ttestEnqueuedHook func()\n\ttestFlushedHook  func()\n\ttimeout          atomic.Int64\n\tmu               sync.Mutex\n}\n\nfunc (w *writeCoalescer) setWriteTimeout(timeout time.Duration) {\n\tw.timeout.Store(int64(timeout))\n}\n\ntype writeRequest struct {\n\t// 
resultChan is a channel (with buffer size 1) where to send results of the write.\n\tresultChan chan<- writeResult\n\t// data to write.\n\tdata []byte\n}\n\ntype writeResult struct {\n\terr error\n\tn   int\n}\n\n// writeResultChanPool pools buffered channels used for write coalescer results.\n// Each channel is used in a strict produce-once/consume-once pattern:\n// the flusher goroutine sends exactly one writeResult, and writeContext\n// reads exactly one. After reading, the channel is empty and safe to reuse.\nvar writeResultChanPool = sync.Pool{\n\tNew: func() interface{} {\n\t\treturn make(chan writeResult, 1)\n\t},\n}\n\n// writeContext implements contextWriter.\nfunc (w *writeCoalescer) writeContext(ctx context.Context, p []byte) (int, error) {\n\tresultChan := writeResultChanPool.Get().(chan writeResult)\n\twr := writeRequest{\n\t\tresultChan: resultChan,\n\t\tdata:       p,\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\t\twriteResultChanPool.Put(resultChan)\n\t\treturn 0, ctx.Err()\n\tcase <-w.quit:\n\t\twriteResultChanPool.Put(resultChan)\n\t\treturn 0, io.EOF // TODO: better error here?\n\tcase w.writeCh <- wr:\n\t\t// enqueued for writing\n\t}\n\n\tif w.testEnqueuedHook != nil {\n\t\tw.testEnqueuedHook()\n\t}\n\n\tresult := <-resultChan\n\twriteResultChanPool.Put(resultChan)\n\treturn result.n, result.err\n}\n\nfunc (w *writeCoalescer) writeFlusher(interval time.Duration) {\n\ttimer := time.NewTimer(interval)\n\tdefer timer.Stop()\n\n\tif !timer.Stop() {\n\t\t<-timer.C\n\t}\n\n\tw.writeFlusherImpl(timer.C, func() { timer.Reset(interval) })\n}\n\nfunc (w *writeCoalescer) writeFlusherImpl(timerC <-chan time.Time, resetTimer func()) {\n\trunning := false\n\n\tvar buffers net.Buffers\n\tvar resultChans []chan<- writeResult\n\n\tfor {\n\t\tselect {\n\t\tcase req := <-w.writeCh:\n\t\t\tbuffers = append(buffers, req.data)\n\t\t\tresultChans = append(resultChans, req.resultChan)\n\t\t\tif !running {\n\t\t\t\t// Start timer on first 
				// write.
				resetTimer()
				running = true
			}
		case <-w.quit:
			result := writeResult{
				n:   0,
				err: io.EOF, // TODO: better error here?
			}
			// Unblock whoever was waiting.
			for _, resultChan := range resultChans {
				// resultChan has capacity 1, so it does not block.
				resultChan <- result
			}
			return
		case <-timerC:
			running = false
			w.flush(resultChans, buffers)
			buffers = nil
			resultChans = nil
			if w.testFlushedHook != nil {
				w.testFlushedHook()
			}
		}
	}
}

// flush writes all accumulated buffers in one batch and reports a per-request
// byte count and error to each waiting writer.
func (w *writeCoalescer) flush(resultChans []chan<- writeResult, buffers net.Buffers) {
	// Flush everything we have so far.
	timeout := w.timeout.Load()
	if timeout > 0 {
		err := w.c.SetWriteDeadline(time.Now().Add(time.Duration(timeout)))
		if err != nil {
			for i := range resultChans {
				resultChans[i] <- writeResult{
					n:   0,
					err: err,
				}
			}
			return
		}
	}
	// Copy buffers because WriteTo modifies buffers in-place.
	buffers2 := make(net.Buffers, len(buffers))
	copy(buffers2, buffers)
	n, err := buffers2.WriteTo(w.c)
	// Writes of bytes before n succeeded, writes of bytes starting from n failed with err.
	// Use n as remaining byte counter.
	for i := range buffers {
		if int64(len(buffers[i])) <= n {
			// this buffer was fully written.
			resultChans[i] <- writeResult{
				n:   len(buffers[i]),
				err: nil,
			}
			n -= int64(len(buffers[i]))
		} else {
			// this buffer was not (fully) written.
			resultChans[i] <- writeResult{
				n:   int(n),
				err: err,
			}
			n = 0
		}
	}
}

// addCall attempts to add a call to c.calls.
// It fails with error if the connection already started closing or if a call for the given stream
// already exists.
func (c *Conn) addCall(call *callReq) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		return ErrConnectionClosed
	}
	existingCall := c.calls[call.streamID]
	if existingCall != nil {
		return fmt.Errorf("attempting to use stream already in use: %d -> %d", call.streamID,
			existingCall.streamID)
	}
	c.calls[call.streamID] = call
	return nil
}

// exec executes a frame on the connection and returns the response framer.
//
// IMPORTANT: The caller takes ownership of the returned framer and MUST call
// framer.Release() when done reading the response. Failure to release the framer
// will leak memory and prevent buffer reuse.
//
// The framer should be released as soon as the response data is no longer needed,
// typically via defer immediately after parsing or after transferring ownership
// to an Iter.
func (c *Conn) exec(ctx context.Context, req frameBuilder, tracer Tracer, requestTimeout time.Duration) (*framer, error) {
	if ctxErr := ctx.Err(); ctxErr != nil {
		return nil, &QueryError{err: ctxErr, potentiallyExecuted: false}
	}

	// TODO: move tracer onto conn
	stream, ok := c.streams.GetStream()
	if !ok {
		return nil, &QueryError{err: ErrNoStreams, potentiallyExecuted: false}
	}

	// resp is basically a waiting semaphore protecting the framer
	framer := c.getWriteFramer()

	call := getCallReq(stream)

	if c.streamObserver != nil {
		call.streamObserverContext = c.streamObserver.StreamContext(ctx)
	}

	if err := c.addCall(call); err != nil {
		call.finishExec()
		putCallReq(call)
		c.releaseWriteFramer(framer)
		return nil, &QueryError{err: err, potentiallyExecuted: false}
	}

	// After this point, we need to either read from call.resp or close(call.timeout)
	// since closeWithError can try to write a connection close error to call.resp.
	// If we don't close(call.timeout) or read from call.resp, closeWithError can deadlock.

	var (
		stopWaiting   bool
		releaseStream bool
		recycleCall   bool
		closeErr  
    error\n\t)\n\n\tdefer func() {\n\t\tif closeErr != nil {\n\t\t\tc.closeWithError(closeErr)\n\t\t}\n\t}()\n\n\tdefer func() {\n\t\tif stopWaiting {\n\t\t\tclose(call.timeout)\n\t\t}\n\t\tcall.finishExec()\n\t\tif releaseStream {\n\t\t\tc.releaseStream(call)\n\t\t}\n\t\tif recycleCall {\n\t\t\tputCallReq(call)\n\t\t}\n\t}()\n\n\tif tracer != nil {\n\t\tframer.trace()\n\t}\n\n\tif call.streamObserverContext != nil {\n\t\tcall.streamObserverContext.StreamStarted(ObservedStream{\n\t\t\tHost: c.host,\n\t\t})\n\t}\n\n\terr := req.buildFrame(framer, stream)\n\tif err != nil {\n\t\tc.releaseWriteFramer(framer)\n\t\t// closeWithError waits for exec() to stop touching the callReq, so the\n\t\t// deferred epilogue below is responsible for signaling completion.\n\t\tstopWaiting = true\n\t\tif c.removeCallIfOpen(call.streamID) {\n\t\t\t// We failed to serialize the frame into a buffer. This should not affect\n\t\t\t// the connection as we didn't write anything, so exec() still owns the\n\t\t\t// stream/call cleanup.\n\t\t\treleaseStream = true\n\t\t\trecycleCall = true\n\t\t}\n\t\treturn nil, &QueryError{err: err, potentiallyExecuted: false}\n\t}\n\n\tn, err := c.w.writeContext(ctx, framer.buf)\n\tc.releaseWriteFramer(framer)\n\tif err != nil {\n\t\t// closeWithError waits for exec() to stop touching the callReq, so defer\n\t\t// the completion signal and only record the cleanup we need here.\n\t\tstopWaiting = true\n\t\tif (errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)) && n == 0 {\n\t\t\t// We have not started to write this frame.\n\t\t\t// Release the stream as no response can come from the server on the stream.\n\t\t\tif c.removeCallIfOpen(call.streamID) {\n\t\t\t\t// We need to release the stream after we remove the call from c.calls,\n\t\t\t\t// otherwise the existingCall != nil check above could fail.\n\t\t\t\treleaseStream = true\n\t\t\t\trecycleCall = true\n\t\t\t}\n\t\t} else {\n\t\t\t// I think this is the correct thing to do, im not 
entirely sure. It is not\n\t\t\t// ideal as readers might still get some data, but they probably wont.\n\t\t\t// Here we need to be careful as the stream is not available and if all\n\t\t\t// writes just timeout or fail then the pool might use this connection to\n\t\t\t// send a frame on, with all the streams used up and not returned.\n\t\t\tcloseErr = err\n\t\t}\n\t\treturn nil, &QueryError{err: err, potentiallyExecuted: true}\n\t}\n\n\tvar timeoutCh <-chan time.Time\n\tif requestTimeout > 0 {\n\t\tif call.timer == nil {\n\t\t\tcall.timer = time.NewTimer(requestTimeout)\n\t\t} else {\n\t\t\tif !call.timer.Stop() {\n\t\t\t\tselect {\n\t\t\t\tcase <-call.timer.C:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t}\n\t\t\tcall.timer.Reset(requestTimeout)\n\t\t}\n\t\ttimeoutCh = call.timer.C\n\t}\n\n\tvar ctxDone <-chan struct{}\n\tif ctx != nil {\n\t\tctxDone = ctx.Done()\n\t}\n\n\tselect {\n\tcase resp := <-call.resp:\n\t\tstopWaiting = true\n\t\tif resp.err != nil {\n\t\t\tc.releaseReadFramer(resp.framer)\n\t\t\tif !c.Closed() {\n\t\t\t\t// if the connection is closed then we cant release the stream,\n\t\t\t\t// this is because the request is still outstanding and we have\n\t\t\t\t// been handed another error from another stream which caused the\n\t\t\t\t// connection to close.\n\t\t\t\treleaseStream = true\n\t\t\t\trecycleCall = true\n\t\t\t}\n\t\t\treturn nil, &QueryError{err: resp.err, potentiallyExecuted: true}\n\t\t}\n\t\t// dont release the stream if detect a timeout as another request can reuse\n\t\t// that stream and get a response for the old request, which we have no\n\t\t// easy way of detecting.\n\t\t//\n\t\t// Ensure that the stream is not released if there are potentially outstanding\n\t\t// requests on the stream to prevent nil pointer dereferences in recv().\n\t\treleaseStream = true\n\t\trecycleCall = true\n\n\t\tif v := resp.framer.header.Version.Version(); v != c.version {\n\t\t\tc.releaseReadFramer(resp.framer)\n\t\t\treturn nil, &QueryError{err: 
NewErrProtocol(\"unexpected protocol version in response: got %d expected %d\", v, c.version), potentiallyExecuted: true}\n\t\t}\n\n\t\t// NOTE: The returned framer becomes the caller's responsibility to release.\n\t\t// It is not released here to allow zero-copy access to the response data.\n\t\t// The caller must call Release() on the returned read framer when done reading the response.\n\t\treturn resp.framer, nil\n\tcase <-timeoutCh:\n\t\tstopWaiting = true\n\t\treturn nil, &QueryError{err: ErrTimeoutNoResponse, potentiallyExecuted: true, timeout: requestTimeout, inFlight: c.streams.InUse()}\n\tcase <-ctxDone:\n\t\tstopWaiting = true\n\t\treturn nil, &QueryError{err: ctx.Err(), potentiallyExecuted: true, timeout: requestTimeout, inFlight: c.streams.InUse()}\n\tcase <-c.ctx.Done():\n\t\tstopWaiting = true\n\t\treturn nil, &QueryError{err: ErrConnectionClosed, potentiallyExecuted: true}\n\t}\n}\n\n// ObservedStream observes a single request/response stream.\ntype ObservedStream struct {\n\t// Host of the connection used to send the stream.\n\tHost *HostInfo\n}\n\n// StreamObserver is notified about request/response pairs.\n// Streams are created for executing queries/batches or\n// internal requests to the database and might live longer than\n// execution of the query - the stream is still tracked until\n// response arrives so that stream IDs are not reused.\ntype StreamObserver interface {\n\t// StreamContext is called before creating a new stream.\n\t// ctx is context passed to Session.Query / Session.Batch,\n\t// but might also be an internal context (for example\n\t// for internal requests that use control connection).\n\t// StreamContext might return nil if it is not interested\n\t// in the details of this stream.\n\t// StreamContext is called before the stream is created\n\t// and the returned StreamObserverContext might be discarded\n\t// without any methods called on the StreamObserverContext if\n\t// creation of the stream fails.\n\t// Note that if you 
don't need to track per-stream data,\n\t// you can always return the same StreamObserverContext.\n\tStreamContext(ctx context.Context) StreamObserverContext\n}\n\n// StreamObserverContext is notified about state of a stream.\n// A stream is started every time a request is written to the server\n// and is finished when a response is received.\n// It is abandoned when the underlying network connection is closed\n// before receiving a response.\ntype StreamObserverContext interface {\n\t// StreamStarted is called when the stream is started.\n\t// This happens just before a request is written to the wire.\n\tStreamStarted(observedStream ObservedStream)\n\n\t// StreamAbandoned is called when we stop waiting for response.\n\t// This happens when the underlying network connection is closed.\n\t// StreamFinished won't be called if StreamAbandoned is.\n\tStreamAbandoned(observedStream ObservedStream)\n\n\t// StreamFinished is called when we receive a response for the stream.\n\tStreamFinished(observedStream ObservedStream)\n}\n\ntype preparedStatment struct {\n\tid       []byte\n\tresponse resultMetadata\n\trequest  preparedMetadata\n}\n\ntype inflightPrepare struct {\n\tdone chan struct{}\n\terr  error\n\n\tpreparedStatment *preparedStatment\n}\n\nfunc (c *Conn) prepareStatement(ctx context.Context, stmt string, tracer Tracer, requestTimeout time.Duration) (*preparedStatment, error) {\n\tcacheKey := c.session.stmtsLRU.keyFor(c.host.HostID(), c.currentKeyspace, stmt)\n\tflight, ok := c.session.stmtsLRU.execIfMissing(cacheKey, func(cache *lru.Cache[stmtCacheKey]) *inflightPrepare {\n\t\tflight := &inflightPrepare{\n\t\t\tdone: make(chan struct{}),\n\t\t}\n\t\tcache.Add(cacheKey, flight)\n\t\treturn flight\n\t})\n\n\tif !ok {\n\t\tgo func() {\n\t\t\tdefer close(flight.done)\n\n\t\t\tprep := &writePrepareFrame{\n\t\t\t\tstatement: stmt,\n\t\t\t}\n\t\t\tif c.version > protoVersion4 {\n\t\t\t\tprep.keyspace = c.currentKeyspace\n\t\t\t}\n\n\t\t\t// we won the race to do the load, 
if our context is canceled we shouldnt\n\t\t\t// stop the load as other callers are waiting for it but this caller should get\n\t\t\t// their context cancelled error.\n\t\t\tframer, err := c.exec(c.ctx, prep, tracer, requestTimeout)\n\t\t\tif err != nil {\n\t\t\t\tflight.err = err\n\t\t\t\tc.session.stmtsLRU.remove(cacheKey)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer framer.Release()\n\n\t\t\tframe, err := framer.parseFrame()\n\t\t\tif err != nil {\n\t\t\t\tflight.err = err\n\t\t\t\tc.session.stmtsLRU.remove(cacheKey)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// TODO(zariel): tidy this up, simplify handling of frame parsing so its not duplicated\n\t\t\t// everytime we need to parse a frame.\n\t\t\tif len(framer.traceID) > 0 && tracer != nil {\n\t\t\t\ttracer.Trace(framer.traceID)\n\t\t\t}\n\n\t\t\tswitch x := frame.(type) {\n\t\t\tcase *resultPreparedFrame:\n\t\t\t\tflight.preparedStatment = &preparedStatment{\n\t\t\t\t\tid:       x.preparedID,\n\t\t\t\t\trequest:  x.reqMeta,\n\t\t\t\t\tresponse: x.respMeta,\n\t\t\t\t}\n\t\t\tcase error:\n\t\t\t\tflight.err = x\n\t\t\tdefault:\n\t\t\t\tflight.err = NewErrProtocol(\"Unknown type in response to prepare frame: %s\", x)\n\t\t\t}\n\n\t\t\tif flight.err != nil {\n\t\t\t\tc.session.stmtsLRU.remove(cacheKey)\n\t\t\t}\n\t\t}()\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tcase <-flight.done:\n\t\treturn flight.preparedStatment, flight.err\n\t}\n}\n\nfunc marshalQueryValue(typ TypeInfo, value any, dst *queryValues) error {\n\tif named, ok := value.(*namedValue); ok {\n\t\tdst.name = named.name\n\t\tvalue = named.value\n\t}\n\n\tif _, ok := value.(unsetColumn); !ok {\n\t\tval, err := Marshal(typ, value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdst.value = val\n\t} else {\n\t\tdst.isUnset = true\n\t}\n\n\treturn nil\n}\n\nfunc (c *Conn) executeQuery(ctx context.Context, qry *Query) (iter *Iter) {\n\tparams := queryParams{\n\t\tconsistency: qry.cons,\n\t}\n\n\t// frame checks that it is not 
0\n\tparams.serialConsistency = qry.serialCons\n\tparams.defaultTimestamp = qry.defaultTimestamp\n\tparams.defaultTimestampValue = qry.defaultTimestampValue\n\n\tif len(qry.pageState) > 0 {\n\t\tparams.pagingState = qry.pageState\n\t}\n\tif qry.pageSize > 0 {\n\t\tparams.pageSize = qry.pageSize\n\t}\n\tif c.version > protoVersion4 {\n\t\tparams.keyspace = c.currentKeyspace\n\t}\n\n\tvar (\n\t\tframe frameBuilder\n\t\tinfo  *preparedStatment\n\t)\n\n\tif !qry.skipPrepare && qry.shouldPrepare() {\n\t\t// Prepare all DML queries. Other queries can not be prepared.\n\t\tvar err error\n\t\tinfo, err = c.prepareStatement(ctx, qry.stmt, qry.trace, qry.GetRequestTimeout())\n\t\tif err != nil {\n\t\t\treturn &Iter{err: err}\n\t\t}\n\n\t\tvalues := qry.values\n\t\tif qry.binding != nil {\n\t\t\tvalues, err = qry.binding(&QueryInfo{\n\t\t\t\tId:          info.id,\n\t\t\t\tArgs:        info.request.columns,\n\t\t\t\tRval:        info.response.columns,\n\t\t\t\tPKeyColumns: info.request.pkeyColumns,\n\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\treturn &Iter{err: err}\n\t\t\t}\n\t\t}\n\n\t\tif len(values) != info.request.actualColCount {\n\t\t\treturn &Iter{err: fmt.Errorf(\"gocql: expected %d values send got %d\", info.request.actualColCount, len(values))}\n\t\t}\n\n\t\tparams.values = make([]queryValues, len(values))\n\t\tfor i := 0; i < len(values); i++ {\n\t\t\tv := &params.values[i]\n\t\t\tvalue := values[i]\n\t\t\ttyp := info.request.columns[i].TypeInfo\n\t\t\tif err := marshalQueryValue(typ, value, v); err != nil {\n\t\t\t\treturn &Iter{err: err}\n\t\t\t}\n\t\t}\n\n\t\t// if the metadata was not present in the response then we should not skip it\n\t\tparams.skipMeta = !(c.session.cfg.DisableSkipMetadata || qry.disableSkipMetadata) && len(info.response.columns) != 0\n\n\t\tframe = &writeExecuteFrame{\n\t\t\tpreparedID:    info.id,\n\t\t\tparams:        params,\n\t\t\tcustomPayload: qry.customPayload,\n\t\t}\n\n\t\t// Set \"lwt\", keyspace\", \"table\" property in the query 
if it is present in preparedMetadata\n\t\tqry.routingInfo.mu.Lock()\n\t\tqry.routingInfo.lwt = info.request.lwt\n\t\tqry.routingInfo.keyspace = info.request.keyspace\n\t\tqry.routingInfo.table = info.request.table\n\t\tqry.routingInfo.mu.Unlock()\n\t} else {\n\t\tframe = &writeQueryFrame{\n\t\t\tstatement:     qry.stmt,\n\t\t\tparams:        params,\n\t\t\tcustomPayload: qry.customPayload,\n\t\t}\n\t}\n\n\tframer, err := c.exec(ctx, frame, qry.trace, qry.GetRequestTimeout())\n\tif err != nil {\n\t\treturn &Iter{err: err}\n\t}\n\twarningHandler := WarningHandler(nil)\n\tif c.session != nil {\n\t\twarningHandler = c.session.warningHandler\n\t}\n\n\tresp, err := framer.parseFrame()\n\tif err != nil {\n\t\treturn newErrorIterWithReleasedFramer(err, framer).bindWarningHandler(qry, warningHandler)\n\t}\n\n\tif len(framer.customPayload) > 0 {\n\t\tif hint, ok := framer.customPayload[\"tablets-routing-v1\"]; ok {\n\t\t\ttablet, err := unmarshalTabletHint(hint, c.version, qry.routingInfo.keyspace, qry.routingInfo.table)\n\t\t\tif err != nil {\n\t\t\t\treturn newErrorIterWithReleasedFramer(err, framer).bindWarningHandler(qry, warningHandler)\n\t\t\t}\n\t\t\tc.session.metadataDescriber.AddTablet(tablet)\n\t\t}\n\t}\n\n\tif len(framer.traceID) > 0 && qry.trace != nil {\n\t\tqry.trace.Trace(framer.traceID)\n\t}\n\n\tswitch x := resp.(type) {\n\tcase *resultVoidFrame:\n\t\treturn (&Iter{framer: framer}).bindWarningHandler(qry, warningHandler)\n\tcase *resultRowsFrame:\n\t\titer := (&Iter{\n\t\t\tmeta:    x.meta,\n\t\t\tframer:  framer,\n\t\t\tnumRows: x.numRows,\n\t\t}).bindWarningHandler(qry, warningHandler)\n\n\t\tif params.skipMeta {\n\t\t\tif info != nil {\n\t\t\t\titer.meta = info.response\n\t\t\t\t// pagingState is already independently allocated by readBytesCopy()\n\t\t\t\t// during frame parsing, no additional copy needed.\n\t\t\t\titer.meta.pagingState = x.meta.pagingState\n\t\t\t} else {\n\t\t\t\treturn newErrorIterWithReleasedFramer(errors.New(\"gocql: did not receive 
metadata but prepared info is nil\"), framer).bindWarningHandler(qry, warningHandler)\n\t\t\t}\n\t\t}\n\n\t\tif x.meta.morePages() && !qry.disableAutoPage {\n\t\t\tnewQry := new(Query)\n\t\t\t*newQry = *qry\n\t\t\tnewQry.pageState = x.meta.pagingState\n\t\t\tnewQry.metrics = &queryMetrics{m: make(map[UUID]*hostMetrics)}\n\n\t\t\titer.next = newNextIter(newQry, int((1-qry.prefetch)*float64(x.numRows)))\n\n\t\t\tif iter.next.pos < 1 {\n\t\t\t\titer.next.pos = 1\n\t\t\t}\n\t\t}\n\n\t\treturn iter\n\tcase *resultKeyspaceFrame:\n\t\treturn (&Iter{framer: framer}).bindWarningHandler(qry, warningHandler)\n\tcase *frm.SchemaChangeKeyspace, *frm.SchemaChangeTable, *frm.SchemaChangeFunction, *frm.SchemaChangeAggregate, *frm.SchemaChangeType:\n\t\titer := (&Iter{framer: framer}).bindWarningHandler(qry, warningHandler)\n\t\tif err := c.awaitSchemaAgreement(ctx); err != nil {\n\t\t\t// TODO: should have this behind a flag\n\t\t\tc.logger.Println(err)\n\t\t}\n\t\t// dont return an error from this, might be a good idea to give a warning\n\t\t// though. 
The impact of this returning an error would be that the cluster\n\t\t// is not consistent with regards to its schema.\n\t\treturn iter\n\tcase *RequestErrUnprepared:\n\t\tstmtCacheKey := c.session.stmtsLRU.keyFor(c.host.HostID(), c.currentKeyspace, qry.stmt)\n\t\tc.session.stmtsLRU.evictPreparedID(stmtCacheKey, x.StatementId)\n\t\tframer.Release()\n\t\treturn c.executeQuery(ctx, qry)\n\tcase error:\n\t\treturn newErrorIterWithReleasedFramer(x, framer).bindWarningHandler(qry, warningHandler)\n\tdefault:\n\t\treturn newErrorIterWithReleasedFramer(NewErrProtocol(\"Unknown type in response to execute query (%T): %s\", x, x), framer).bindWarningHandler(qry, warningHandler)\n\t}\n}\n\nfunc (c *Conn) Pick(qry *Query) *Conn {\n\tif c.Closed() {\n\t\treturn nil\n\t}\n\treturn c\n}\n\nfunc (c *Conn) Closed() bool {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\treturn c.closed\n}\n\nfunc (c *Conn) Address() string {\n\treturn c.addr\n}\n\nfunc (c *Conn) AvailableStreams() int {\n\treturn c.streams.Available()\n}\n\nfunc useKeyspaceStmt(keyspace string) string {\n\treturn `USE \"` + strings.ReplaceAll(keyspace, `\"`, `\"\"`) + `\"`\n}\n\nfunc (c *Conn) UseKeyspace(keyspace string) error {\n\tq := &writeQueryFrame{statement: useKeyspaceStmt(keyspace)}\n\tq.params.consistency = c.session.cons\n\n\tframer, err := c.exec(c.ctx, q, nil, c.cfg.ConnectTimeout)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer framer.Release()\n\n\tresp, err := framer.parseFrame()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch x := resp.(type) {\n\tcase *resultKeyspaceFrame:\n\tcase error:\n\t\treturn x\n\tdefault:\n\t\treturn NewErrProtocol(\"unknown frame in response to USE: %v\", x)\n\t}\n\n\tc.currentKeyspace = keyspace\n\n\treturn nil\n}\n\nfunc (c *Conn) executeBatch(ctx context.Context, batch *Batch) (iter *Iter) {\n\tn := len(batch.Entries)\n\treq := &writeBatchFrame{\n\t\ttyp:                   batch.Type,\n\t\tstatements:            make([]batchStatment, n),\n\t\tconsistency:           
batch.Cons,\n\t\tserialConsistency:     batch.serialCons,\n\t\tdefaultTimestamp:      batch.defaultTimestamp,\n\t\tdefaultTimestampValue: batch.defaultTimestampValue,\n\t\tcustomPayload:         batch.CustomPayload,\n\t}\n\n\tstmts := make(map[string]string, len(batch.Entries))\n\n\thasLwtEntries := false\n\n\tfor i := 0; i < n; i++ {\n\t\tentry := &batch.Entries[i]\n\t\tb := &req.statements[i]\n\n\t\tif len(entry.Args) > 0 || entry.binding != nil {\n\t\t\tinfo, err := c.prepareStatement(batch.Context(), entry.Stmt, batch.trace, batch.GetRequestTimeout())\n\t\t\tif err != nil {\n\t\t\t\treturn &Iter{err: err}\n\t\t\t}\n\n\t\t\tvar values []any\n\t\t\tif entry.binding == nil {\n\t\t\t\tvalues = entry.Args\n\t\t\t} else {\n\t\t\t\tvalues, err = entry.binding(&QueryInfo{\n\t\t\t\t\tId:          info.id,\n\t\t\t\t\tArgs:        info.request.columns,\n\t\t\t\t\tRval:        info.response.columns,\n\t\t\t\t\tPKeyColumns: info.request.pkeyColumns,\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn &Iter{err: err}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(values) != info.request.actualColCount {\n\t\t\t\treturn &Iter{err: fmt.Errorf(\"gocql: batch statement %d expected %d values send got %d\", i, info.request.actualColCount, len(values))}\n\t\t\t}\n\n\t\t\tb.preparedID = info.id\n\t\t\tstmts[string(info.id)] = entry.Stmt\n\n\t\t\tb.values = make([]queryValues, info.request.actualColCount)\n\n\t\t\tfor j := 0; j < info.request.actualColCount; j++ {\n\t\t\t\tv := &b.values[j]\n\t\t\t\tvalue := values[j]\n\t\t\t\ttyp := info.request.columns[j].TypeInfo\n\t\t\t\tif err := marshalQueryValue(typ, value, v); err != nil {\n\t\t\t\t\treturn &Iter{err: err}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !hasLwtEntries && info.request.lwt {\n\t\t\t\thasLwtEntries = true\n\t\t\t}\n\t\t} else {\n\t\t\tb.statement = entry.Stmt\n\t\t}\n\t}\n\n\t// The batch is considered to be conditional if even one of the\n\t// statements is conditional.\n\tbatch.routingInfo.mu.Lock()\n\tbatch.routingInfo.lwt = 
hasLwtEntries\n\tbatch.routingInfo.mu.Unlock()\n\n\t// TODO: should batch support tracing?\n\tframer, err := c.exec(batch.Context(), req, batch.trace, batch.GetRequestTimeout())\n\tif err != nil {\n\t\treturn &Iter{err: err}\n\t}\n\twarningHandler := WarningHandler(nil)\n\tif c.session != nil {\n\t\twarningHandler = c.session.warningHandler\n\t}\n\n\tresp, err := framer.parseFrame()\n\tif err != nil {\n\t\treturn newErrorIterWithReleasedFramer(err, framer).bindWarningHandler(batch, warningHandler)\n\t}\n\n\tif len(framer.traceID) > 0 && batch.trace != nil {\n\t\tbatch.trace.Trace(framer.traceID)\n\t}\n\n\tswitch x := resp.(type) {\n\tcase *resultVoidFrame:\n\t\treturn (&Iter{framer: framer}).bindWarningHandler(batch, warningHandler)\n\tcase *RequestErrUnprepared:\n\t\tstmt, found := stmts[string(x.StatementId)]\n\t\tif found {\n\t\t\tkey := c.session.stmtsLRU.keyFor(c.host.HostID(), c.currentKeyspace, stmt)\n\t\t\tc.session.stmtsLRU.evictPreparedID(key, x.StatementId)\n\t\t}\n\t\tframer.Release()\n\t\treturn c.executeBatch(ctx, batch)\n\tcase *resultRowsFrame:\n\t\titer := (&Iter{\n\t\t\tmeta:    x.meta,\n\t\t\tframer:  framer,\n\t\t\tnumRows: x.numRows,\n\t\t}).bindWarningHandler(batch, warningHandler)\n\n\t\treturn iter\n\tcase error:\n\t\treturn newErrorIterWithReleasedFramer(x, framer).bindWarningHandler(batch, warningHandler)\n\tdefault:\n\t\treturn newErrorIterWithReleasedFramer(NewErrProtocol(\"Unknown type in response to batch statement: %s\", x), framer).bindWarningHandler(batch, warningHandler)\n\t}\n}\n\nfunc (c *Conn) querySystem(ctx context.Context, query string, values ...any) *Iter {\n\tq := c.session.Query(query+c.usingTimeoutClause, values...).Consistency(One).Trace(nil)\n\tq.skipPrepare = true\n\tq.disableSkipMetadata = true\n\t// we want to keep the query on this connection\n\tq.conn = c\n\tq.SetRequestTimeout(c.systemRequestTimeout)\n\treturn c.executeQuery(ctx, q)\n}\n\nconst qrySystemPeers = \"SELECT * FROM system.peers\"\nconst 
qrySystemPeersV2 = \"SELECT * FROM system.peers_v2\"\n\nconst qrySystemLocal = \"SELECT * FROM system.local WHERE key='local'\"\n\nfunc getSchemaAgreement(queryLocalSchemasRows []string, querySystemPeersRows []schemaAgreementHost, logger StdLogger) (err error) {\n\tversions := make(map[string]struct{})\n\n\tfor _, row := range querySystemPeersRows {\n\t\tif !row.IsValid() {\n\t\t\tlogger.Printf(\"invalid peer or peer with empty schema_version: peer=%q\", row)\n\t\t\tcontinue\n\t\t}\n\t\tversions[row.SchemaVersion.String()] = struct{}{}\n\t}\n\n\tfor _, schemaVersion := range queryLocalSchemasRows {\n\t\tversions[schemaVersion] = struct{}{}\n\t\tschemaVersion = \"\"\n\t}\n\n\tif len(versions) > 1 {\n\t\tschemas := make([]string, 0, len(versions))\n\t\tfor schema := range versions {\n\t\t\tschemas = append(schemas, schema)\n\t\t}\n\n\t\treturn &ErrSchemaMismatch{schemas: schemas}\n\t}\n\n\treturn nil\n}\n\ntype schemaAgreementHost struct {\n\tDataCenter    string\n\tRack          string\n\tRPCAddress    string\n\tHostID        UUID\n\tSchemaVersion UUID\n}\n\nfunc (h *schemaAgreementHost) IsValid() bool {\n\treturn h.DataCenter != \"\" && h.Rack != \"\" && h.HostID.String() != \"\" && h.SchemaVersion.String() != \"\"\n}\n\nfunc (c *Conn) awaitSchemaAgreement(ctx context.Context) error {\n\tendDeadline := time.Now().Add(c.session.cfg.MaxWaitSchemaAgreement)\n\n\tvar err error\n\tticker := time.NewTicker(200 * time.Millisecond) // Create a ticker that ticks every 200ms\n\tdefer ticker.Stop()\n\n\twaitForNextTick := func() error {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase <-ticker.C:\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tfor time.Now().Before(endDeadline) {\n\t\tvar iter *Iter\n\t\tif c.getIsSchemaV2() {\n\t\t\titer = c.querySystem(ctx, \"SELECT host_id, data_center, rack, schema_version, preferred_ip FROM system.peers_v2\")\n\t\t} else {\n\t\t\titer = c.querySystem(ctx, \"SELECT host_id, data_center, rack, schema_version, rpc_address FROM 
system.peers\")\n\t\t}\n\t\t// data_center, rack, host_id, schema_version, rpc_address\n\t\tvar hosts []schemaAgreementHost\n\t\tvar tmp schemaAgreementHost\n\t\tfor iter.Scan(&tmp.HostID, &tmp.DataCenter, &tmp.Rack, &tmp.SchemaVersion, &tmp.RPCAddress) {\n\t\t\thosts = append(hosts, tmp)\n\t\t}\n\t\terr = iter.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tschemaVersions := []string{}\n\n\t\titer = c.querySystem(ctx, \"SELECT schema_version FROM system.local WHERE key='local'\")\n\n\t\tvar schemaVersion string\n\t\tfor iter.Scan(&schemaVersion) {\n\t\t\tschemaVersions = append(schemaVersions, schemaVersion)\n\t\t\tschemaVersion = \"\"\n\t\t}\n\n\t\tif err = iter.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = getSchemaAgreement(schemaVersions, hosts, c.logger)\n\n\t\tif err == ErrConnectionClosed || err == nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif tickerErr := waitForNextTick(); tickerErr != nil {\n\t\t\treturn tickerErr\n\t\t}\n\t}\n\n\treturn err\n}\n\nvar (\n\tErrQueryArgLength    = errors.New(\"gocql: query argument length mismatch\")\n\tErrTimeoutNoResponse = errors.New(\"gocql: no response received from cassandra within timeout period\")\n\t// Deprecated: ErrTooManyTimeouts is no longer produced by the library.\n\t// It will be removed in a future major release.\n\tErrTooManyTimeouts     = errors.New(\"gocql: too many query timeouts on the connection\")\n\tErrConnectionClosed    = errors.New(\"gocql: connection closed waiting for response\")\n\tErrNoStreams           = errors.New(\"gocql: no streams available on connection\")\n\tErrHostDown            = errors.New(\"gocql: host is nil or down\")\n\tErrNoPool              = errors.New(\"gocql: host does not have a pool\")\n\tErrNoConnectionsInPool = errors.New(\"gocql: host pool does not have connections\")\n)\n\ntype ErrSchemaMismatch struct {\n\tschemas []string\n}\n\nfunc (e *ErrSchemaMismatch) Error() string {\n\treturn fmt.Sprintf(\"gocql: cluster schema versions not consistent: 
%+v\", e.schemas)\n}\n\ntype QueryError struct {\n\terr                 error\n\ttimeout             time.Duration\n\tinFlight            int\n\tpotentiallyExecuted bool\n\tisIdempotent        bool\n}\n\nfunc (e *QueryError) IsIdempotent() bool {\n\treturn e.isIdempotent\n}\n\nfunc (e *QueryError) PotentiallyExecuted() bool {\n\treturn e.potentiallyExecuted\n}\n\nfunc (e *QueryError) Error() string {\n\tif e.timeout > 0 {\n\t\treturn fmt.Sprintf(\"%s (timeout: %v, in-flight: %d) (potentially executed: %v)\", e.err.Error(), e.timeout, e.inFlight, e.potentiallyExecuted)\n\t}\n\treturn fmt.Sprintf(\"%s (potentially executed: %v)\", e.err.Error(), e.potentiallyExecuted)\n}\n\nfunc (e *QueryError) Unwrap() error {\n\treturn e.err\n}\n\nfunc unmarshalTabletHint(hint []byte, v uint8, keyspace, table string) (tablets.TabletInfo, error) {\n\ttabletBuilder := tablets.NewTabletInfoBuilder()\n\terr := Unmarshal(TupleTypeInfo{\n\t\tNativeType: NativeType{proto: v, typ: TypeTuple},\n\t\tElems: []TypeInfo{\n\t\t\tNativeType{typ: TypeBigInt},\n\t\t\tNativeType{typ: TypeBigInt},\n\t\t\tCollectionType{\n\t\t\t\tNativeType: NativeType{proto: v, typ: TypeList},\n\t\t\t\tElem: TupleTypeInfo{\n\t\t\t\t\tNativeType: NativeType{proto: v, typ: TypeTuple},\n\t\t\t\t\tElems: []TypeInfo{\n\t\t\t\t\t\tNativeType{proto: v, typ: TypeUUID},\n\t\t\t\t\t\tNativeType{proto: v, typ: TypeInt},\n\t\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t}, hint, []any{&tabletBuilder.FirstToken, &tabletBuilder.LastToken, &tabletBuilder.Replicas})\n\tif err != nil {\n\t\treturn tablets.TabletInfo{}, err\n\t}\n\ttabletBuilder.KeyspaceName = keyspace\n\ttabletBuilder.TableName = table\n\treturn tabletBuilder.Build()\n}\n"
  },
  {
    "path": "conn_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2012, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math/rand\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/stretchr/testify/assert\"\n\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n\n\t\"github.com/gocql/gocql/internal/streams\"\n)\n\nconst (\n\tdefaultProto = protoVersion3\n)\n\ntype brokenDNSResolver struct{}\n\nfunc (b brokenDNSResolver) LookupIP(host string) ([]net.IP, error) {\n\terr := errors.New(\"this error comes from mocked broken resolver\")\n\treturn nil, &net.DNSError{\n\t\tUnwrapErr: err,\n\t\tErr:       err.Error(),\n\t\tServer:    host,\n\t}\n}\n\nfunc TestApprove(t *testing.T) {\n\ttests := 
map[bool]bool{\n\t\tapprove(\"org.apache.cassandra.auth.PasswordAuthenticator\", []string{}):                                             true,\n\t\tapprove(\"org.apache.cassandra.auth.MutualTlsWithPasswordFallbackAuthenticator\", []string{}):                        true,\n\t\tapprove(\"org.apache.cassandra.auth.MutualTlsAuthenticator\", []string{}):                                            true,\n\t\tapprove(\"com.instaclustr.cassandra.auth.SharedSecretAuthenticator\", []string{}):                                    true,\n\t\tapprove(\"com.datastax.bdp.cassandra.auth.DseAuthenticator\", []string{}):                                            true,\n\t\tapprove(\"io.aiven.cassandra.auth.AivenAuthenticator\", []string{}):                                                  true,\n\t\tapprove(\"com.amazon.helenus.auth.HelenusAuthenticator\", []string{}):                                                true,\n\t\tapprove(\"com.ericsson.bss.cassandra.ecaudit.auth.AuditAuthenticator\", []string{}):                                  true,\n\t\tapprove(\"com.scylladb.auth.SaslauthdAuthenticator\", []string{}):                                                    true,\n\t\tapprove(\"com.scylladb.auth.TransitionalAuthenticator\", []string{}):                                                 true,\n\t\tapprove(\"com.instaclustr.cassandra.auth.InstaclustrPasswordAuthenticator\", []string{}):                             true,\n\t\tapprove(\"com.apache.cassandra.auth.FakeAuthenticator\", []string{}):                                                 true,\n\t\tapprove(\"com.apache.cassandra.auth.FakeAuthenticator\", nil):                                                        true,\n\t\tapprove(\"com.apache.cassandra.auth.FakeAuthenticator\", []string{\"com.apache.cassandra.auth.FakeAuthenticator\"}):    true,\n\t\tapprove(\"com.apache.cassandra.auth.FakeAuthenticator\", []string{\"com.apache.cassandra.auth.NotFakeAuthenticator\"}): false,\n\t}\n\tfor k, v := range tests {\n\t\tif k 
!= v {\n\t\t\tt.Fatalf(\"expected '%v', got '%v'\", k, v)\n\t\t}\n\t}\n}\n\nfunc testCluster(proto frm.ProtoVersion, addresses ...string) *ClusterConfig {\n\tcluster := NewCluster(addresses...)\n\tcluster.ProtoVersion = int(proto)\n\tcluster.disableControlConn = true\n\tcluster.PoolConfig.HostSelectionPolicy = RoundRobinHostPolicy()\n\treturn cluster\n}\n\nfunc TestSimple(t *testing.T) {\n\tsrv := NewTestServer(t, defaultProto, context.Background())\n\tdefer srv.Stop()\n\n\tcluster := testCluster(defaultProto, srv.Address)\n\tdb, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"0x%x: NewCluster: %v\", defaultProto, err)\n\t}\n\n\tif err := db.Query(\"void\").Exec(); err != nil {\n\t\tt.Fatalf(\"0x%x: %v\", defaultProto, err)\n\t}\n}\n\nfunc TestSSLSimple(t *testing.T) {\n\tsrv := NewSSLTestServer(t, defaultProto, context.Background())\n\tdefer srv.Stop()\n\n\tdb, err := createTestSslCluster(srv.Address, defaultProto, true).CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"0x%x: NewCluster: %v\", defaultProto, err)\n\t}\n\n\tif err := db.Query(\"void\").Exec(); err != nil {\n\t\tt.Fatalf(\"0x%x: %v\", defaultProto, err)\n\t}\n}\n\nfunc TestSSLSimpleNoClientCert(t *testing.T) {\n\tsrv := NewSSLTestServer(t, defaultProto, context.Background())\n\tdefer srv.Stop()\n\n\tdb, err := createTestSslCluster(srv.Address, defaultProto, false).CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"0x%x: NewCluster: %v\", defaultProto, err)\n\t}\n\n\tif err := db.Query(\"void\").Exec(); err != nil {\n\t\tt.Fatalf(\"0x%x: %v\", defaultProto, err)\n\t}\n}\n\nfunc createTestSslCluster(addr string, proto frm.ProtoVersion, useClientCert bool) *ClusterConfig {\n\tcluster := testCluster(proto, addr)\n\tsslOpts := &SslOptions{\n\t\tCaPath:                 \"testdata/pki/ca.crt\",\n\t\tEnableHostVerification: false,\n\t}\n\n\tif useClientCert {\n\t\tsslOpts.CertPath = \"testdata/pki/gocql.crt\"\n\t\tsslOpts.KeyPath = \"testdata/pki/gocql.key\"\n\t}\n\n\tcluster.SslOpts = 
sslOpts\n\treturn cluster\n}\n\nfunc TestClosed(t *testing.T) {\n\tt.Skip(\"Skipping the execution of TestClosed for now to try to concentrate on more important test failures on Travis\")\n\n\tsrv := NewTestServer(t, defaultProto, context.Background())\n\tdefer srv.Stop()\n\n\tsession, err := newTestSession(defaultProto, srv.Address)\n\tif err != nil {\n\t\tt.Fatalf(\"0x%x: NewCluster: %v\", defaultProto, err)\n\t}\n\n\tsession.Close()\n\n\tif err := session.Query(\"void\").Exec(); err != ErrSessionClosed {\n\t\tt.Fatalf(\"0x%x: expected %#v, got %#v\", defaultProto, ErrSessionClosed, err)\n\t}\n}\n\nfunc newTestSession(proto frm.ProtoVersion, addresses ...string) (*Session, error) {\n\treturn testCluster(proto, addresses...).CreateSession()\n}\n\nvar _ DNSResolver = brokenDNSResolver{}\n\nfunc TestDNSLookupConnected(t *testing.T) {\n\tlog := &testLogger{}\n\n\t// Override the default DNS resolver and restore at the end\n\n\tsrv := NewTestServer(t, defaultProto, context.Background())\n\tdefer srv.Stop()\n\n\tcluster := NewCluster(\"cassandra1.invalid\", srv.Address, \"cassandra2.invalid\")\n\tcluster.Logger = log\n\tcluster.ProtoVersion = int(defaultProto)\n\tcluster.disableControlConn = true\n\tcluster.DNSResolver = brokenDNSResolver{}\n\n\t// CreateSession() should attempt to resolve the DNS name \"cassandraX.invalid\"\n\t// and fail, but continue to connect via srv.Address\n\t_, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatal(\"CreateSession() should have connected\")\n\t}\n\n\tif !strings.Contains(log.String(), \"failed to resolve endpoint\") {\n\t\tt.Fatalf(\"Expected to receive 'failed to resolve endpoint' log message  - got '%s' instead\", log.String())\n\t}\n}\n\nfunc TestDNSLookupError(t *testing.T) {\n\tlog := &testLogger{}\n\n\t// Override the default DNS resolver and restore at the end\n\thosts := []string{\"cassandra1.invalid\", \"cassandra2.invalid\"}\n\n\tcluster := NewCluster(hosts...)\n\tcluster.Logger = log\n\tcluster.ProtoVersion = 
int(defaultProto)\n\tcluster.disableControlConn = true\n\tcluster.DNSResolver = brokenDNSResolver{}\n\n\t// CreateSession() should attempt to resolve each DNS name \"cassandraX.invalid\"\n\t// and fail since it could not resolve any dns entries\n\t_, err := cluster.CreateSession()\n\tif err == nil {\n\t\tt.Fatal(\"CreateSession() should have returned an error\")\n\t}\n\n\tif !strings.Contains(log.String(), \"failed to resolve endpoint\") {\n\t\tt.Fatalf(\"Expected to receive 'failed to resolve endpoint' log message  - got '%s' instead\", log.String())\n\t}\n\n\tif !strings.Contains(err.Error(), \"unable to create session: failed to resolve any of the provided hostnames\") {\n\t\tt.Fatalf(\"Expected CreateSession() to fail with error message that contains 'unable to create session: failed to resolve any of the provided hostnames'\")\n\t}\n\n\tfor _, host := range hosts {\n\t\texpected := fmt.Sprintf(\"failed to resolve endpoint \\\"%s\\\": lookup  on %s: this error comes from mocked broken resolver\", host, host)\n\t\tif !strings.Contains(err.Error(), expected) {\n\t\t\tt.Fatalf(\"Expected to fail with error message that contains '%s'\", expected)\n\t\t}\n\t}\n}\n\nfunc TestStartupTimeout(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tlog := &testLogger{}\n\n\tsrv := NewTestServer(t, defaultProto, ctx)\n\tdefer srv.Stop()\n\n\t// Tell the server to never respond to Startup frame\n\tatomic.StoreInt32(&srv.TimeoutOnStartup, 1)\n\n\tstartTime := time.Now()\n\tcluster := NewCluster(srv.Address)\n\tcluster.Logger = log\n\tcluster.ProtoVersion = int(defaultProto)\n\tcluster.disableControlConn = true\n\t// Set very long query connection timeout\n\t// so we know CreateSession() is using the ConnectTimeout\n\tcluster.Timeout = time.Second * 5\n\tcluster.ConnectTimeout = 600 * time.Millisecond\n\n\t// Create session should timeout during connect attempt\n\t_, err := cluster.CreateSession()\n\tif err == nil {\n\t\tt.Fatal(\"CreateSession() should 
have returned a timeout error\")\n\t}\n\n\telapsed := time.Since(startTime)\n\tif elapsed > time.Second*5 {\n\t\tt.Fatal(\"ConnectTimeout is not respected\")\n\t}\n\n\tif !errors.Is(err, ErrNoConnectionsStarted) {\n\t\tt.Fatalf(\"Expected to receive no connections error - got '%s'\", err)\n\t}\n\n\tif !strings.Contains(log.String(), \"no response to connection startup within timeout\") && !strings.Contains(log.String(), \"no response received from cassandra within timeout period\") {\n\t\tt.Fatalf(\"Expected to receive timeout log message  - got '%s'\", log.String())\n\t}\n\n\tcancel()\n}\n\nfunc TestTimeout(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tsrv := NewTestServer(t, defaultProto, ctx)\n\tdefer srv.Stop()\n\n\tdb, err := newTestSession(defaultProto, srv.Address)\n\tif err != nil {\n\t\tt.Fatalf(\"NewCluster: %v\", err)\n\t}\n\tdefer db.Close()\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tselect {\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tt.Errorf(\"no timeout\")\n\t\tcase <-ctx.Done():\n\t\t}\n\t}()\n\n\tif err := db.Query(\"kill\").WithContext(ctx).Exec(); err == nil {\n\t\tt.Fatal(\"expected error got nil\")\n\t}\n\tcancel()\n\n\twg.Wait()\n}\n\nfunc TestCancel(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tsrv := NewTestServer(t, defaultProto, ctx)\n\tdefer srv.Stop()\n\n\tcluster := testCluster(defaultProto, srv.Address)\n\tcluster.Timeout = 1 * time.Second\n\tdb, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"NewCluster: %v\", err)\n\t}\n\tdefer db.Close()\n\n\tqry := db.Query(\"timeout\").WithContext(ctx)\n\n\t// Make sure we finish the query without leftovers\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\n\tgo func() {\n\t\terr = qry.Exec()\n\t\twg.Done()\n\t}()\n\t// The query will timeout after about 1 seconds, so cancel it after a short pause\n\ttime.AfterFunc(20*time.Millisecond, 
cancel)\n\twg.Wait()\n\n\tif !errors.Is(err, context.Canceled) {\n\t\tt.Fatalf(\"expected to get context cancel error: '%v', got '%v'\", context.Canceled, err)\n\t}\n}\n\ntype testQueryObserver struct {\n\tmetrics map[string]*hostMetrics\n\tverbose bool\n\tlogger  StdLogger\n}\n\nfunc (o *testQueryObserver) ObserveQuery(ctx context.Context, q ObservedQuery) {\n\thost := q.Host.ConnectAddress().String()\n\to.metrics[host] = q.Metrics\n\tif o.verbose {\n\t\to.logger.Printf(\"Observed query %q. Returned %v rows, took %v on host %q with %v attempts and total latency %v. Error: %q\\n\",\n\t\t\tq.Statement, q.Rows, q.End.Sub(q.Start), host, q.Metrics.Attempts, q.Metrics.TotalLatency, q.Err)\n\t}\n}\n\nfunc (o *testQueryObserver) GetMetrics(host *HostInfo) *hostMetrics {\n\treturn o.metrics[host.ConnectAddress().String()]\n}\n\n// TestQueryRetry will test to make sure that gocql will execute\n// the exact amount of retry queries designated by the user.\nfunc TestQueryRetry(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tsrv := NewTestServer(t, defaultProto, ctx)\n\tdefer srv.Stop()\n\n\tdb, err := newTestSession(defaultProto, srv.Address)\n\tif err != nil {\n\t\tt.Fatalf(\"NewCluster: %v\", err)\n\t}\n\tdefer db.Close()\n\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tt.Errorf(\"no timeout\")\n\t\t}\n\t}()\n\n\trt := &SimpleRetryPolicy{NumRetries: 1}\n\n\tqry := db.Query(\"kill\").RetryPolicy(rt)\n\tif err := qry.Exec(); err == nil {\n\t\tt.Fatalf(\"expected error\")\n\t}\n\n\trequests := atomic.LoadInt64(&srv.nKillReq)\n\tattempts := qry.Attempts()\n\tif requests != int64(attempts) {\n\t\tt.Fatalf(\"expected requests %v to match query attempts %v\", requests, attempts)\n\t}\n\n\t// the query will only be attempted once, but is being retried\n\tif requests != int64(rt.NumRetries) {\n\t\tt.Fatalf(\"failed to retry the query %v time(s). 
Query executed %v times\", rt.NumRetries, requests-1)\n\t}\n}\n\nfunc TestQueryMultinodeWithMetrics(t *testing.T) {\n\tlog := &testLogger{}\n\tdefer func() {\n\t\tos.Stdout.WriteString(log.String())\n\t}()\n\n\t// Build a 3 node cluster to test host metric mapping\n\tvar nodes []*TestServer\n\tvar addresses = []string{\n\t\t\"127.0.0.1\",\n\t\t\"127.0.0.2\",\n\t\t\"127.0.0.3\",\n\t}\n\t// Can do with 1 context for all servers\n\tctx := context.Background()\n\tfor _, ip := range addresses {\n\t\tsrv := NewTestServerWithAddress(ip+\":0\", t, defaultProto, ctx)\n\t\tdefer srv.Stop()\n\t\tnodes = append(nodes, srv)\n\t}\n\n\tdb, err := newTestSession(defaultProto, nodes[0].Address, nodes[1].Address, nodes[2].Address)\n\tif err != nil {\n\t\tt.Fatalf(\"NewCluster: %v\", err)\n\t}\n\tdefer db.Close()\n\n\t// 1 retry per host\n\trt := &SimpleRetryPolicy{NumRetries: 3}\n\tobserver := &testQueryObserver{metrics: make(map[string]*hostMetrics), verbose: false, logger: log}\n\tqry := db.Query(\"kill\").RetryPolicy(rt).Observer(observer).Idempotent(true)\n\tif err := qry.Exec(); err == nil {\n\t\tt.Fatalf(\"expected error\")\n\t}\n\n\tfor i, ip := range addresses {\n\t\tvar host *HostInfo\n\t\tfor _, clusterHost := range db.GetHosts() {\n\t\t\tif clusterHost.connectAddress.String() == ip {\n\t\t\t\thost = clusterHost\n\t\t\t}\n\t\t}\n\n\t\tif host == nil {\n\t\t\tt.Fatalf(\"failed to observe host info for address %v\", ip)\n\t\t}\n\n\t\tqueryMetric := qry.metrics.hostMetrics(host)\n\t\tobservedMetrics := observer.GetMetrics(host)\n\n\t\trequests := int(atomic.LoadInt64(&nodes[i].nKillReq))\n\t\thostAttempts := queryMetric.Attempts\n\t\tif requests != hostAttempts {\n\t\t\tt.Fatalf(\"expected requests %v to match query attempts %v\", requests, hostAttempts)\n\t\t}\n\n\t\tif hostAttempts != observedMetrics.Attempts {\n\t\t\tt.Fatalf(\"expected observed attempts %v to match query attempts %v on host %v\", observedMetrics.Attempts, hostAttempts, ip)\n\t\t}\n\n\t\thostLatency := 
queryMetric.TotalLatency\n\t\tobservedLatency := observedMetrics.TotalLatency\n\t\tif hostLatency != observedLatency {\n\t\t\tt.Fatalf(\"expected observed latency %v to match query latency %v on host %v\", observedLatency, hostLatency, ip)\n\t\t}\n\t}\n\t// the query will only be attempted once, but is being retried\n\tattempts := qry.Attempts()\n\tif attempts != rt.NumRetries {\n\t\tt.Fatalf(\"failed to retry the query %v time(s). Query executed %v times\", rt.NumRetries, attempts)\n\t}\n\n}\n\ntype testRetryPolicy struct {\n\tNumRetries int\n}\n\nfunc (t *testRetryPolicy) Attempt(qry RetryableQuery) bool {\n\treturn qry.Attempts() <= t.NumRetries\n}\nfunc (t *testRetryPolicy) GetRetryType(err error) RetryType {\n\tvar executedErr *QueryError\n\tif errors.As(err, &executedErr) && executedErr.PotentiallyExecuted() && !executedErr.IsIdempotent() {\n\t\treturn Rethrow\n\t}\n\treturn Retry\n}\n\nfunc TestSpeculativeExecution(t *testing.T) {\n\tlog := &testLogger{}\n\tdefer func() {\n\t\tos.Stdout.WriteString(log.String())\n\t}()\n\n\t// Build a 3 node cluster\n\tvar nodes []*TestServer\n\tvar addresses = []string{\n\t\t\"127.0.0.1\",\n\t\t\"127.0.0.2\",\n\t\t\"127.0.0.3\",\n\t}\n\t// Can do with 1 context for all servers\n\tctx := context.Background()\n\tfor _, ip := range addresses {\n\t\tsrv := NewTestServerWithAddress(ip+\":0\", t, defaultProto, ctx)\n\t\tdefer srv.Stop()\n\t\tnodes = append(nodes, srv)\n\t}\n\n\tdb, err := newTestSession(defaultProto, nodes[0].Address, nodes[1].Address, nodes[2].Address)\n\tif err != nil {\n\t\tt.Fatalf(\"NewCluster: %v\", err)\n\t}\n\tdefer db.Close()\n\n\t// Create a test retry policy, 6 retries will cover 2 executions\n\trt := &testRetryPolicy{NumRetries: 8}\n\t// test Speculative policy with 1 additional execution\n\tsp := &SimpleSpeculativeExecution{NumAttempts: 1, TimeoutDelay: 200 * time.Millisecond}\n\n\t// Build the query\n\tqry := 
db.Query(\"speculative\").RetryPolicy(rt).SetSpeculativeExecutionPolicy(sp).Idempotent(true)\n\n\t// Execute the query and close, check that it doesn't error out\n\tif err := qry.Exec(); err != nil {\n\t\tt.Errorf(\"The query failed with '%v'!\\n\", err)\n\t}\n\trequests1 := atomic.LoadInt64(&nodes[0].nKillReq)\n\trequests2 := atomic.LoadInt64(&nodes[1].nKillReq)\n\trequests3 := atomic.LoadInt64(&nodes[2].nKillReq)\n\n\t// Spec Attempts == 1, so expecting to see only 1 regular + 1 speculative = 2 nodes attempted\n\tif requests1 != 0 && requests2 != 0 && requests3 != 0 {\n\t\tt.Error(\"error: all 3 nodes were attempted, should have been only 2\")\n\t}\n\n\t// Only the 4th request will generate results, so\n\tif requests1 != 4 && requests2 != 4 && requests3 != 4 {\n\t\tt.Error(\"error: none of 3 nodes was attempted 4 times!\")\n\t}\n\n\t// \"speculative\" query will succeed on one arbitrary node after 4 attempts, so\n\t// expecting to see 4 (on successful node) + not more than 2 (as cancelled on another node) == 6\n\tif requests1+requests2+requests3 > 6 {\n\t\tt.Errorf(\"error: expected to see 6 attempts, got %v\\n\", requests1+requests2+requests3)\n\t}\n}\n\n// This tests that the policy connection pool handles SSL correctly\nfunc TestPolicyConnPoolSSL(t *testing.T) {\n\tsrv := NewSSLTestServer(t, defaultProto, context.Background())\n\tdefer srv.Stop()\n\n\tcluster := createTestSslCluster(srv.Address, defaultProto, true)\n\tcluster.PoolConfig.HostSelectionPolicy = RoundRobinHostPolicy()\n\n\tdb, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create new session: %v\", err)\n\t}\n\n\tif err := db.Query(\"void\").Exec(); err != nil {\n\t\tt.Fatalf(\"query failed due to error: %v\", err)\n\t}\n\tdb.Close()\n\n\t// wait for the pool to drain\n\ttime.Sleep(100 * time.Millisecond)\n\tsize := db.pool.Size()\n\tif size != 0 {\n\t\tt.Fatalf(\"connection pool did not drain, still contains %d connections\", size)\n\t}\n}\n\nfunc TestQueryTimeout(t 
*testing.T) {\n\tsrv := NewTestServer(t, defaultProto, context.Background())\n\tdefer srv.Stop()\n\n\tcluster := testCluster(defaultProto, srv.Address)\n\t// Set the timeout arbitrarily low so that the query hits the timeout in a\n\t// timely manner.\n\tcluster.Timeout = 1 * time.Millisecond\n\n\tdb, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"NewCluster: %v\", err)\n\t}\n\tdefer db.Close()\n\n\tch := make(chan error, 1)\n\n\tgo func() {\n\t\terr := db.Query(\"timeout\").Exec()\n\t\tif err != nil {\n\t\t\tch <- err\n\t\t\treturn\n\t\t}\n\t\tt.Errorf(\"err was nil, expected to get a timeout after %v\", db.cfg.Timeout)\n\t}()\n\n\tselect {\n\tcase err := <-ch:\n\t\tif !errors.Is(err, ErrTimeoutNoResponse) {\n\t\t\tt.Fatalf(\"expected to get %v for timeout got %v\", ErrTimeoutNoResponse, err)\n\t\t}\n\tcase <-time.After(40*time.Millisecond + db.cfg.Timeout):\n\t\t// ensure that the query goroutines have been scheduled\n\t\tt.Fatalf(\"query did not timeout after %v\", db.cfg.Timeout)\n\t}\n}\n\nfunc BenchmarkSingleConn(b *testing.B) {\n\tsrv := NewTestServer(b, 3, context.Background())\n\tdefer srv.Stop()\n\n\tcluster := testCluster(protoVersion3, srv.Address)\n\t// Set the timeout arbitrarily low so that the query hits the timeout in a\n\t// timely manner.\n\tcluster.Timeout = 500 * time.Millisecond\n\tcluster.NumConns = 1\n\tdb, err := cluster.CreateSession()\n\tif err != nil {\n\t\tb.Fatalf(\"NewCluster: %v\", err)\n\t}\n\tdefer db.Close()\n\n\tb.ResetTimer()\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\terr := db.Query(\"void\").Exec()\n\t\t\tif err != nil {\n\t\t\t\tb.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc TestQueryTimeoutReuseStream(t *testing.T) {\n\tt.Skip(\"no longer tests anything\")\n\t// TODO(zariel): move this to conn test, we really just want to check what\n\t// happens when a conn is\n\n\tsrv := NewTestServer(t, defaultProto, context.Background())\n\tdefer srv.Stop()\n\n\tcluster := 
testCluster(defaultProto, srv.Address)\n\t// Set the timeout arbitrarily low so that the query hits the timeout in a\n\t// timely manner.\n\tcluster.Timeout = 1 * time.Millisecond\n\tcluster.NumConns = 1\n\n\tdb, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"NewCluster: %v\", err)\n\t}\n\tdefer db.Close()\n\n\tdb.Query(\"slow\").Exec()\n\n\terr = db.Query(\"void\").Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestQueryTimeoutClose(t *testing.T) {\n\tsrv := NewTestServer(t, defaultProto, context.Background())\n\tdefer srv.Stop()\n\n\tcluster := testCluster(defaultProto, srv.Address)\n\t// Set the timeout arbitrarily low so that the query hits the timeout in a\n\t// timely manner.\n\tcluster.Timeout = 1000 * time.Millisecond\n\tcluster.NumConns = 1\n\n\tdb, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"NewCluster: %v\", err)\n\t}\n\n\tch := make(chan error)\n\tgo func() {\n\t\terr := db.Query(\"timeout\").Exec()\n\t\tch <- err\n\t}()\n\t// ensure that the above goroutine gets sheduled\n\ttime.Sleep(50 * time.Millisecond)\n\n\tdb.Close()\n\tselect {\n\tcase err = <-ch:\n\tcase <-time.After(1 * time.Second):\n\t\tt.Fatal(\"timedout waiting to get a response once cluster is closed\")\n\t}\n\n\tif !errors.Is(err, ErrConnectionClosed) {\n\t\tt.Fatalf(\"expected to get %v or an error wrapping it, got %v\", ErrConnectionClosed, err)\n\t}\n}\n\nfunc TestStream0(t *testing.T) {\n\t// TODO: replace this with type check\n\tconst expErr = \"gocql: received unexpected frame on stream 0\"\n\n\tvar buf bytes.Buffer\n\tf := newFramer(nil, protoVersion4)\n\tf.writeHeader(0, frm.OpResult, 0)\n\tf.writeInt(frm.ResultKindVoid)\n\tf.buf[0] |= 0x80\n\tif err := f.finish(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := f.writeTo(&buf); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tconn := &Conn{\n\t\tr:       bufio.NewReader(&buf),\n\t\tstreams: streams.New(),\n\t\tlogger:  &defaultLogger{},\n\t\tcfg:     &ConnConfig{},\n\t}\n\n\terr := 
conn.recv(context.Background())\n\tif err == nil {\n\t\tt.Fatal(\"expected to get an error on stream 0\")\n\t} else if !strings.HasPrefix(err.Error(), expErr) {\n\t\tt.Fatalf(\"expected to get error prefix %q got %q\", expErr, err.Error())\n\t}\n}\n\nfunc TestContext_Timeout(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tsrv := NewTestServer(t, defaultProto, ctx)\n\tdefer srv.Stop()\n\n\tcluster := testCluster(defaultProto, srv.Address)\n\tcluster.Timeout = 5 * time.Second\n\tdb, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\tctx, cancel = context.WithCancel(ctx)\n\tcancel()\n\n\terr = db.Query(\"timeout\").WithContext(ctx).Exec()\n\tif !errors.Is(err, context.Canceled) {\n\t\tt.Fatalf(\"expected to get context cancel error: %v got %v\", context.Canceled, err)\n\t}\n}\n\ntype TestReconnectionPolicy struct {\n\tNumRetries       int\n\tGetIntervalCalls []int\n}\n\nfunc (c *TestReconnectionPolicy) GetInterval(currentRetry int) time.Duration {\n\tc.GetIntervalCalls = append(c.GetIntervalCalls, currentRetry)\n\treturn time.Duration(0)\n}\n\nfunc (c *TestReconnectionPolicy) GetMaxRetries() int {\n\treturn c.NumRetries\n}\n\nfunc TestInitialRetryPolicy(t *testing.T) {\n\tt.Parallel()\n\n\ttcase := []struct {\n\t\tNumRetries               int\n\t\tProtoVersion             int\n\t\tExpectedGetIntervalCalls []int\n\t\tExpectedErr              string\n\t}{\n\t\t{\n\t\t\tNumRetries:               1,\n\t\t\tProtoVersion:             0,\n\t\t\tExpectedGetIntervalCalls: nil,\n\t\t\tExpectedErr:              \"gocql: unable to create session: unable to connect to the cluster, last error: unable to discover protocol version:\"},\n\t\t{\n\t\t\tNumRetries:               2,\n\t\t\tProtoVersion:             0,\n\t\t\tExpectedGetIntervalCalls: []int{1},\n\t\t\tExpectedErr:              \"gocql: unable to create session: unable to connect to the cluster, last error: unable to discover 
protocol version:\"},\n\t\t{\n\t\t\tNumRetries:               3,\n\t\t\tProtoVersion:             0,\n\t\t\tExpectedGetIntervalCalls: []int{1, 2},\n\t\t\tExpectedErr:              \"gocql: unable to create session: unable to connect to the cluster, last error: unable to discover protocol version:\"},\n\t\t{\n\t\t\tNumRetries:               1,\n\t\t\tProtoVersion:             protoVersion4,\n\t\t\tExpectedGetIntervalCalls: nil,\n\t\t\tExpectedErr:              \"gocql: unable to create session: unable to connect to the cluster, last error: unable to create control connection: unable to connect to initial hosts:\"},\n\t\t{\n\t\t\tNumRetries:               2,\n\t\t\tProtoVersion:             protoVersion4,\n\t\t\tExpectedGetIntervalCalls: []int{1},\n\t\t\tExpectedErr:              \"gocql: unable to create session: unable to connect to the cluster, last error: unable to create control connection: unable to connect to initial hosts:\"},\n\t\t{\n\t\t\tNumRetries:               3,\n\t\t\tProtoVersion:             protoVersion4,\n\t\t\tExpectedGetIntervalCalls: []int{1, 2},\n\t\t\tExpectedErr:              \"gocql: unable to create session: unable to connect to the cluster, last error: unable to create control connection: unable to connect to initial hosts:\"},\n\t}\n\n\tfor id := range tcase {\n\t\ttc := tcase[id]\n\t\tt.Run(fmt.Sprintf(\"NumRetries=%d_ProtocolVersion=%d\", tc.NumRetries, tc.ProtoVersion), func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\t// Use a loopback address with a well-known closed port so the test\n\t\t\t// remains deterministic even when a local Cassandra-compatible\n\t\t\t// service is listening on 9042.\n\t\t\tcluster := NewCluster(\"127.0.0.1:1\")\n\t\t\tpolicy := &TestReconnectionPolicy{NumRetries: tc.NumRetries}\n\t\t\tcluster.InitialReconnectionPolicy = policy\n\t\t\tcluster.ProtoVersion = tc.ProtoVersion\n\t\t\t_, err := cluster.CreateSession()\n\t\t\tif err == nil {\n\t\t\t\tt.Fatal(\"expected to get an error\")\n\t\t\t}\n\t\t\tif 
!strings.Contains(err.Error(), tc.ExpectedErr) {\n\t\t\t\tt.Errorf(\"expected error to contain %q got %q\", tc.ExpectedErr, err.Error())\n\t\t\t}\n\t\t\tif !cmp.Equal(tc.ExpectedGetIntervalCalls, policy.GetIntervalCalls) {\n\t\t\t\tt.Errorf(\"expected GetInterval calls to be (%+v) but was (%+v) instead\", tc.ExpectedGetIntervalCalls, policy.GetIntervalCalls)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestContext_CanceledBeforeExec(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tvar reqCount uint64\n\n\tsrv := newTestServerOpts{\n\t\taddr:     \"127.0.0.1:0\",\n\t\tprotocol: defaultProto,\n\t\trecvHook: func(f *framer) {\n\t\t\tif f.header.Op == frm.OpStartup || f.header.Op == frm.OpOptions {\n\t\t\t\t// ignore statup and heartbeat messages\n\t\t\t\treturn\n\t\t\t}\n\t\t\tatomic.AddUint64(&reqCount, 1)\n\t\t},\n\t}.newServer(t, ctx)\n\n\tdefer srv.Stop()\n\n\tcluster := testCluster(defaultProto, srv.Address)\n\tcluster.Timeout = 5 * time.Second\n\tdb, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\tstartupRequestCount := atomic.LoadUint64(&reqCount)\n\n\tctx, cancel = context.WithCancel(ctx)\n\tcancel()\n\n\terr = db.Query(\"timeout\").WithContext(ctx).Exec()\n\tif !errors.Is(err, context.Canceled) {\n\t\tt.Fatalf(\"expected to get context cancel error: %v got %v\", context.Canceled, err)\n\t}\n\n\t// Queries are executed by separate goroutine and we don't have a synchronization point that would allow us to\n\t// check if a request was sent or not.\n\t// Fall back to waiting a little bit.\n\ttime.Sleep(100 * time.Millisecond)\n\n\tqueryRequestCount := atomic.LoadUint64(&reqCount) - startupRequestCount\n\tif queryRequestCount != 0 {\n\t\tt.Fatalf(\"expected that no request is sent to server, sent %d requests\", queryRequestCount)\n\t}\n}\n\nfunc TestCallReqReuseDoesNotInvalidateOutstandingTimeout(t *testing.T) {\n\tt.Parallel()\n\n\toldCall := getCallReq(1)\n\toldTimeout := 
oldCall.timeout\n\toldCall.done.Done()\n\tputCallReq(oldCall)\n\n\tnewCall := getCallReq(2)\n\tdefer newCall.done.Done()\n\tdefer close(newCall.timeout)\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tt.Fatalf(\"closing old timeout should not panic after putCallReq: %v\", r)\n\t\t}\n\t}()\n\n\tclose(oldTimeout)\n\n\tselect {\n\tcase <-newCall.timeout:\n\t\tt.Fatal(\"closing the old timeout unexpectedly closed the new call timeout\")\n\tdefault:\n\t}\n}\n\ntype testContextWriter struct {\n\tn       int\n\terr     error\n\tonWrite func()\n}\n\nfunc (w testContextWriter) writeContext(ctx context.Context, p []byte) (int, error) {\n\tif w.onWrite != nil {\n\t\tw.onWrite()\n\t}\n\tif w.n == 0 && w.err == nil {\n\t\treturn len(p), nil\n\t}\n\treturn w.n, w.err\n}\n\nfunc (w testContextWriter) setWriteTimeout(timeout time.Duration) {}\n\ntype contextWriterFunc func(context.Context, []byte) (int, error)\n\nfunc (fn contextWriterFunc) writeContext(ctx context.Context, p []byte) (int, error) {\n\treturn fn(ctx, p)\n}\n\nfunc (fn contextWriterFunc) setWriteTimeout(timeout time.Duration) {}\n\nfunc newTestExecConn(t *testing.T, w contextWriter) (*Conn, net.Conn) {\n\tt.Helper()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tserver, client := net.Pipe()\n\tc := newTestConnWithFramerPool()\n\tc.ctx = ctx\n\tc.cancel = cancel\n\tc.conn = client\n\tc.w = w\n\tc.logger = nopLogger{}\n\tc.errorHandler = connErrorHandlerFn(func(*Conn, error, bool) {})\n\tc.streams = streams.New()\n\tc.calls = make(map[int]*callReq)\n\n\treturn c, server\n}\n\nfunc waitForSingleCall(t *testing.T, c *Conn) *callReq {\n\tt.Helper()\n\n\tdeadline := time.After(2 * time.Second)\n\tticker := time.NewTicker(time.Millisecond)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tc.mu.Lock()\n\t\tfor _, call := range c.calls {\n\t\t\tc.mu.Unlock()\n\t\t\treturn call\n\t\t}\n\t\tc.mu.Unlock()\n\n\t\tselect {\n\t\tcase <-deadline:\n\t\t\tt.Fatal(\"timed out waiting for in-flight call\")\n\t\tcase 
<-ticker.C:\n\t\t}\n\t}\n}\n\nfunc detachSingleCall(t *testing.T, c *Conn) *callReq {\n\tt.Helper()\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tfor streamID, call := range c.calls {\n\t\tdelete(c.calls, streamID)\n\t\treturn call\n\t}\n\n\tt.Fatal(\"expected an in-flight call\")\n\treturn nil\n}\n\ntype testStreamObserver struct {\n\tctx *testStreamObserverContext\n}\n\nfunc (o *testStreamObserver) StreamContext(context.Context) StreamObserverContext {\n\treturn o.ctx\n}\n\ntype testStreamObserverContext struct {\n\tstarted   chan struct{}\n\tabandoned chan struct{}\n\tfinished  chan struct{}\n}\n\nfunc newTestStreamObserverContext() *testStreamObserverContext {\n\treturn &testStreamObserverContext{\n\t\tstarted:   make(chan struct{}, 1),\n\t\tabandoned: make(chan struct{}, 1),\n\t\tfinished:  make(chan struct{}, 1),\n\t}\n}\n\nfunc (o *testStreamObserverContext) StreamStarted(ObservedStream) {\n\tselect {\n\tcase o.started <- struct{}{}:\n\tdefault:\n\t}\n}\n\nfunc (o *testStreamObserverContext) StreamAbandoned(ObservedStream) {\n\tselect {\n\tcase o.abandoned <- struct{}{}:\n\tdefault:\n\t}\n}\n\nfunc (o *testStreamObserverContext) StreamFinished(ObservedStream) {\n\tselect {\n\tcase o.finished <- struct{}{}:\n\tdefault:\n\t}\n}\n\nfunc TestExecCloseWithError(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"BuildFrameErrorReleasesResources\", func(t *testing.T) {\n\t\tc, server := newTestExecConn(t, testContextWriter{})\n\t\tdefer server.Close()\n\n\t\t_, err := c.exec(context.Background(), frameWriterFunc(func(f *framer, streamID int) error {\n\t\t\treturn io.ErrUnexpectedEOF\n\t\t}), nil, 0)\n\t\tif !errors.Is(err, io.ErrUnexpectedEOF) {\n\t\t\tt.Fatalf(\"expected build error %v, got %v\", io.ErrUnexpectedEOF, err)\n\t\t}\n\n\t\tc.mu.Lock()\n\t\tdefer c.mu.Unlock()\n\t\tif len(c.calls) != 0 {\n\t\t\tt.Fatalf(\"expected no in-flight calls after build error, got %d\", len(c.calls))\n\t\t}\n\t})\n\n\tt.Run(\"ContextCanceledBeforeWriteReleasesResources\", func(t 
*testing.T) {\n\t\twriteEntered := make(chan struct{})\n\t\tc, server := newTestExecConn(t, contextWriterFunc(func(ctx context.Context, p []byte) (int, error) {\n\t\t\tclose(writeEntered)\n\t\t\t<-ctx.Done()\n\t\t\treturn 0, ctx.Err()\n\t\t}))\n\t\tdefer server.Close()\n\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\terrCh := make(chan error, 1)\n\t\tgo func() {\n\t\t\t_, err := c.exec(ctx, frameWriterFunc(func(f *framer, streamID int) error {\n\t\t\t\tf.buf = append(f.buf[:0], 'x')\n\t\t\t\treturn nil\n\t\t\t}), nil, 0)\n\t\t\terrCh <- err\n\t\t}()\n\n\t\tselect {\n\t\tcase <-writeEntered:\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"exec never reached the write path\")\n\t\t}\n\t\tcancel()\n\n\t\tselect {\n\t\tcase err := <-errCh:\n\t\t\tif !errors.Is(err, context.Canceled) {\n\t\t\t\tt.Fatalf(\"expected context cancel error %v, got %v\", context.Canceled, err)\n\t\t\t}\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"exec deadlocked after context cancellation before write\")\n\t\t}\n\n\t\tc.mu.Lock()\n\t\tdefer c.mu.Unlock()\n\t\tif len(c.calls) != 0 {\n\t\t\tt.Fatalf(\"expected no in-flight calls after canceled write, got %d\", len(c.calls))\n\t\t}\n\t})\n\n\tt.Run(\"ResponseErrorReleasesResources\", func(t *testing.T) {\n\t\tc, server := newTestExecConn(t, testContextWriter{})\n\t\tdefer server.Close()\n\n\t\terrCh := make(chan error, 1)\n\t\tgo func() {\n\t\t\t_, err := c.exec(context.Background(), frameWriterFunc(func(f *framer, streamID int) error {\n\t\t\t\tf.buf = append(f.buf[:0], 'x')\n\t\t\t\treturn nil\n\t\t\t}), nil, 0)\n\t\t\terrCh <- err\n\t\t}()\n\n\t\twaitForSingleCall(t, c)\n\t\tcall := detachSingleCall(t, c)\n\t\tcall.resp <- callResp{err: io.EOF}\n\n\t\tselect {\n\t\tcase err := <-errCh:\n\t\t\tif !errors.Is(err, io.EOF) {\n\t\t\t\tt.Fatalf(\"expected response error %v, got %v\", io.EOF, err)\n\t\t\t}\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"exec deadlocked after response 
error\")\n\t\t}\n\n\t\tc.mu.Lock()\n\t\tdefer c.mu.Unlock()\n\t\tif len(c.calls) != 0 {\n\t\t\tt.Fatalf(\"expected no in-flight calls after response error, got %d\", len(c.calls))\n\t\t}\n\t})\n\n\tt.Run(\"PartialWriteDoesNotDeadlock\", func(t *testing.T) {\n\t\tc, server := newTestExecConn(t, testContextWriter{\n\t\t\tn:   1,\n\t\t\terr: io.ErrUnexpectedEOF,\n\t\t})\n\t\tdefer server.Close()\n\n\t\terrCh := make(chan error, 1)\n\t\tgo func() {\n\t\t\t_, err := c.exec(context.Background(), frameWriterFunc(func(f *framer, streamID int) error {\n\t\t\t\tf.buf = append(f.buf[:0], 'x')\n\t\t\t\treturn nil\n\t\t\t}), nil, 0)\n\t\t\terrCh <- err\n\t\t}()\n\n\t\tselect {\n\t\tcase err := <-errCh:\n\t\t\tif !errors.Is(err, io.ErrUnexpectedEOF) {\n\t\t\t\tt.Fatalf(\"expected write error %v, got %v\", io.ErrUnexpectedEOF, err)\n\t\t\t}\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"exec deadlocked after partial write failure\")\n\t\t}\n\t})\n\n\tt.Run(\"ConnectionCloseErrorDoesNotDeadlock\", func(t *testing.T) {\n\t\twriteStarted := make(chan struct{})\n\t\tvar writeStartedOnce sync.Once\n\t\tc, server := newTestExecConn(t, testContextWriter{\n\t\t\tonWrite: func() {\n\t\t\t\twriteStartedOnce.Do(func() {\n\t\t\t\t\tclose(writeStarted)\n\t\t\t\t})\n\t\t\t},\n\t\t})\n\t\tdefer server.Close()\n\n\t\tcloseDone := make(chan struct{})\n\t\tgo func() {\n\t\t\t<-writeStarted\n\t\t\tc.closeWithError(io.EOF)\n\t\t\tclose(closeDone)\n\t\t}()\n\n\t\terrCh := make(chan error, 1)\n\t\tgo func() {\n\t\t\t_, err := c.exec(context.Background(), frameWriterFunc(func(f *framer, streamID int) error {\n\t\t\t\tf.buf = append(f.buf[:0], 'x')\n\t\t\t\treturn nil\n\t\t\t}), nil, 0)\n\t\t\terrCh <- err\n\t\t}()\n\n\t\tselect {\n\t\tcase err := <-errCh:\n\t\t\tif !errors.Is(err, io.EOF) {\n\t\t\t\tt.Fatalf(\"expected close error %v, got %v\", io.EOF, err)\n\t\t\t}\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"exec deadlocked after closeWithError\")\n\t\t}\n\n\t\tselect 
{\n\t\tcase <-closeDone:\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"closeWithError deadlocked waiting for exec to release the call\")\n\t\t}\n\t})\n\n\tt.Run(\"TimeoutUnblocksAbandonRecvCall\", func(t *testing.T) {\n\t\tc, server := newTestExecConn(t, testContextWriter{})\n\t\tdefer server.Close()\n\n\t\terrCh := make(chan error, 1)\n\t\tgo func() {\n\t\t\t_, err := c.exec(context.Background(), frameWriterFunc(func(f *framer, streamID int) error {\n\t\t\t\tf.buf = append(f.buf[:0], 'x')\n\t\t\t\treturn nil\n\t\t\t}), nil, time.Millisecond)\n\t\t\terrCh <- err\n\t\t}()\n\n\t\tcall := waitForSingleCall(t, c)\n\n\t\tselect {\n\t\tcase err := <-errCh:\n\t\t\tif !errors.Is(err, ErrTimeoutNoResponse) {\n\t\t\t\tt.Fatalf(\"expected timeout error %v, got %v\", ErrTimeoutNoResponse, err)\n\t\t\t}\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"exec deadlocked waiting for timeout\")\n\t\t}\n\n\t\tif !c.removeCallIfOpen(call.streamID) {\n\t\t\tt.Fatal(\"expected timed out call to still be registered\")\n\t\t}\n\n\t\tdone := make(chan struct{})\n\t\tgo func() {\n\t\t\tc.abandonRecvCall(call, c.getReadFramer())\n\t\t\tclose(done)\n\t\t}()\n\n\t\tselect {\n\t\tcase <-done:\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"abandonRecvCall deadlocked after timeout\")\n\t\t}\n\t})\n\n\tt.Run(\"ContextCancelUnblocksAbandonRecvCall\", func(t *testing.T) {\n\t\tc, server := newTestExecConn(t, testContextWriter{})\n\t\tdefer server.Close()\n\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\terrCh := make(chan error, 1)\n\t\tgo func() {\n\t\t\t_, err := c.exec(ctx, frameWriterFunc(func(f *framer, streamID int) error {\n\t\t\t\tf.buf = append(f.buf[:0], 'x')\n\t\t\t\treturn nil\n\t\t\t}), nil, 0)\n\t\t\terrCh <- err\n\t\t}()\n\n\t\tcall := waitForSingleCall(t, c)\n\t\tcancel()\n\n\t\tselect {\n\t\tcase err := <-errCh:\n\t\t\tif !errors.Is(err, context.Canceled) {\n\t\t\t\tt.Fatalf(\"expected context cancel error %v, got %v\", 
context.Canceled, err)\n\t\t\t}\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"exec deadlocked waiting for context cancellation\")\n\t\t}\n\n\t\tif !c.removeCallIfOpen(call.streamID) {\n\t\t\tt.Fatal(\"expected canceled call to still be registered\")\n\t\t}\n\n\t\tdone := make(chan struct{})\n\t\tgo func() {\n\t\t\tc.abandonRecvCall(call, c.getReadFramer())\n\t\t\tclose(done)\n\t\t}()\n\n\t\tselect {\n\t\tcase <-done:\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"abandonRecvCall deadlocked after context cancellation\")\n\t\t}\n\t})\n\n\tt.Run(\"ConnectionCloseAbandonsInflightStream\", func(t *testing.T) {\n\t\twriteStarted := make(chan struct{})\n\t\tvar writeStartedOnce sync.Once\n\t\tobserverCtx := newTestStreamObserverContext()\n\t\tc, server := newTestExecConn(t, testContextWriter{\n\t\t\tonWrite: func() {\n\t\t\t\twriteStartedOnce.Do(func() {\n\t\t\t\t\tclose(writeStarted)\n\t\t\t\t})\n\t\t\t},\n\t\t})\n\t\tc.streamObserver = &testStreamObserver{ctx: observerCtx}\n\t\tdefer server.Close()\n\n\t\terrCh := make(chan error, 1)\n\t\tgo func() {\n\t\t\t_, err := c.exec(context.Background(), frameWriterFunc(func(f *framer, streamID int) error {\n\t\t\t\tf.buf = append(f.buf[:0], 'x')\n\t\t\t\treturn nil\n\t\t\t}), nil, 0)\n\t\t\terrCh <- err\n\t\t}()\n\n\t\tselect {\n\t\tcase <-observerCtx.started:\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"stream observer did not observe the request start\")\n\t\t}\n\n\t\tselect {\n\t\tcase <-writeStarted:\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"exec never reached the write path\")\n\t\t}\n\n\t\tcloseDone := make(chan struct{})\n\t\tgo func() {\n\t\t\tc.Close()\n\t\t\tclose(closeDone)\n\t\t}()\n\n\t\tselect {\n\t\tcase err := <-errCh:\n\t\t\tif !errors.Is(err, ErrConnectionClosed) {\n\t\t\t\tt.Fatalf(\"expected close error %v, got %v\", ErrConnectionClosed, err)\n\t\t\t}\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"exec deadlocked after 
Close\")\n\t\t}\n\n\t\tselect {\n\t\tcase <-observerCtx.abandoned:\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"Close did not abandon the in-flight stream\")\n\t\t}\n\n\t\tselect {\n\t\tcase <-observerCtx.finished:\n\t\t\tt.Fatal(\"Close should not mark the in-flight stream as finished\")\n\t\tdefault:\n\t\t}\n\n\t\tselect {\n\t\tcase <-closeDone:\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Fatal(\"Close did not wait for the in-flight exec cleanup\")\n\t\t}\n\n\t\tc.mu.Lock()\n\t\tdefer c.mu.Unlock()\n\t\tif c.calls != nil {\n\t\t\tt.Fatal(\"expected in-flight calls to be detached on Close\")\n\t\t}\n\t})\n}\n\n// tcpConnPair returns a matching set of a TCP client side and server side connection.\nfunc tcpConnPair() (s, c net.Conn, err error) {\n\tl, err := net.Listen(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\t// maybe ipv6 works, if ipv4 fails?\n\t\tl, err = net.Listen(\"tcp6\", \"[::1]:0\")\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\tdefer l.Close() // we only try to accept one connection, so will stop listening.\n\n\taddr := l.Addr()\n\tdone := make(chan struct{})\n\tvar errDial error\n\tgo func(done chan<- struct{}) {\n\t\tc, errDial = net.Dial(addr.Network(), addr.String())\n\t\tclose(done)\n\t}(done)\n\n\ts, err = l.Accept()\n\t<-done\n\n\tif err == nil {\n\t\terr = errDial\n\t}\n\n\tif err != nil {\n\t\tif s != nil {\n\t\t\ts.Close()\n\t\t}\n\t\tif c != nil {\n\t\t\tc.Close()\n\t\t}\n\t}\n\n\treturn s, c, err\n}\n\nfunc TestWriteCoalescing(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tserver, client, err := tcpConnPair()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdone := make(chan struct{}, 1)\n\tvar (\n\t\tbuf      bytes.Buffer\n\t\tbufMutex sync.Mutex\n\t)\n\tgo func() {\n\t\tdefer close(done)\n\t\tdefer server.Close()\n\t\tvar err error\n\t\tb := make([]byte, 256)\n\t\tvar n int\n\t\tfor {\n\t\t\tif n, err = server.Read(b); err != nil 
{\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tbufMutex.Lock()\n\t\t\tbuf.Write(b[:n])\n\t\t\tbufMutex.Unlock()\n\t\t}\n\t\tif err != io.EOF {\n\t\t\tt.Errorf(\"unexpected read error: %v\", err)\n\t\t}\n\t}()\n\tenqueued := make(chan struct{})\n\tresetTimer := make(chan struct{})\n\tw := &writeCoalescer{\n\t\twriteCh: make(chan writeRequest),\n\t\tc:       client,\n\t\tquit:    ctx.Done(),\n\t\ttestEnqueuedHook: func() {\n\t\t\tenqueued <- struct{}{}\n\t\t},\n\t\ttestFlushedHook: func() {\n\t\t\tclient.Close()\n\t\t},\n\t}\n\tw.setWriteTimeout(500 * time.Millisecond)\n\ttimerC := make(chan time.Time, 1)\n\tgo func() {\n\t\tw.writeFlusherImpl(timerC, func() { resetTimer <- struct{}{} })\n\t}()\n\n\tgo func() {\n\t\tif _, err := w.writeContext(context.Background(), []byte(\"one\")); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tif _, err := w.writeContext(context.Background(), []byte(\"two\")); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\t<-enqueued\n\t<-resetTimer\n\t<-enqueued\n\n\t// flush\n\ttimerC <- time.Now()\n\n\t<-done\n\n\tif got := buf.String(); got != \"onetwo\" && got != \"twoone\" {\n\t\tt.Fatalf(\"expected to get %q got %q\", \"onetwo or twoone\", got)\n\t}\n}\n\nfunc TestWriteCoalescing_WriteAfterClose(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tvar buf bytes.Buffer\n\tdefer cancel()\n\tserver, client, err := tcpConnPair()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdone := make(chan struct{}, 1)\n\tgo func() {\n\t\tio.Copy(&buf, server)\n\t\tserver.Close()\n\t\tclose(done)\n\t}()\n\tw := newWriteCoalescer(client, 0, 5*time.Millisecond, ctx.Done())\n\n\t// ensure 1 write works\n\tif _, err := w.writeContext(context.Background(), []byte(\"one\")); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tclient.Close()\n\t<-done\n\tif v := buf.String(); v != \"one\" {\n\t\tt.Fatalf(\"expected buffer to be %q got %q\", \"one\", v)\n\t}\n\n\t// now close and do a write, we should 
error\n\tcancel()\n\tclient.Close() // close client conn too, since server won't see the answer anyway.\n\n\tif _, err := w.writeContext(context.Background(), []byte(\"two\")); err == nil {\n\t\tt.Fatal(\"expected to get error for write after closing\")\n\t} else if err != io.EOF {\n\t\tt.Fatalf(\"expected to get EOF got %v\", err)\n\t}\n}\n\nfunc TestSkipMetadata(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tsrv := NewTestServer(t, protoVersion4, ctx)\n\tdefer srv.Stop()\n\n\tcfg := testCluster(protoVersion4, srv.Address)\n\tcfg.DisableSkipMetadata = false\n\n\tdb, err := cfg.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"NewCluster: %v\", err)\n\t}\n\tdefer db.Close()\n\n\tif err := db.Query(\"select nometadata\").Exec(); err != nil {\n\t\tt.Fatalf(\"expected no error got: %v\", err)\n\t}\n\n\tif err := db.Query(\"select metadata\").Exec(); err != nil {\n\t\tt.Fatalf(\"expected no error got: %v\", err)\n\t}\n}\n\nfunc TestPrepareBatchMetadataMultipleKeyspaceTables(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tsrv := NewTestServer(t, protoVersion4, ctx)\n\tdefer srv.Stop()\n\n\tcfg := testCluster(protoVersion4, srv.Address)\n\tdb, err := cfg.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"CreateSession: %v\", err)\n\t}\n\tdefer db.Close()\n\n\tconn := db.getConn()\n\tif conn == nil {\n\t\tt.Fatal(\"expected connection, got nil\")\n\t}\n\n\tstmt := \"BEGIN BATCH INSERT INTO ks1.tbl1 (col1) VALUES (?) INSERT INTO ks2.tbl2 (col2) VALUES (?) 
APPLY BATCH\"\n\tinfo, err := conn.prepareStatement(ctx, stmt, nil, time.Second)\n\tif err != nil {\n\t\tt.Fatalf(\"prepareStatement failed: %v\", err)\n\t}\n\n\tif got := len(info.request.columns); got != 2 {\n\t\tt.Fatalf(\"expected 2 request columns, got %d\", got)\n\t}\n\n\tcol0 := info.request.columns[0]\n\tif col0.Keyspace != \"ks1\" || col0.Table != \"tbl1\" || col0.Name != \"col1\" {\n\t\tt.Fatalf(\"unexpected column 0: %+v\", col0)\n\t}\n\n\tcol1 := info.request.columns[1]\n\tif col1.Keyspace != \"ks2\" || col1.Table != \"tbl2\" || col1.Name != \"col2\" {\n\t\tt.Fatalf(\"unexpected column 1: %+v\", col1)\n\t}\n\n\tif info.request.keyspace != \"\" || info.request.table != \"\" {\n\t\tt.Fatalf(\"expected empty prepared keyspace/table for mixed batch, got %q/%q\", info.request.keyspace, info.request.table)\n\t}\n}\n\ntype recordingFrameHeaderObserver struct {\n\tt      *testing.T\n\tmu     sync.Mutex\n\tframes []ObservedFrameHeader\n}\n\nfunc (r *recordingFrameHeaderObserver) ObserveFrameHeader(ctx context.Context, frm ObservedFrameHeader) {\n\tr.mu.Lock()\n\tr.frames = append(r.frames, frm)\n\tr.mu.Unlock()\n}\n\nfunc (r *recordingFrameHeaderObserver) getFrames() []ObservedFrameHeader {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\treturn r.frames\n}\n\nfunc TestFrameHeaderObserver(t *testing.T) {\n\tsrv := NewTestServer(t, defaultProto, context.Background())\n\tdefer srv.Stop()\n\n\tcluster := testCluster(defaultProto, srv.Address)\n\tcluster.NumConns = 1\n\tobserver := &recordingFrameHeaderObserver{t: t}\n\tcluster.FrameHeaderObserver = observer\n\n\tdb, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := db.Query(\"void\").Exec(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tframes := observer.getFrames()\n\texpFrames := []frm.Op{frm.OpSupported, frm.OpReady, frm.OpResult}\n\tif len(frames) != len(expFrames) {\n\t\tt.Fatalf(\"Expected to receive %d frames, instead received %d\", len(expFrames), len(frames))\n\t}\n\n\tfor i, op 
:= range expFrames {\n\t\tif op != frames[i].Opcode {\n\t\t\tt.Fatalf(\"expected frame %d to be %v got %v\", i, op, frames[i])\n\t\t}\n\t}\n\tvoidResultFrame := frames[2]\n\tif voidResultFrame.Length != int32(4) {\n\t\tt.Fatalf(\"Expected to receive frame with body length 4, instead received body length %d\", voidResultFrame.Length)\n\t}\n}\n\nfunc NewTestServerWithAddress(addr string, t testing.TB, protocol uint8, ctx context.Context) *TestServer {\n\treturn newTestServerOpts{\n\t\taddr:     addr,\n\t\tprotocol: protocol,\n\t}.newServer(t, ctx)\n}\n\nfunc NewTestServerWithAddressAndSupportedFactory(addr string, t testing.TB, protocol uint8, ctx context.Context, supportedFactory testSupportedFactory) *TestServer {\n\treturn newTestServerOpts{\n\t\taddr:             addr,\n\t\tprotocol:         protocol,\n\t\tsupportedFactory: supportedFactory,\n\t}.newServer(t, ctx)\n}\n\ntype newTestServerOpts struct {\n\taddr             string\n\tprotocol         uint8\n\tsupportedFactory testSupportedFactory\n\trecvHook         func(*framer)\n}\n\nfunc (nts newTestServerOpts) newServer(t testing.TB, ctx context.Context) *TestServer {\n\tladdr, err := net.ResolveTCPAddr(\"tcp\", nts.addr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tlisten, err := net.ListenTCP(\"tcp\", laddr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\theaderSize := 9\n\n\tctx, cancel := context.WithCancel(ctx)\n\tsrv := &TestServer{\n\t\tAddress:    listen.Addr().String(),\n\t\tlisten:     listen,\n\t\tt:          t,\n\t\tprotocol:   nts.protocol,\n\t\theaderSize: headerSize,\n\t\tctx:        ctx,\n\t\tcancel:     cancel,\n\n\t\tsupportedFactory: nts.supportedFactory,\n\t\tonRecv:           nts.recvHook,\n\t}\n\n\tgo srv.closeWatch()\n\tgo srv.serve()\n\n\treturn srv\n}\n\nfunc NewTestServer(t testing.TB, protocol uint8, ctx context.Context) *TestServer {\n\treturn NewTestServerWithAddress(\"127.0.0.1:0\", t, protocol, ctx)\n}\n\nfunc NewSSLTestServer(t testing.TB, protocol uint8, ctx context.Context) 
*TestServer {\n\treturn NewSSLTestServerWithSupportedFactory(t, protocol, ctx, nil)\n}\n\nfunc NewSSLTestServerWithSupportedFactory(t testing.TB, protocol uint8, ctx context.Context, supportedFactory testSupportedFactory) *TestServer {\n\tpem, err := os.ReadFile(\"testdata/pki/ca.crt\")\n\tcertPool := x509.NewCertPool()\n\tif !certPool.AppendCertsFromPEM(pem) {\n\t\tt.Fatalf(\"Failed parsing or appending certs\")\n\t}\n\tmycert, err := tls.LoadX509KeyPair(\"testdata/pki/cassandra.crt\", \"testdata/pki/cassandra.key\")\n\tif err != nil {\n\t\tt.Fatalf(\"could not load cert\")\n\t}\n\tconfig := &tls.Config{\n\t\tCertificates: []tls.Certificate{mycert},\n\t\tRootCAs:      certPool,\n\t}\n\tlisten, err := tls.Listen(\"tcp\", \"127.0.0.1:0\", config)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\theaderSize := 9\n\n\tctx, cancel := context.WithCancel(ctx)\n\tsrv := &TestServer{\n\t\tAddress:    listen.Addr().String(),\n\t\tlisten:     listen,\n\t\tt:          t,\n\t\tprotocol:   protocol,\n\t\theaderSize: headerSize,\n\t\tctx:        ctx,\n\t\tcancel:     cancel,\n\n\t\tsupportedFactory: supportedFactory,\n\t}\n\n\tgo srv.closeWatch()\n\tgo srv.serve()\n\treturn srv\n}\n\ntype TestServer struct {\n\tAddress          string\n\tTimeoutOnStartup int32\n\tt                testing.TB\n\tlisten           net.Listener\n\tnKillReq         int64\n\tsupportedFactory testSupportedFactory\n\n\tprotocol   byte\n\theaderSize int\n\tctx        context.Context\n\tcancel     context.CancelFunc\n\n\tmu     sync.Mutex\n\tclosed bool\n\n\t// onRecv is a hook point for tests, called in receive loop.\n\tonRecv func(*framer)\n}\n\ntype testSupportedFactory func(conn net.Conn) map[string][]string\n\nfunc (srv *TestServer) session() (*Session, error) {\n\treturn testCluster(frm.ProtoVersion(srv.protocol), srv.Address).CreateSession()\n}\n\nfunc (srv *TestServer) host() *HostInfo {\n\thosts, err := resolveInitialEndpoint(nil, srv.Address, 9042)\n\tif err != nil 
{\n\t\tsrv.t.Fatal(err)\n\t}\n\treturn hosts[0]\n}\n\nfunc (srv *TestServer) closeWatch() {\n\t<-srv.ctx.Done()\n\n\tsrv.mu.Lock()\n\tdefer srv.mu.Unlock()\n\n\tsrv.closeLocked()\n}\n\nfunc (srv *TestServer) serve() {\n\tdefer srv.listen.Close()\n\tfor !srv.isClosed() {\n\t\tconn, err := srv.listen.Accept()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tvar exts map[string][]string\n\t\tif srv.supportedFactory != nil {\n\t\t\texts = (srv.supportedFactory)(conn)\n\t\t}\n\n\t\tgo func(conn net.Conn, exts map[string][]string) {\n\t\t\tdefer conn.Close()\n\t\t\tfor !srv.isClosed() {\n\t\t\t\tframer, err := srv.readFrame(conn)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err == io.EOF || errors.Is(err, net.ErrClosed) {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tsrv.errorLocked(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif srv.onRecv != nil {\n\t\t\t\t\tsrv.onRecv(framer)\n\t\t\t\t}\n\n\t\t\t\tgo srv.process(conn, framer, exts)\n\t\t\t}\n\t\t}(conn, exts)\n\t}\n}\n\nfunc (srv *TestServer) isClosed() bool {\n\tsrv.mu.Lock()\n\tdefer srv.mu.Unlock()\n\treturn srv.closed\n}\n\nfunc (srv *TestServer) closeLocked() {\n\tif srv.closed {\n\t\treturn\n\t}\n\n\tsrv.closed = true\n\n\tsrv.listen.Close()\n\tsrv.cancel()\n}\n\nfunc (srv *TestServer) Stop() {\n\tsrv.mu.Lock()\n\tdefer srv.mu.Unlock()\n\tsrv.closeLocked()\n}\n\nfunc (srv *TestServer) errorLocked(err any) {\n\tsrv.mu.Lock()\n\tdefer srv.mu.Unlock()\n\tif srv.closed {\n\t\treturn\n\t}\n\tsrv.t.Error(err)\n}\n\nfunc (srv *TestServer) process(conn net.Conn, reqFrame *framer, exts map[string][]string) {\n\thead := reqFrame.header\n\tif head == nil {\n\t\tsrv.errorLocked(\"process frame with a nil header\")\n\t\treturn\n\t}\n\trespFrame := newFramer(nil, reqFrame.proto)\n\n\tswitch head.Op {\n\tcase frm.OpStartup:\n\t\tif atomic.LoadInt32(&srv.TimeoutOnStartup) > 0 {\n\t\t\t// Do not respond to startup command\n\t\t\t// wait until we get a cancel signal\n\t\t\tselect {\n\t\t\tcase 
<-srv.ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\trespFrame.writeHeader(0, frm.OpReady, head.Stream)\n\tcase frm.OpOptions:\n\t\trespFrame.writeHeader(0, frm.OpSupported, head.Stream)\n\t\trespFrame.writeStringMultiMap(exts)\n\tcase frm.OpQuery:\n\t\tquery := reqFrame.readLongString()\n\t\tfirst := query\n\t\tif n := strings.Index(query, \" \"); n > 0 {\n\t\t\tfirst = first[:n]\n\t\t}\n\t\tswitch strings.ToLower(first) {\n\t\tcase \"kill\":\n\t\t\tatomic.AddInt64(&srv.nKillReq, 1)\n\t\t\trespFrame.writeHeader(0, frm.OpError, head.Stream)\n\t\t\trespFrame.writeInt(0x1001)\n\t\t\trespFrame.writeString(\"query killed\")\n\t\tcase \"use\":\n\t\t\trespFrame.writeInt(frm.ResultKindKeyspace)\n\t\t\trespFrame.writeString(strings.TrimSpace(query[3:]))\n\t\tcase \"void\":\n\t\t\trespFrame.writeHeader(0, frm.OpResult, head.Stream)\n\t\t\trespFrame.writeInt(frm.ResultKindVoid)\n\t\tcase \"timeout\":\n\t\t\t<-srv.ctx.Done()\n\t\t\treturn\n\t\tcase \"slow\":\n\t\t\tgo func() {\n\t\t\t\trespFrame.writeHeader(0, frm.OpResult, head.Stream)\n\t\t\t\trespFrame.writeInt(frm.ResultKindVoid)\n\t\t\t\trespFrame.buf[0] = srv.protocol | 0x80\n\t\t\t\tselect {\n\t\t\t\tcase <-srv.ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase <-time.After(50 * time.Millisecond):\n\t\t\t\t\trespFrame.finish()\n\t\t\t\t\trespFrame.writeTo(conn)\n\t\t\t\t}\n\t\t\t}()\n\t\t\treturn\n\t\tcase \"speculative\":\n\t\t\tatomic.AddInt64(&srv.nKillReq, 1)\n\t\t\tif atomic.LoadInt64(&srv.nKillReq) > 3 {\n\t\t\t\trespFrame.writeHeader(0, frm.OpResult, head.Stream)\n\t\t\t\trespFrame.writeInt(frm.ResultKindVoid)\n\t\t\t\trespFrame.writeString(\"speculative query success on the node \" + srv.Address)\n\t\t\t} else {\n\t\t\t\trespFrame.writeHeader(0, frm.OpError, head.Stream)\n\t\t\t\trespFrame.writeInt(0x1001)\n\t\t\t\trespFrame.writeString(\"speculative error\")\n\t\t\t\trand.Seed(time.Now().UnixNano())\n\t\t\t\t<-time.After(time.Millisecond * 120)\n\t\t\t}\n\t\tdefault:\n\t\t\trespFrame.writeHeader(0, frm.OpResult, 
head.Stream)\n\t\t\trespFrame.writeInt(frm.ResultKindVoid)\n\t\t}\n\tcase frm.OpError:\n\t\trespFrame.writeHeader(0, frm.OpError, head.Stream)\n\t\trespFrame.buf = append(respFrame.buf, reqFrame.buf...)\n\tcase frm.OpPrepare:\n\t\tquery := strings.TrimSpace(reqFrame.readLongString())\n\t\tlower := strings.ToLower(query)\n\t\tname := \"\"\n\t\tif strings.HasPrefix(lower, \"select \") {\n\t\t\tname = strings.TrimPrefix(lower, \"select \")\n\t\t\tif n := strings.Index(name, \" \"); n > 0 {\n\t\t\t\tname = name[:n]\n\t\t\t}\n\t\t} else if strings.HasPrefix(lower, \"begin batch\") {\n\t\t\tname = \"batchmetadata\"\n\t\t} else {\n\t\t\tname = lower\n\t\t}\n\t\tswitch name {\n\t\tcase \"nometadata\":\n\t\t\trespFrame.writeHeader(0, frm.OpResult, head.Stream)\n\t\t\trespFrame.writeInt(frm.ResultKindPrepared)\n\t\t\t// <id>\n\t\t\trespFrame.writeShortBytes(binary.BigEndian.AppendUint64(nil, 1))\n\t\t\t// <metadata>\n\t\t\trespFrame.writeInt(0) // <flags>\n\t\t\trespFrame.writeInt(0) // <columns_count>\n\t\t\tif srv.protocol >= protoVersion4 {\n\t\t\t\trespFrame.writeInt(0) // <pk_count>\n\t\t\t}\n\t\t\t// <result_metadata>\n\t\t\trespFrame.writeInt(int32(frm.FlagNoMetaData)) // <flags>\n\t\t\trespFrame.writeInt(0)\n\t\tcase \"metadata\":\n\t\t\trespFrame.writeHeader(0, frm.OpResult, head.Stream)\n\t\t\trespFrame.writeInt(frm.ResultKindPrepared)\n\t\t\t// <id>\n\t\t\trespFrame.writeShortBytes(binary.BigEndian.AppendUint64(nil, 2))\n\t\t\t// <metadata>\n\t\t\trespFrame.writeInt(0) // <flags>\n\t\t\trespFrame.writeInt(0) // <columns_count>\n\t\t\tif srv.protocol >= protoVersion4 {\n\t\t\t\trespFrame.writeInt(0) // <pk_count>\n\t\t\t}\n\t\t\t// <result_metadata>\n\t\t\trespFrame.writeInt(int32(frm.FlagGlobalTableSpec)) // <flags>\n\t\t\trespFrame.writeInt(1)                              // <columns_count>\n\t\t\t// <global_table_spec>\n\t\t\trespFrame.writeString(\"keyspace\")\n\t\t\trespFrame.writeString(\"table\")\n\t\t\t// <col_spec_0>\n\t\t\trespFrame.writeString(\"col0\")  
           // <name>\n\t\t\trespFrame.writeShort(uint16(TypeBoolean)) // <type>\n\t\tcase \"batchmetadata\":\n\t\t\trespFrame.writeHeader(0, frm.OpResult, head.Stream)\n\t\t\trespFrame.writeInt(frm.ResultKindPrepared)\n\t\t\t// <id>\n\t\t\trespFrame.writeShortBytes(binary.BigEndian.AppendUint64(nil, 3))\n\t\t\t// <metadata>\n\t\t\trespFrame.writeInt(0) // <flags>\n\t\t\trespFrame.writeInt(2) // <columns_count>\n\t\t\tif srv.protocol >= protoVersion4 {\n\t\t\t\trespFrame.writeInt(0) // <pk_count>\n\t\t\t}\n\t\t\t// <col_spec_0>\n\t\t\trespFrame.writeString(\"ks1\")\n\t\t\trespFrame.writeString(\"tbl1\")\n\t\t\trespFrame.writeString(\"col1\")\n\t\t\trespFrame.writeShort(uint16(TypeInt))\n\t\t\t// <col_spec_1>\n\t\t\trespFrame.writeString(\"ks2\")\n\t\t\trespFrame.writeString(\"tbl2\")\n\t\t\trespFrame.writeString(\"col2\")\n\t\t\trespFrame.writeShort(uint16(TypeInt))\n\t\t\t// <result_metadata>\n\t\t\trespFrame.writeInt(int32(frm.FlagNoMetaData))\n\t\t\trespFrame.writeInt(0)\n\t\tdefault:\n\t\t\trespFrame.writeHeader(0, frm.OpError, head.Stream)\n\t\t\trespFrame.writeInt(0)\n\t\t\trespFrame.writeString(\"unsupported query: \" + name)\n\t\t}\n\tcase frm.OpExecute:\n\t\tb := reqFrame.readShortBytesCopy()\n\t\tid := binary.BigEndian.Uint64(b)\n\t\t// <query_parameters>\n\t\treqFrame.readConsistency() // <consistency>\n\t\tvar flags byte\n\t\tif srv.protocol > protoVersion4 {\n\t\t\tui := reqFrame.readInt()\n\t\t\tflags = byte(ui)\n\t\t} else {\n\t\t\tflags = reqFrame.readByte()\n\t\t}\n\t\tswitch id {\n\t\tcase 1:\n\t\t\tif flags&frm.FlagSkipMetaData != 0 {\n\t\t\t\trespFrame.writeHeader(0, frm.OpError, head.Stream)\n\t\t\t\trespFrame.writeInt(0)\n\t\t\t\trespFrame.writeString(\"skip metadata unexpected\")\n\t\t\t} else {\n\t\t\t\trespFrame.writeHeader(0, frm.OpResult, head.Stream)\n\t\t\t\trespFrame.writeInt(frm.ResultKindRows)\n\t\t\t\t// <metadata>\n\t\t\t\trespFrame.writeInt(0) // <flags>\n\t\t\t\trespFrame.writeInt(0) // <columns_count>\n\t\t\t\t// 
<rows_count>\n\t\t\t\trespFrame.writeInt(0)\n\t\t\t}\n\t\tcase 2:\n\t\t\tif flags&frm.FlagSkipMetaData != 0 {\n\t\t\t\trespFrame.writeHeader(0, frm.OpResult, head.Stream)\n\t\t\t\trespFrame.writeInt(frm.ResultKindRows)\n\t\t\t\t// <metadata>\n\t\t\t\trespFrame.writeInt(0) // <flags>\n\t\t\t\trespFrame.writeInt(0) // <columns_count>\n\t\t\t\t// <rows_count>\n\t\t\t\trespFrame.writeInt(0)\n\t\t\t} else {\n\t\t\t\trespFrame.writeHeader(0, frm.OpError, head.Stream)\n\t\t\t\trespFrame.writeInt(0)\n\t\t\t\trespFrame.writeString(\"skip metadata expected\")\n\t\t\t}\n\t\tdefault:\n\t\t\trespFrame.writeHeader(0, frm.OpError, head.Stream)\n\t\t\trespFrame.writeInt(ErrCodeUnprepared)\n\t\t\trespFrame.writeString(\"unprepared\")\n\t\t\trespFrame.writeShortBytes(binary.BigEndian.AppendUint64(nil, id))\n\t\t}\n\tdefault:\n\t\trespFrame.writeHeader(0, frm.OpError, head.Stream)\n\t\trespFrame.writeInt(0)\n\t\trespFrame.writeString(\"not supported\")\n\t}\n\n\trespFrame.buf[0] = srv.protocol | 0x80\n\n\tif err := respFrame.finish(); err != nil {\n\t\tsrv.errorLocked(err)\n\t}\n\n\tif err := respFrame.writeTo(conn); err != nil {\n\t\tif !errors.Is(err, net.ErrClosed) {\n\t\t\tsrv.errorLocked(err)\n\t\t}\n\t}\n}\n\nfunc (srv *TestServer) readFrame(conn net.Conn) (*framer, error) {\n\tbuf := make([]byte, srv.headerSize)\n\thead, err := readHeader(conn, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tframer := newFramer(nil, srv.protocol)\n\n\terr = framer.readFrame(conn, &head)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// should be a request frame\n\tif head.Version.Response() {\n\t\treturn nil, fmt.Errorf(\"expected to read a request frame got version: %v\", head.Version)\n\t} else if head.Version.Version() != srv.protocol {\n\t\treturn nil, fmt.Errorf(\"expected to read protocol version 0x%x got 0x%x\", srv.protocol, head.Version.Version())\n\t}\n\n\treturn framer, nil\n}\n\nfunc TestGetSchemaAgreement(t *testing.T) {\n\tschema_version1 := 
ParseUUIDMust(\"af810386-a694-11ef-81fa-3aea73156247\")\n\tpeersRows := []schemaAgreementHost{\n\t\t{\n\t\t\tDataCenter:    \"datacenter1\",\n\t\t\tHostID:        ParseUUIDMust(\"b2035fd9-e0ca-4857-8c45-e63c00fb7c43\"),\n\t\t\tRack:          \"rack1\",\n\t\t\tRPCAddress:    \"127.0.0.3\",\n\t\t\tSchemaVersion: schema_version1,\n\t\t},\n\t\t{\n\t\t\tDataCenter:    \"datacenter1\",\n\t\t\tHostID:        ParseUUIDMust(\"4b21ee4c-acea-4267-8e20-aaed5361a0dd\"),\n\t\t\tRack:          \"rack1\",\n\t\t\tRPCAddress:    \"127.0.0.2\",\n\t\t\tSchemaVersion: schema_version1,\n\t\t},\n\t\t{\n\t\t\tDataCenter:    \"datacenter2\",\n\t\t\tHostID:        ParseUUIDMust(\"dfef4a22-b8d8-47e9-aee5-8c19d4b7a9e3\"),\n\t\t\tRack:          \"rack1\",\n\t\t\tRPCAddress:    \"127.0.0.5\",\n\t\t\tSchemaVersion: ParseUUIDMust(\"875a938a-a695-11ef-4314-85c8ef0ebaa2\"),\n\t\t},\n\t}\n\n\tvar logger StdLogger\n\n\tt.Run(\"SchemaNotConsistent\", func(t *testing.T) {\n\t\terr := getSchemaAgreement(\n\t\t\t[]string{\"875a938a-a695-11ef-4314-85c8ef0ebaa2\"},\n\t\t\tpeersRows,\n\t\t\tlogger,\n\t\t)\n\n\t\tassert.Error(t, err, \"error expected when local schema is different than others\")\n\t})\n\n\tt.Run(\"ZeroTokenNodeSchemaNotConsistent\", func(t *testing.T) {\n\t\terr := getSchemaAgreement(\n\t\t\t[]string{\"af810386-a694-11ef-81fa-3aea73156247\"},\n\t\t\tpeersRows,\n\t\t\tlogger,\n\t\t)\n\n\t\tassert.Error(t, err, \"expected error when zero-token node has different schema\")\n\t})\n\n\tt.Run(\"SchemaConsistent\", func(t *testing.T) {\n\t\tpeersRows[2].SchemaVersion = schema_version1\n\t\terr := getSchemaAgreement(\n\t\t\t[]string{\"af810386-a694-11ef-81fa-3aea73156247\"},\n\t\t\tpeersRows,\n\t\t\tlogger,\n\t\t)\n\n\t\tassert.NoError(t, err, \"expected no error when all nodes have the same schema\")\n\t})\n}\n\nfunc TestUseKeyspaceQuoteEscaping(t *testing.T) {\n\ttests := []struct {\n\t\tkeyspace string\n\t\twant     string\n\t}{\n\t\t{\"simple\", `USE \"simple\"`},\n\t\t{`my\"ks`, `USE 
\"my\"\"ks\"`},\n\t\t{`a\"\"b`, `USE \"a\"\"\"\"b\"`},\n\t\t{`\"`, `USE \"\"\"\"`},\n\t\t{\"\", `USE \"\"`},\n\t}\n\tfor _, tt := range tests {\n\t\tgot := useKeyspaceStmt(tt.keyspace)\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"keyspace %q: got %q, want %q\", tt.keyspace, got, tt.want)\n\t\t}\n\t}\n}\n\n// newTestConnWithFramerPool creates a minimal Conn with an initialized framer pool\n// suitable for testing releaseFramer and EWMA logic.\nfunc newTestConnWithFramerPool() *Conn {\n\tc := &Conn{}\n\tc.framers.defaults = framerConfig{\n\t\tproto: protoVersion4 & protoVersionMask,\n\t}\n\tc.framers.initPool(c)\n\treturn c\n}\n\nfunc buildTestFrame(t *testing.T, f *framer, req frameBuilder, streamID int) ([]byte, frm.FrameHeader) {\n\tt.Helper()\n\n\tif err := req.buildFrame(f, streamID); err != nil {\n\t\tt.Fatalf(\"buildFrame failed: %v\", err)\n\t}\n\n\tbuf := append([]byte(nil), f.buf...)\n\theader, err := readHeader(bytes.NewReader(buf), make([]byte, headSize))\n\tif err != nil {\n\t\tt.Fatalf(\"readHeader failed: %v\", err)\n\t}\n\n\treturn buf, header\n}\n\nfunc TestReleaseFramer(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"EWMAEquilibrium\", func(t *testing.T) {\n\t\tc := newTestConnWithFramerPool()\n\n\t\t// Release framers with bufCap == avg. 
EWMA should not drift.\n\t\tfor i := 0; i < 20; i++ {\n\t\t\tf := c.getReadFramer()\n\t\t\t// readBuffer is defaultBufSize (128), avg starts at defaultBufSize\n\t\t\tc.releaseReadFramer(f)\n\t\t}\n\n\t\tavg := c.framers.readPool.bufAvgSize.Load()\n\t\tif avg != defaultBufSize {\n\t\t\tt.Errorf(\"EWMA should stay at defaultBufSize=%d when all buffers equal, got %d\", defaultBufSize, avg)\n\t\t}\n\t})\n\n\tt.Run(\"DelegatesFramerReleaseToConn\", func(t *testing.T) {\n\t\tc := newTestConnWithFramerPool()\n\n\t\tf := c.getReadFramer()\n\t\tf.readBuffer = make([]byte, 4096)\n\n\t\tf.Release()\n\n\t\tavgAfterFirstRelease := c.framers.readPool.bufAvgSize.Load()\n\t\tif avgAfterFirstRelease <= defaultBufSize {\n\t\t\tt.Fatalf(\"framer.Release() should route through Conn.releaseFramer and update EWMA, got %d\", avgAfterFirstRelease)\n\t\t}\n\n\t\tf.Release()\n\n\t\tif avgAfterSecondRelease := c.framers.readPool.bufAvgSize.Load(); avgAfterSecondRelease != avgAfterFirstRelease {\n\t\t\tt.Fatalf(\"second framer.Release() should be a no-op: first avg=%d second avg=%d\", avgAfterFirstRelease, avgAfterSecondRelease)\n\t\t}\n\t})\n\n\tt.Run(\"EWMAConvergesUpward\", func(t *testing.T) {\n\t\tc := newTestConnWithFramerPool()\n\n\t\t// Release framers with a larger buffer; EWMA should converge toward it.\n\t\tconst targetSize = 4096\n\t\tfor i := 0; i < 100; i++ {\n\t\t\tf := c.getReadFramer()\n\t\t\tf.readBuffer = make([]byte, targetSize)\n\t\t\tc.releaseReadFramer(f)\n\t\t}\n\n\t\tavg := c.framers.readPool.bufAvgSize.Load()\n\t\t// After 100 iterations with weight=8, avg should be very close to targetSize.\n\t\t// Allow 1% tolerance.\n\t\tif avg < targetSize*99/100 || avg > targetSize*101/100 {\n\t\t\tt.Errorf(\"EWMA should converge to ~%d, got %d\", targetSize, avg)\n\t\t}\n\t})\n\n\tt.Run(\"EWMAConvergesDownward\", func(t *testing.T) {\n\t\tc := newTestConnWithFramerPool()\n\n\t\t// First, push EWMA up.\n\t\tfor i := 0; i < 100; i++ {\n\t\t\tf := 
c.getReadFramer()\n\t\t\tf.readBuffer = make([]byte, 4096)\n\t\t\tc.releaseReadFramer(f)\n\t\t}\n\n\t\t// Now release framers with small buffers; EWMA should converge back down.\n\t\t// Due to upward bias (+4 rounding), convergence downward is slower.\n\t\tconst smallSize = 256\n\t\tfor i := 0; i < 200; i++ {\n\t\t\tf := c.getReadFramer()\n\t\t\tf.readBuffer = make([]byte, smallSize)\n\t\t\tc.releaseReadFramer(f)\n\t\t}\n\n\t\tavg := c.framers.readPool.bufAvgSize.Load()\n\t\t// Due to the upward-biased rounding (+framerBufEWMAWeight/2), the EWMA settles\n\t\t// slightly above the actual sample value when converging downward. The steady-state\n\t\t// offset is at most framerBufEWMAWeight/2 (i.e., 4) per step which compounds to\n\t\t// roughly framerBufEWMAWeight/2 above the target. Allow generous tolerance.\n\t\tif avg < smallSize || avg > smallSize+2*framerBufEWMAWeight {\n\t\t\tt.Errorf(\"EWMA should converge toward ~%d (with upward bias), got %d\", smallSize, avg)\n\t\t}\n\t})\n\n\tt.Run(\"ShrinkOversizedBuffer\", func(t *testing.T) {\n\t\tc := newTestConnWithFramerPool()\n\n\t\t// EWMA starts at defaultBufSize (128). 
Release a very large framer.\n\t\tf := c.getReadFramer()\n\t\tf.readBuffer = make([]byte, 100000)\n\t\torigBuf := f.readBuffer\n\t\tc.releaseReadFramer(f)\n\n\t\t// Get the framer back from the pool and check that its buffer was shrunk.\n\t\tf2 := c.getReadFramer()\n\t\tif cap(f2.readBuffer) >= cap(origBuf) {\n\t\t\tt.Errorf(\"oversized buffer should have been shrunk: original cap=%d, new cap=%d\",\n\t\t\t\tcap(origBuf), cap(f2.readBuffer))\n\t\t}\n\t\t// Shrink target should be at least defaultBufSize.\n\t\tif cap(f2.readBuffer) < defaultBufSize {\n\t\t\tt.Errorf(\"shrunk buffer should be at least defaultBufSize=%d, got cap=%d\",\n\t\t\t\tdefaultBufSize, cap(f2.readBuffer))\n\t\t}\n\t\tc.releaseReadFramer(f2)\n\t})\n\n\tt.Run(\"NoShrinkNormalBuffer\", func(t *testing.T) {\n\t\tc := newTestConnWithFramerPool()\n\n\t\t// Release a few framers with identical buffers; none should be shrunk.\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tf := c.getReadFramer()\n\t\t\torigCap := cap(f.readBuffer)\n\t\t\tc.releaseReadFramer(f)\n\t\t\tf2 := c.getReadFramer()\n\t\t\tif cap(f2.readBuffer) != origCap {\n\t\t\t\tt.Errorf(\"iteration %d: normal-sized buffer should not be shrunk: orig cap=%d, new cap=%d\",\n\t\t\t\t\ti, origCap, cap(f2.readBuffer))\n\t\t\t}\n\t\t\tc.releaseReadFramer(f2)\n\t\t}\n\t})\n\n\tt.Run(\"ShrinkFloorIsDefaultBufSize\", func(t *testing.T) {\n\t\tc := newTestConnWithFramerPool()\n\n\t\t// Push EWMA down to a very small value by releasing tiny buffers.\n\t\t// The shrink target should never go below defaultBufSize.\n\t\tfor i := 0; i < 100; i++ {\n\t\t\tf := c.getReadFramer()\n\t\t\tf.readBuffer = make([]byte, 1) // Tiny buffer\n\t\t\tc.releaseReadFramer(f)\n\t\t}\n\n\t\t// Now release a moderately large buffer that triggers shrink.\n\t\tf := c.getReadFramer()\n\t\tf.readBuffer = make([]byte, 10000)\n\t\tc.releaseReadFramer(f)\n\n\t\tf2 := c.getReadFramer()\n\t\tif cap(f2.readBuffer) < defaultBufSize {\n\t\t\tt.Errorf(\"shrink target should respect defaultBufSize 
floor: got cap=%d, want >= %d\",\n\t\t\t\tcap(f2.readBuffer), defaultBufSize)\n\t\t}\n\t\tc.releaseReadFramer(f2)\n\t})\n\n\tt.Run(\"NilFramer\", func(t *testing.T) {\n\t\tc := newTestConnWithFramerPool()\n\t\t// Should not panic.\n\t\tc.releaseReadFramer(nil)\n\t})\n\n\tt.Run(\"NoPool\", func(t *testing.T) {\n\t\tc := &Conn{} // No pool initialized.\n\t\tf := newFramer(nil, protoVersion4)\n\t\t// Should not panic, framer is just dropped.\n\t\tc.releaseReadFramer(f)\n\t})\n\n\tt.Run(\"ReadAndWritePoolsAreSeparate\", func(t *testing.T) {\n\t\tc := newTestConnWithFramerPool()\n\n\t\treadFramer := c.getReadFramer()\n\t\treadFramer.readBuffer = make([]byte, 100000)\n\t\tc.releaseReadFramer(readFramer)\n\n\t\twriteFramer := c.getWriteFramer()\n\t\twriteFramer.buf = make([]byte, 0, 8192)\n\t\tc.releaseWriteFramer(writeFramer)\n\n\t\tif writeAvg := c.framers.writePool.bufAvgSize.Load(); writeAvg <= defaultBufSize {\n\t\t\tt.Fatalf(\"writer pool should track its own EWMA, got %d\", writeAvg)\n\t\t}\n\n\t\twriteFramer = c.getWriteFramer()\n\t\tif cap(writeFramer.buf) >= cap(readFramer.readBuffer) {\n\t\t\tt.Fatalf(\"writer framer should not inherit oversized reader buffer state, got writer cap=%d reader cap=%d\", cap(writeFramer.buf), cap(readFramer.readBuffer))\n\t\t}\n\t\tc.releaseWriteFramer(writeFramer)\n\t})\n\n\tt.Run(\"WriteFramerResetsCustomPayloadFlagBetweenUses\", func(t *testing.T) {\n\t\tc := newTestConnWithFramerPool()\n\t\tconst streamID = 7\n\n\t\tf := c.getWriteFramer()\n\t\tpayloadReq := &writeQueryFrame{\n\t\t\tstatement: \"SELECT now() FROM system.local\",\n\t\t\tcustomPayload: map[string][]byte{\n\t\t\t\t\"k\": []byte(\"v\"),\n\t\t\t},\n\t\t}\n\t\t_, payloadHeader := buildTestFrame(t, f, payloadReq, streamID)\n\t\tif payloadHeader.Flags&frm.FlagCustomPayload == 0 {\n\t\t\tt.Fatalf(\"custom payload frame should set %v, got flags=%08b\", frm.FlagCustomPayload, payloadHeader.Flags)\n\t\t}\n\n\t\tc.releaseWriteFramer(f)\n\t\tif got, want := f.flags, 
c.framers.defaults.flags; got != want {\n\t\t\tt.Fatalf(\"releaseWriteFramer should restore default flags: got %08b want %08b\", got, want)\n\t\t}\n\n\t\tf = c.getWriteFramer()\n\t\tplainReq := &writeQueryFrame{statement: \"SELECT now() FROM system.local\"}\n\t\tplainBuf, plainHeader := buildTestFrame(t, f, plainReq, streamID)\n\t\tif plainHeader.Flags != c.framers.defaults.flags {\n\t\t\tt.Fatalf(\"plain query should use default flags after pooled reuse: got %08b want %08b\", plainHeader.Flags, c.framers.defaults.flags)\n\t\t}\n\n\t\tfresh := newFramer(nil, protoVersion4)\n\t\tfreshBuf, freshHeader := buildTestFrame(t, fresh, plainReq, streamID)\n\t\tif plainHeader.Flags != freshHeader.Flags {\n\t\t\tt.Fatalf(\"reused plain query flags do not match fresh framer: got %08b want %08b\", plainHeader.Flags, freshHeader.Flags)\n\t\t}\n\t\tif !bytes.Equal(plainBuf, freshBuf) {\n\t\t\tt.Fatal(\"reused plain query frame does not match fresh framer output\")\n\t\t}\n\n\t\tc.releaseWriteFramer(f)\n\t})\n\n\tt.Run(\"WriteFramerResetsTracingFlagBetweenUses\", func(t *testing.T) {\n\t\tc := newTestConnWithFramerPool()\n\t\tconst streamID = 9\n\n\t\tf := c.getWriteFramer()\n\t\tf.trace()\n\t\ttracedReq := &writeQueryFrame{statement: \"SELECT now() FROM system.local\"}\n\t\t_, tracedHeader := buildTestFrame(t, f, tracedReq, streamID)\n\t\tif tracedHeader.Flags&frm.FlagTracing == 0 {\n\t\t\tt.Fatalf(\"traced query should set %v, got flags=%08b\", frm.FlagTracing, tracedHeader.Flags)\n\t\t}\n\n\t\tc.releaseWriteFramer(f)\n\t\tif got, want := f.flags, c.framers.defaults.flags; got != want {\n\t\t\tt.Fatalf(\"releaseWriteFramer should restore default flags: got %08b want %08b\", got, want)\n\t\t}\n\n\t\tf = c.getWriteFramer()\n\t\t_, plainHeader := buildTestFrame(t, f, tracedReq, streamID)\n\t\tif plainHeader.Flags&frm.FlagTracing != 0 {\n\t\t\tt.Fatalf(\"plain query should not inherit tracing flag after pooled reuse: got %08b\", 
plainHeader.Flags)\n\t\t}\n\n\t\tc.releaseWriteFramer(f)\n\t})\n}\n"
  },
  {
    "path": "connectionpool.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2012, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/internal/debug\"\n\t\"github.com/gocql/gocql/tablets\"\n\n\t\"github.com/gocql/gocql/debounce\"\n)\n\n// interface to implement to receive the host information\ntype SetHosts interface {\n\tSetHosts(hosts []*HostInfo)\n}\n\n// interface to implement to receive the partitioner value\ntype SetPartitioner interface {\n\tSetPartitioner(partitioner string)\n}\n\n// interface to implement to receive the tablets value\ntype SetTablets interface {\n\tSetTablets(tablets tablets.TabletInfoList)\n}\n\ntype policyConnPool struct {\n\tsession       *Session\n\thostConnPools map[string]*hostConnPool\n\tkeyspace      string\n\tport          int\n\tnumConns      int\n\tmu            sync.RWMutex\n}\n\nfunc connConfig(cfg *ClusterConfig) (*ConnConfig, error) {\n\thostDialer := 
cfg.HostDialer\n\n\tif hostDialer == nil {\n\t\tdialer := cfg.Dialer\n\t\tif dialer == nil {\n\t\t\td := net.Dialer{\n\t\t\t\tTimeout: cfg.ConnectTimeout,\n\t\t\t}\n\t\t\tif cfg.SocketKeepalive > 0 {\n\t\t\t\td.KeepAlive = cfg.SocketKeepalive\n\t\t\t}\n\t\t\tdialer = &ScyllaShardAwareDialer{Dialer: d}\n\t\t}\n\n\t\thostDialer = &scyllaDialer{\n\t\t\tdialer:    dialer,\n\t\t\tlogger:    cfg.logger(),\n\t\t\ttlsConfig: cfg.getActualTLSConfig(),\n\t\t\tcfg:       cfg,\n\t\t}\n\t}\n\n\treturn &ConnConfig{\n\t\tProtoVersion:   cfg.ProtoVersion,\n\t\tCQLVersion:     cfg.CQLVersion,\n\t\tWriteTimeout:   cfg.WriteTimeout,\n\t\tReadTimeout:    cfg.ReadTimeout,\n\t\tConnectTimeout: cfg.ConnectTimeout,\n\t\tDialer:         cfg.Dialer,\n\t\tHostDialer:     hostDialer,\n\t\tCompressor:     cfg.Compressor,\n\t\tAuthenticator:  cfg.Authenticator,\n\t\tAuthProvider:   cfg.AuthProvider,\n\t\tKeepalive:      cfg.SocketKeepalive,\n\t\tLogger:         cfg.logger(),\n\t\ttlsConfig:      cfg.getActualTLSConfig(),\n\t}, nil\n}\n\nfunc newPolicyConnPool(session *Session) *policyConnPool {\n\t// create the pool\n\tpool := &policyConnPool{\n\t\tsession:       session,\n\t\tport:          session.cfg.Port,\n\t\tnumConns:      session.cfg.NumConns,\n\t\tkeyspace:      session.cfg.Keyspace,\n\t\thostConnPools: map[string]*hostConnPool{},\n\t}\n\n\treturn pool\n}\n\nfunc (p *policyConnPool) SetHosts(hosts []*HostInfo) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\ttoRemove := make(map[string]struct{})\n\tfor hostID := range p.hostConnPools {\n\t\ttoRemove[hostID] = struct{}{}\n\t}\n\n\tpools := make(chan *hostConnPool)\n\tcreateCount := 0\n\tfor _, host := range hosts {\n\t\tif !host.IsUp() {\n\t\t\t// don't create a connection pool for a down host\n\t\t\tcontinue\n\t\t}\n\t\thostID := host.HostID()\n\t\tif _, exists := p.hostConnPools[hostID]; exists {\n\t\t\t// still have this host, so don't remove it\n\t\t\tdelete(toRemove, hostID)\n\t\t\tcontinue\n\t\t}\n\n\t\tcreateCount++\n\t\tgo func(host 
*HostInfo) {\n\t\t\t// create a connection pool for the host\n\t\t\tpools <- newHostConnPool(\n\t\t\t\tp.session,\n\t\t\t\thost,\n\t\t\t\tp.numConns,\n\t\t\t\tp.keyspace,\n\t\t\t)\n\t\t}(host)\n\t}\n\n\t// add created pools\n\tfor createCount > 0 {\n\t\tpool := <-pools\n\t\tcreateCount--\n\t\tif pool.Size() > 0 {\n\t\t\t// add pool only if there a connections available\n\t\t\tp.hostConnPools[pool.host.HostID()] = pool\n\t\t}\n\t}\n\n\tfor addr := range toRemove {\n\t\tpool := p.hostConnPools[addr]\n\t\tdelete(p.hostConnPools, addr)\n\t\tgo pool.Close()\n\t}\n}\n\nfunc (p *policyConnPool) InFlight() int {\n\tp.mu.RLock()\n\tcount := 0\n\tfor _, pool := range p.hostConnPools {\n\t\tcount += pool.InFlight()\n\t}\n\tp.mu.RUnlock()\n\n\treturn count\n}\n\nfunc (p *policyConnPool) Size() int {\n\tp.mu.RLock()\n\tcount := 0\n\tfor _, pool := range p.hostConnPools {\n\t\tcount += pool.Size()\n\t}\n\tp.mu.RUnlock()\n\n\treturn count\n}\n\nfunc (p *policyConnPool) getPool(host *HostInfo) (pool *hostConnPool, ok bool) {\n\thostID := host.HostID()\n\tp.mu.RLock()\n\tpool, ok = p.hostConnPools[hostID]\n\tp.mu.RUnlock()\n\treturn\n}\n\nfunc (p *policyConnPool) getPoolByHostID(hostID string) (pool *hostConnPool, ok bool) {\n\tp.mu.RLock()\n\tpool, ok = p.hostConnPools[hostID]\n\tp.mu.RUnlock()\n\treturn\n}\n\nfunc (p *policyConnPool) iteratePool(iter func(info HostPoolInfo) bool) {\n\tp.mu.RLock()\n\tfor _, pool := range p.hostConnPools {\n\t\tif !iter(pool) {\n\t\t\tbreak\n\t\t}\n\t}\n\tp.mu.RUnlock()\n}\n\nfunc (p *policyConnPool) Close() {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\t// close the pools\n\tfor addr, pool := range p.hostConnPools {\n\t\tdelete(p.hostConnPools, addr)\n\t\tpool.Close()\n\t}\n}\n\nfunc (p *policyConnPool) addHost(host *HostInfo) {\n\thostID := host.HostID()\n\tp.mu.Lock()\n\tpool, ok := p.hostConnPools[hostID]\n\tif !ok {\n\t\tpool = 
newHostConnPool(\n\t\t\tp.session,\n\t\t\thost,\n\t\t\tp.numConns,\n\t\t\tp.keyspace,\n\t\t)\n\n\t\tp.hostConnPools[hostID] = pool\n\t}\n\tp.mu.Unlock()\n\n\tpool.fill_debounce()\n}\n\nfunc (p *policyConnPool) removeHost(hostID string) {\n\tp.mu.Lock()\n\tpool, ok := p.hostConnPools[hostID]\n\tif !ok {\n\t\tp.mu.Unlock()\n\t\treturn\n\t}\n\n\tdelete(p.hostConnPools, hostID)\n\tp.mu.Unlock()\n\n\tgo pool.Close()\n}\n\n// hostConnPool is a connection pool for a single host.\n// Connection selection is based on a provided ConnSelectionPolicy\ntype hostConnPool struct {\n\tconnPicker ConnPicker\n\tlogger     StdLogger\n\tsession    *Session\n\thost       *HostInfo\n\tdebouncer  *debounce.SimpleDebouncer\n\tkeyspace   string\n\tsize       int\n\t// protection for connPicker, closed, filling\n\tmu      sync.RWMutex\n\tclosed  bool\n\tfilling bool\n}\n\nfunc (pool *hostConnPool) String() string {\n\tpool.mu.RLock()\n\tdefer pool.mu.RUnlock()\n\tsize, _ := pool.connPicker.Size()\n\treturn fmt.Sprintf(\"[filling=%v closed=%v conns=%v size=%v host=%v]\",\n\t\tpool.filling, pool.closed, size, pool.size, pool.host)\n}\n\nfunc newHostConnPool(session *Session, host *HostInfo, size int, keyspace string) *hostConnPool {\n\tpool := &hostConnPool{\n\t\tsession:    session,\n\t\thost:       host,\n\t\tsize:       size,\n\t\tkeyspace:   keyspace,\n\t\tconnPicker: nopConnPicker{},\n\t\tfilling:    false,\n\t\tclosed:     false,\n\t\tlogger:     session.logger,\n\t\tdebouncer:  debounce.NewSimpleDebouncer(),\n\t}\n\n\t// the pool is not filled or connected\n\treturn pool\n}\n\n// Pick a connection from this connection pool for the given query.\nfunc (pool *hostConnPool) Pick(token Token, qry ExecutableQuery) *Conn {\n\tpool.mu.RLock()\n\tdefer pool.mu.RUnlock()\n\n\tif pool.closed {\n\t\treturn nil\n\t}\n\n\tsize, missing := pool.connPicker.Size()\n\tif missing > 0 {\n\t\t// try to fill the pool\n\t\tgo pool.fill_debounce()\n\n\t\tif size == 0 {\n\t\t\treturn 
nil\n\t\t}\n\t}\n\n\treturn pool.connPicker.Pick(token, qry)\n}\n\n// Size returns the number of connections currently active in the pool\nfunc (pool *hostConnPool) Size() int {\n\tpool.mu.RLock()\n\tdefer pool.mu.RUnlock()\n\n\tsize, _ := pool.connPicker.Size()\n\treturn size\n}\n\n// Size returns the number of connections currently active in the pool\nfunc (pool *hostConnPool) InFlight() int {\n\tpool.mu.RLock()\n\tdefer pool.mu.RUnlock()\n\n\tsize := pool.connPicker.InFlight()\n\treturn size\n}\n\n// Close the connection pool\nfunc (pool *hostConnPool) Close() {\n\tpool.mu.Lock()\n\tif pool.closed {\n\t\tpool.mu.Unlock()\n\t\treturn\n\t}\n\tpool.closed = true\n\tpool.mu.Unlock()\n\n\tpool.connPicker.Close()\n}\n\n// Fill the connection pool\nfunc (pool *hostConnPool) fill() {\n\tpool.mu.RLock()\n\t// avoid filling a closed pool, or concurrent filling\n\tif pool.closed || pool.filling {\n\t\tpool.mu.RUnlock()\n\t\treturn\n\t}\n\n\t// determine the filling work to be done\n\tstartCount, fillCount := pool.connPicker.Size()\n\n\t// avoid filling a full (or overfull) pool\n\tif fillCount <= 0 {\n\t\tpool.mu.RUnlock()\n\t\treturn\n\t}\n\n\t// switch from read to write lock\n\tpool.mu.RUnlock()\n\tpool.mu.Lock()\n\n\tstartCount, fillCount = pool.connPicker.Size()\n\tif pool.closed || pool.filling || fillCount <= 0 {\n\t\t// looks like another goroutine already beat this\n\t\t// goroutine to the filling\n\t\tpool.mu.Unlock()\n\t\treturn\n\t}\n\n\t// ok fill the pool\n\tpool.filling = true\n\n\t// allow others to access the pool while filling\n\tpool.mu.Unlock()\n\t// only this goroutine should make calls to fill/empty the pool at this\n\t// point until after this routine or its subordinates calls\n\t// fillingStopped\n\n\t// fill only the first connection synchronously\n\tif startCount == 0 {\n\t\terr := pool.connect()\n\t\tpool.logConnectErr(err)\n\n\t\tif err != nil {\n\t\t\t// probably unreachable host\n\t\t\tpool.fillingStopped(err)\n\t\t\treturn\n\t\t}\n\t\t// 
notify the session that this node is connected\n\t\tgo pool.session.handleNodeConnected(pool.host)\n\n\t\t// filled one, let's reload it to see if it has changed\n\t\tpool.mu.RLock()\n\t\t_, fillCount = pool.connPicker.Size()\n\t\tpool.mu.RUnlock()\n\t}\n\n\t// fill the rest of the pool asynchronously\n\tgo func() {\n\t\terr := pool.connectMany(fillCount)\n\n\t\t// mark the end of filling\n\t\tpool.fillingStopped(err)\n\n\t\tif err == nil && startCount > 0 {\n\t\t\t// notify the session that this node is connected again\n\t\t\tgo pool.session.handleNodeConnected(pool.host)\n\t\t}\n\t}()\n}\n\nfunc (pool *hostConnPool) fill_debounce() {\n\tpool.debouncer.Debounce(pool.fill)\n}\n\nfunc (pool *hostConnPool) logConnectErr(err error) {\n\tif opErr, ok := err.(*net.OpError); ok && (opErr.Op == \"dial\" || opErr.Op == \"read\") {\n\t\t// connection refused\n\t\t// these are typical during a node outage so avoid log spam.\n\t\tif debug.Enabled {\n\t\t\tpool.logger.Printf(\"unable to dial %q: %v\\n\", pool.host, err)\n\t\t}\n\t} else if err != nil {\n\t\t// unexpected error\n\t\tpool.logger.Printf(\"error: failed to connect to %q due to error: %v\", pool.host, err)\n\t}\n}\n\n// transition back to a not-filling state.\nfunc (pool *hostConnPool) fillingStopped(err error) {\n\tif err != nil {\n\t\tif debug.Enabled {\n\t\t\tpool.logger.Printf(\"gocql: filling stopped %q: %v\\n\", pool.host.ConnectAddress(), err)\n\t\t}\n\t\t// wait for some time to avoid back-to-back filling\n\t\t// this provides some time between failed attempts\n\t\t// to fill the pool for the host to recover\n\t\ttime.Sleep(time.Duration(rand.Int31n(100)+31) * time.Millisecond)\n\t}\n\n\tpool.mu.Lock()\n\tpool.filling = false\n\tcount, _ := pool.connPicker.Size()\n\thost := pool.host\n\tport := pool.host.Port()\n\tpool.mu.Unlock()\n\n\t// if we errored and the size is now zero, make sure the host is marked as down\n\t// see https://github.com/apache/cassandra-gocql-driver/issues/1614\n\tif debug.Enabled 
{\n\t\tpool.logger.Printf(\"gocql: conns of pool after stopped %q: %v\\n\", host.ConnectAddress(), count)\n\t}\n\tif err != nil && count == 0 {\n\t\tif pool.session.cfg.ConvictionPolicy.AddFailure(err, host) {\n\t\t\tpool.session.handleNodeDown(host.ConnectAddress(), port)\n\t\t}\n\t}\n}\n\n// connectMany creates new connections concurrent.\nfunc (pool *hostConnPool) connectMany(count int) error {\n\tif count == 0 {\n\t\treturn nil\n\t}\n\tvar (\n\t\twg         sync.WaitGroup\n\t\tmu         sync.Mutex\n\t\tconnectErr error\n\t)\n\twg.Add(count)\n\tfor i := 0; i < count; i++ {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := pool.connect()\n\t\t\tpool.logConnectErr(err)\n\t\t\tif err != nil {\n\t\t\t\tmu.Lock()\n\t\t\t\tconnectErr = err\n\t\t\t\tmu.Unlock()\n\t\t\t}\n\t\t}()\n\t}\n\t// wait for all connections are done\n\twg.Wait()\n\n\treturn connectErr\n}\n\n// create a new connection to the host and add it to the pool\nfunc (pool *hostConnPool) connect() (err error) {\n\tpool.mu.Lock()\n\tshardID, nrShards := pool.connPicker.NextShard()\n\tpool.mu.Unlock()\n\n\t// TODO: provide a more robust connection retry mechanism, we should also\n\t// be able to detect hosts that come up by trying to connect to downed ones.\n\t// try to connect\n\tvar conn *Conn\n\treconnectionPolicy := pool.session.cfg.ReconnectionPolicy\n\tfor i := 0; i < reconnectionPolicy.GetMaxRetries(); i++ {\n\t\tconn, err = pool.session.connectShard(pool.session.ctx, pool.host, pool, shardID, nrShards)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tif opErr, isOpErr := err.(*net.OpError); isOpErr {\n\t\t\t// if the error is not a temporary error (ex: network unreachable) don't\n\t\t\t//  retry\n\t\t\tif !opErr.Temporary() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif debug.Enabled {\n\t\t\tpool.logger.Printf(\"gocql: connection failed %q: %v, reconnecting with %T\\n\",\n\t\t\t\tpool.host.ConnectAddress(), err, reconnectionPolicy)\n\t\t}\n\t\ttime.Sleep(reconnectionPolicy.GetInterval(i))\n\t}\n\n\tif 
err != nil {\n\t\treturn err\n\t}\n\n\tif pool.keyspace != \"\" {\n\t\t// set the keyspace\n\t\tif err = conn.UseKeyspace(pool.keyspace); err != nil {\n\t\t\tconn.Close()\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// add the Conn to the pool\n\tpool.mu.Lock()\n\tif pool.closed {\n\t\tpool.mu.Unlock()\n\t\tconn.Close()\n\t\treturn nil\n\t}\n\n\t// lazily initialize the connPicker when we know the required type\n\tpool.initConnPicker(conn)\n\tif err := pool.connPicker.Put(conn); err != nil {\n\t\tpool.mu.Unlock()\n\t\tconn.Close()\n\t\tif debug.Enabled {\n\t\t\tpool.logger.Printf(\"gocql: pool connection was not added to the pool: %w\", err)\n\t\t}\n\t\treturn nil\n\t}\n\tpool.mu.Unlock()\n\tconn.finalizeConnection()\n\n\treturn nil\n}\n\nfunc (pool *hostConnPool) initConnPicker(conn *Conn) {\n\tif _, ok := pool.connPicker.(nopConnPicker); !ok {\n\t\treturn\n\t}\n\n\tif conn.isScyllaConn() {\n\t\tpool.connPicker = newScyllaConnPicker(conn, pool.logger)\n\t\treturn\n\t}\n\n\tpool.connPicker = newDefaultConnPicker(pool.size)\n}\n\n// handle any error from a Conn\nfunc (pool *hostConnPool) HandleError(conn *Conn, err error, closed bool) {\n\tif !closed {\n\t\t// still an open connection, so continue using it\n\t\treturn\n\t}\n\n\t// TODO: track the number of errors per host and detect when a host is dead,\n\t// then also have something which can detect when a host comes back.\n\tpool.mu.Lock()\n\tdefer pool.mu.Unlock()\n\n\tif pool.closed {\n\t\t// pool closed\n\t\treturn\n\t}\n\n\tif debug.Enabled {\n\t\tpool.logger.Printf(\"gocql: pool connection error %q: %v\\n\", conn.addr, err)\n\t}\n\n\tpool.connPicker.Remove(conn)\n\tgo pool.fill_debounce()\n}\n\nfunc (pool *hostConnPool) GetConnectionCount() int {\n\tpool.mu.Lock()\n\tdefer pool.mu.Unlock()\n\treturn pool.connPicker.GetConnectionCount()\n}\n\nfunc (pool *hostConnPool) GetExcessConnectionCount() int {\n\tpool.mu.Lock()\n\tdefer pool.mu.Unlock()\n\treturn pool.connPicker.GetExcessConnectionCount()\n}\n\nfunc (pool 
*hostConnPool) GetShardCount() int {\n\tpool.mu.Lock()\n\tdefer pool.mu.Unlock()\n\treturn pool.connPicker.GetShardCount()\n}\n\nfunc (pool *hostConnPool) Host() HostInformation {\n\treturn pool.host\n}\n\nfunc (pool *hostConnPool) IsClosed() bool {\n\tpool.mu.Lock()\n\tdefer pool.mu.Unlock()\n\treturn pool.closed\n}\n"
  },
  {
    "path": "connectionpool_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/debounce\"\n)\n\nfunc TestSetupTLSConfig(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname                       string\n\t\topts                       *SslOptions\n\t\texpectedInsecureSkipVerify bool\n\t}{\n\t\t{\n\t\t\tname: \"Config nil, EnableHostVerification false\",\n\t\t\topts: &SslOptions{\n\t\t\t\tEnableHostVerification: false,\n\t\t\t},\n\t\t\texpectedInsecureSkipVerify: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Config nil, EnableHostVerification true\",\n\t\t\topts: &SslOptions{\n\t\t\t\tEnableHostVerification: true,\n\t\t\t},\n\t\t\texpectedInsecureSkipVerify: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Config.InsecureSkipVerify false, EnableHostVerification false\",\n\t\t\topts: 
&SslOptions{\n\t\t\t\tEnableHostVerification: false,\n\t\t\t\tConfig: &tls.Config{\n\t\t\t\t\tInsecureSkipVerify: false,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedInsecureSkipVerify: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Config.InsecureSkipVerify true, EnableHostVerification false\",\n\t\t\topts: &SslOptions{\n\t\t\t\tEnableHostVerification: false,\n\t\t\t\tConfig: &tls.Config{\n\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedInsecureSkipVerify: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Config.InsecureSkipVerify false, EnableHostVerification true\",\n\t\t\topts: &SslOptions{\n\t\t\t\tEnableHostVerification: true,\n\t\t\t\tConfig: &tls.Config{\n\t\t\t\t\tInsecureSkipVerify: false,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedInsecureSkipVerify: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Config.InsecureSkipVerify true, EnableHostVerification true\",\n\t\t\topts: &SslOptions{\n\t\t\t\tEnableHostVerification: true,\n\t\t\t\tConfig: &tls.Config{\n\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedInsecureSkipVerify: false,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\ttest := test\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\ttlsConfig, err := setupTLSConfig(test.opts)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unexpected error %q\", err.Error())\n\t\t\t}\n\t\t\tif tlsConfig.InsecureSkipVerify != test.expectedInsecureSkipVerify {\n\t\t\t\tt.Fatalf(\"got %v, but expected %v\", tlsConfig.InsecureSkipVerify,\n\t\t\t\t\ttest.expectedInsecureSkipVerify)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// errorConn is a mock net.Conn whose Close returns an error,\n// triggering HandleError via Conn.closeWithError.\ntype errorConn struct {\n\tnet.Conn\n}\n\nfunc (e errorConn) Close() error {\n\treturn errors.New(\"mock close error\")\n}\n\n// TestHostConnPoolCloseDeadlock verifies that hostConnPool.Close() does not\n// self-deadlock when defaultConnPicker closes connections that trigger\n// HandleError callbacks.\n//\n// Deadlock chain (before fix):\n//\n//\tClose() -> 
connPicker.Close() -> conn.Close() -> HandleError() -> pool.mu.Lock() (DEADLOCK)\nfunc TestHostConnPoolCloseDeadlock(t *testing.T) {\n\tt.Parallel()\n\n\thost := &HostInfo{connectAddress: net.ParseIP(\"127.0.0.1\"), port: 9042}\n\tsession := &Session{\n\t\tcfg: ClusterConfig{\n\t\t\tNumConns:         2,\n\t\t\tConvictionPolicy: &SimpleConvictionPolicy{},\n\t\t},\n\t\tlogger: nopLogger{},\n\t}\n\n\tpool := &hostConnPool{\n\t\tsession:    session,\n\t\thost:       host,\n\t\tsize:       2,\n\t\tkeyspace:   \"test\",\n\t\tconnPicker: nopConnPicker{},\n\t\tlogger:     nopLogger{},\n\t\tdebouncer:  debounce.NewSimpleDebouncer(),\n\t}\n\n\t// Build a defaultConnPicker with Conns that trigger HandleError on Close.\n\tpicker := newDefaultConnPicker(2)\n\tfor i := 0; i < 2; i++ {\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tconn := &Conn{\n\t\t\tconn:         errorConn{},\n\t\t\terrorHandler: pool,\n\t\t\tcancel:       cancel,\n\t\t\tctx:          ctx,\n\t\t\tlogger:       nopLogger{},\n\t\t}\n\t\t_ = picker.Put(conn)\n\t}\n\tpool.connPicker = picker\n\n\t// Close must return before timeout; otherwise the pool is deadlocked.\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tpool.Close()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\t// Success — Close returned without deadlocking.\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"hostConnPool.Close() deadlocked: timed out after 5 seconds\")\n\t}\n}\n\n// TestHostConnPoolConnectClosedPoolDoesNotDeadlock verifies that connect's\n// already-closed-pool path does not close a connection while holding pool.mu.\nfunc TestHostConnPoolConnectClosedPoolDoesNotDeadlock(t *testing.T) {\n\tt.Parallel()\n\n\thost := &HostInfo{connectAddress: net.ParseIP(\"127.0.0.1\"), port: 9042}\n\tsession := &Session{\n\t\tcfg: ClusterConfig{\n\t\t\tNumConns:         1,\n\t\t\tConvictionPolicy: &SimpleConvictionPolicy{},\n\t\t},\n\t\tlogger: nopLogger{},\n\t}\n\n\tpool := &hostConnPool{\n\t\tsession:    
session,\n\t\thost:       host,\n\t\tsize:       1,\n\t\tkeyspace:   \"test\",\n\t\tconnPicker: nopConnPicker{},\n\t\tlogger:     nopLogger{},\n\t\tdebouncer:  debounce.NewSimpleDebouncer(),\n\t\tclosed:     true,\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tconn := &Conn{\n\t\tconn:         errorConn{},\n\t\terrorHandler: pool,\n\t\tcancel:       cancel,\n\t\tctx:          ctx,\n\t\tlogger:       nopLogger{},\n\t}\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tpool.mu.Lock()\n\t\tif pool.closed {\n\t\t\tpool.mu.Unlock()\n\t\t\tconn.Close()\n\t\t}\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"closed-pool connect cleanup deadlocked: timed out after 5 seconds\")\n\t}\n}\n"
  },
  {
    "path": "connpicker.go",
    "content": "package gocql\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"sync/atomic\"\n)\n\ntype ConnPicker interface {\n\tPick(Token, ExecutableQuery) *Conn\n\tPut(*Conn) error\n\tRemove(conn *Conn)\n\tInFlight() int\n\tSize() (int, int)\n\tClose()\n\n\t// NextShard returns the shardID to connect to.\n\t// nrShard specifies how many shards the host has.\n\t// If nrShards is zero, the caller shouldn't use shard-aware port.\n\tNextShard() (shardID, nrShards int)\n\n\tGetConnectionCount() int\n\tGetExcessConnectionCount() int\n\tGetShardCount() int\n}\n\ntype defaultConnPicker struct {\n\tconns []*Conn\n\tpos   uint32\n\tsize  int\n\tmu    sync.RWMutex\n}\n\nfunc (p *defaultConnPicker) GetConnectionCount() int {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\treturn len(p.conns)\n}\n\nfunc (p *defaultConnPicker) GetExcessConnectionCount() int {\n\treturn 0\n}\n\nfunc (p *defaultConnPicker) GetShardCount() int {\n\t// It is not supposed to be used for scylla nodes and therefore does not know anything about shards count\n\treturn 0\n}\n\nfunc newDefaultConnPicker(size int) *defaultConnPicker {\n\tif size <= 0 {\n\t\tpanic(fmt.Sprintf(\"invalid pool size %d\", size))\n\t}\n\treturn &defaultConnPicker{\n\t\tsize: size,\n\t}\n}\n\nfunc (p *defaultConnPicker) Remove(conn *Conn) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tfor i, candidate := range p.conns {\n\t\tif candidate == conn {\n\t\t\tlast := len(p.conns) - 1\n\t\t\tp.conns[i], p.conns = p.conns[last], p.conns[:last]\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (p *defaultConnPicker) Close() {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tconns := p.conns\n\tp.conns = nil\n\tfor _, conn := range conns {\n\t\tif conn != nil {\n\t\t\tconn.Close()\n\t\t}\n\t}\n}\n\nfunc (p *defaultConnPicker) InFlight() int {\n\tp.mu.RLock()\n\tsize := len(p.conns)\n\tp.mu.RUnlock()\n\treturn size\n}\n\nfunc (p *defaultConnPicker) Size() (int, int) {\n\tp.mu.RLock()\n\tsize := len(p.conns)\n\tp.mu.RUnlock()\n\treturn size, p.size - size\n}\n\nfunc (p 
*defaultConnPicker) Pick(Token, ExecutableQuery) *Conn {\n\tpos := int(atomic.AddUint32(&p.pos, 1) - 1)\n\n\tp.mu.RLock()\n\tsize := len(p.conns)\n\n\tvar (\n\t\tleastBusyConn    *Conn\n\t\tstreamsAvailable int\n\t)\n\n\t// find the conn which has the most available streams, this is racy\n\tfor i := 0; i < size; i++ {\n\t\tconn := p.conns[(pos+i)%size]\n\t\tif conn == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif streams := conn.AvailableStreams(); streams > streamsAvailable {\n\t\t\tleastBusyConn = conn\n\t\t\tstreamsAvailable = streams\n\t\t}\n\t}\n\tp.mu.RUnlock()\n\n\treturn leastBusyConn\n}\n\nfunc (p *defaultConnPicker) Put(conn *Conn) error {\n\tp.mu.Lock()\n\tp.conns = append(p.conns, conn)\n\tp.mu.Unlock()\n\treturn nil\n}\n\nfunc (*defaultConnPicker) NextShard() (shardID, nrShards int) {\n\treturn 0, 0\n}\n\n// nopConnPicker is a no-operation implementation of ConnPicker, it's used when\n// hostConnPool is created to allow deferring creation of the actual ConnPicker\n// to the point where we have first connection.\ntype nopConnPicker struct{}\n\nfunc (p nopConnPicker) GetConnectionCount() int {\n\treturn 0\n}\n\nfunc (p nopConnPicker) GetExcessConnectionCount() int {\n\treturn 0\n}\n\nfunc (p nopConnPicker) GetShardCount() int {\n\treturn 0\n}\n\nfunc (nopConnPicker) Pick(Token, ExecutableQuery) *Conn {\n\treturn nil\n}\n\nfunc (nopConnPicker) Put(*Conn) error {\n\treturn nil\n}\n\nfunc (nopConnPicker) Remove(conn *Conn) {\n}\n\nfunc (nopConnPicker) InFlight() int {\n\treturn 0\n}\n\nfunc (nopConnPicker) Size() (int, int) {\n\t// Return 1 to make hostConnPool to try to establish a connection.\n\t// When first connection is established hostConnPool replaces nopConnPicker\n\t// with a different ConnPicker implementation.\n\treturn 0, 1\n}\n\nfunc (nopConnPicker) Close() {\n}\n\nfunc (nopConnPicker) NextShard() (shardID, nrShards int) {\n\treturn 0, 0\n}\n"
  },
  {
    "path": "control.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"context\"\n\tcrand \"crypto/rand\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"net\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/events\"\n\t\"github.com/gocql/gocql/internal/debug\"\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n)\n\nvar (\n\trandr    *rand.Rand\n\tmutRandr sync.Mutex\n)\n\nfunc init() {\n\tb := make([]byte, 4)\n\tif _, err := crand.Read(b); err != nil {\n\t\tpanic(fmt.Sprintf(\"unable to seed random number generator: %v\", err))\n\t}\n\n\trandr = rand.New(rand.NewSource(int64(readInt(b))))\n}\n\nconst (\n\tcontrolConnStarting = 0\n\tcontrolConnStarted  = 1\n\tcontrolConnClosing  = -1\n)\n\ntype controlConnection interface {\n\tgetConn() *connHost\n\tawaitSchemaAgreement() error\n\tquery(statement string, values ...any) (iter *Iter)\n\tquerySystem(statement string, values ...any) (iter 
*Iter)\n\tdiscoverProtocol(hosts []*HostInfo) (int, error)\n\tconnect(hosts []*HostInfo) error\n\tclose()\n\tgetSession() *Session\n\treconnect() error\n}\n\n// Ensure that the atomic variable is aligned to a 64bit boundary\n// so that atomic operations can be applied on 32bit architectures.\ntype controlConn struct {\n\tconn         atomic.Value\n\tretry        RetryPolicy\n\tsession      *Session\n\tquit         chan struct{}\n\tstate        int32\n\treconnecting int32\n}\n\nfunc (c *controlConn) getSession() *Session {\n\treturn c.session\n}\n\nfunc createControlConn(session *Session) *controlConn {\n\n\tcontrol := &controlConn{\n\t\tsession: session,\n\t\tquit:    make(chan struct{}),\n\t\tretry:   &SimpleRetryPolicy{NumRetries: 3},\n\t}\n\n\tcontrol.conn.Store((*connHost)(nil))\n\n\treturn control\n}\n\nfunc (c *controlConn) heartBeat() {\n\tif !atomic.CompareAndSwapInt32(&c.state, controlConnStarting, controlConnStarted) {\n\t\treturn\n\t}\n\n\tsleepTime := 1 * time.Second\n\ttimer := time.NewTimer(sleepTime)\n\tdefer timer.Stop()\n\n\tfor {\n\t\ttimer.Reset(sleepTime)\n\n\t\tselect {\n\t\tcase <-c.quit:\n\t\t\treturn\n\t\tcase <-timer.C:\n\t\t}\n\n\t\tresp, err := c.writeFrame(&writeOptionsFrame{})\n\t\tif err != nil {\n\t\t\tgoto reconn\n\t\t}\n\n\t\tswitch resp.(type) {\n\t\tcase *frm.SupportedFrame:\n\t\t\t// Everything ok\n\t\t\tsleepTime = 30 * time.Second\n\t\t\tcontinue\n\t\tcase error:\n\t\t\tgoto reconn\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"gocql: unknown frame in response to options: %T\", resp))\n\t\t}\n\n\treconn:\n\t\t// try to connect a bit faster\n\t\tsleepTime = 1 * time.Second\n\t\tc.reconnect()\n\t\tcontinue\n\t}\n}\n\nfunc resolveInitialEndpoint(resolver DNSResolver, addr string, defaultPort int) ([]*HostInfo, error) {\n\tvar port int\n\thost, portStr, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\thost = addr\n\t\tport = defaultPort\n\t} else {\n\t\tport, err = strconv.Atoi(portStr)\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t}\n\n\t// Check if host is a literal IP address\n\tif ip := net.ParseIP(host); ip != nil {\n\t\tif validIpAddr(ip) {\n\t\t\thb := HostInfoBuilder{\n\t\t\t\t// Fake hosts for initial endpoints do not need HostID\n\t\t\t\tHostname:       host,\n\t\t\t\tConnectAddress: ip,\n\t\t\t\tPort:           port,\n\t\t\t}\n\t\t\thh := hb.Build()\n\t\t\treturn []*HostInfo{&hh}, nil\n\t\t}\n\t}\n\n\t// Look up host in DNS\n\tips, err := resolver.LookupIP(host)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if len(ips) == 0 {\n\t\treturn nil, fmt.Errorf(\"no IP's returned from DNS lookup for %q\", addr)\n\t}\n\n\tvar hosts []*HostInfo\n\tfor _, ip := range ips {\n\t\tif validIpAddr(ip) {\n\t\t\thb := HostInfoBuilder{\n\t\t\t\t// Fake hosts for initial endpoints do not need HostID\n\t\t\t\tHostname:       host,\n\t\t\t\tConnectAddress: ip,\n\t\t\t\tPort:           port,\n\t\t\t}\n\t\t\thh := hb.Build()\n\t\t\thosts = append(hosts, &hh)\n\t\t}\n\t}\n\n\tif len(hosts) == 0 {\n\t\treturn nil, fmt.Errorf(\"no IP's founded for %q\", addr)\n\t}\n\treturn hosts, nil\n}\n\nfunc shuffleHosts(hosts []*HostInfo) []*HostInfo {\n\tshuffled := make([]*HostInfo, len(hosts))\n\tcopy(shuffled, hosts)\n\n\tmutRandr.Lock()\n\trandr.Shuffle(len(hosts), func(i, j int) {\n\t\tshuffled[i], shuffled[j] = shuffled[j], shuffled[i]\n\t})\n\tmutRandr.Unlock()\n\n\treturn shuffled\n}\n\n// this is going to be version dependant and a nightmare to maintain :(\nvar protocolSupportRe = regexp.MustCompile(`the lowest supported version is \\d+ and the greatest is (\\d+)$`)\n\nfunc parseProtocolFromError(err error) int {\n\t// I really wish this had the actual info in the error frame...\n\tmatches := protocolSupportRe.FindAllStringSubmatch(err.Error(), -1)\n\tif len(matches) != 1 || len(matches[0]) != 2 {\n\t\tif verr, ok := err.(*protocolError); ok {\n\t\t\treturn int(verr.frame.Header().Version.Version())\n\t\t}\n\t\treturn 0\n\t}\n\n\tmax, err := strconv.Atoi(matches[0][1])\n\tif err != nil 
{\n\t\treturn 0\n\t}\n\n\treturn max\n}\n\nfunc (c *controlConn) discoverProtocol(hosts []*HostInfo) (int, error) {\n\thosts = shuffleHosts(hosts)\n\n\tconnCfg := *c.session.connCfg\n\tconnCfg.ProtoVersion = protoVersion4 // TODO: define maxProtocol\n\n\thandler := connErrorHandlerFn(func(c *Conn, err error, closed bool) {\n\t\t// we should never get here, but if we do it means we connected to a\n\t\t// host successfully which means our attempted protocol version worked\n\t\tif !closed {\n\t\t\tc.Close()\n\t\t}\n\t})\n\n\tvar err error\n\tfor _, host := range hosts {\n\t\tvar conn *Conn\n\t\tconn, err = c.session.dial(c.session.ctx, host, &connCfg, handler)\n\t\t// not need to call conn.finalizeConnection since this connection to be terminated right away\n\t\tif conn != nil {\n\t\t\tconn.Close()\n\t\t}\n\n\t\tif err == nil {\n\t\t\treturn connCfg.ProtoVersion, nil\n\t\t}\n\n\t\tif proto := parseProtocolFromError(err); proto > 0 {\n\t\t\treturn proto, nil\n\t\t}\n\t}\n\n\treturn 0, err\n}\n\nfunc (c *controlConn) connect(hosts []*HostInfo) error {\n\tif len(hosts) == 0 {\n\t\treturn errors.New(\"control: no endpoints specified\")\n\t}\n\n\t// shuffle endpoints so not all drivers will connect to the same initial\n\t// node.\n\thosts = shuffleHosts(hosts)\n\n\tcfg := *c.session.connCfg\n\tcfg.disableCoalesce = true\n\n\tvar conn *Conn\n\tvar err error\n\tfor _, host := range hosts {\n\t\tconn, err = c.session.dial(c.session.ctx, host, &cfg, c)\n\t\t// conn.finalizeConnection() to be called outside of this function, since initialization process is not completed yet\n\t\tif err != nil {\n\t\t\tc.session.logger.Printf(\"gocql: unable to dial control conn %v:%v: %v\\n\", host.ConnectAddress(), host.Port(), err)\n\t\t\tcontinue\n\t\t}\n\t\terr = c.setupConn(conn)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tc.session.logger.Printf(\"gocql: unable setup control conn %v:%v: %v\\n\", host.ConnectAddress(), host.Port(), err)\n\t\tconn.Close()\n\t\tconn = nil\n\t}\n\tif conn 
== nil {\n\t\treturn fmt.Errorf(\"unable to connect to initial hosts: %v\", err)\n\t}\n\n\t// we could fetch the initial ring here and update initial host data. So that\n\t// when we return from here we have a ring topology ready to go.\n\n\tgo c.heartBeat()\n\n\treturn nil\n}\n\ntype connHost struct {\n\tconn ConnInterface\n\thost *HostInfo\n}\n\nfunc (c *controlConn) setupConn(conn *Conn) error {\n\t// we need up-to-date host info for the filterHost call below\n\titer := conn.querySystem(context.TODO(), qrySystemLocal)\n\thost, err := hostInfoFromIter(iter, c.session.cfg.Port)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif c.session.cfg.filterHost(host) {\n\t\treturn fmt.Errorf(\"host was filtered: %v\", host.ConnectAddress())\n\t}\n\n\tif err := c.registerEvents(conn); err != nil {\n\t\treturn fmt.Errorf(\"register events: %v\", err)\n\t}\n\n\tch := &connHost{\n\t\tconn: conn,\n\t\thost: host,\n\t}\n\told, _ := c.conn.Swap(ch).(*connHost)\n\tvar oldHost events.HostInfo\n\tif old != nil && old.host != nil {\n\t\toldHost.HostID = old.host.HostID()\n\t\toldHost.Host = old.host.ConnectAddress()\n\t\toldHost.Port = old.host.Port()\n\t}\n\tc.session.publishEvent(&events.ControlConnectionRecreatedEvent{\n\t\tOldHost: oldHost,\n\t\tNewHost: events.HostInfo{\n\t\t\tHostID: host.HostID(),\n\t\t\tHost:   host.ConnectAddress(),\n\t\t\tPort:   host.Port(),\n\t\t},\n\t})\n\tif c.session.initialized() {\n\t\t// We connected to control conn, so add the connect the host in pool as well.\n\t\t// Notify session we can start trying to connect to the node.\n\t\t// We can't start the fill before the session is initialized, otherwise the fill would interfere\n\t\t// with the fill called by Session.init. 
Session.init needs to wait for its fill to finish and that\n\t\t// would return immediately if we started the fill here.\n\t\t// TODO(martin-sucha): Trigger pool refill for all hosts, like in reconnectDownedHosts?\n\t\tgo c.session.startPoolFill(host)\n\t}\n\treturn nil\n}\n\nfunc (c *controlConn) registerEvents(conn *Conn) error {\n\tvar events []string\n\n\tif !c.session.cfg.Events.DisableTopologyEvents {\n\t\tevents = append(events, \"TOPOLOGY_CHANGE\")\n\t}\n\tif !c.session.cfg.Events.DisableNodeStatusEvents {\n\t\tevents = append(events, \"STATUS_CHANGE\")\n\t}\n\tif !c.session.cfg.Events.DisableSchemaEvents {\n\t\tevents = append(events, \"SCHEMA_CHANGE\")\n\t}\n\tif c.session.cfg.ClientRoutesConfig != nil {\n\t\tevents = append(events, \"CLIENT_ROUTES_CHANGE\")\n\t}\n\n\tif len(events) == 0 {\n\t\treturn nil\n\t}\n\n\tframer, err := conn.exec(context.Background(),\n\t\t&writeRegisterFrame{\n\t\t\tevents: events,\n\t\t}, nil, conn.cfg.ConnectTimeout)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer framer.Release()\n\n\tframe, err := framer.parseFrame()\n\tif err != nil {\n\t\treturn err\n\t} else if _, ok := frame.(*frm.ReadyFrame); !ok {\n\t\treturn fmt.Errorf(\"unexpected frame in response to register: got %T: %v\\n\", frame, frame)\n\t}\n\n\treturn nil\n}\n\nfunc (c *controlConn) reconnect() error {\n\tif atomic.LoadInt32(&c.state) == controlConnClosing {\n\t\treturn fmt.Errorf(\"control connection is closing\")\n\t}\n\tif !atomic.CompareAndSwapInt32(&c.reconnecting, 0, 1) {\n\t\treturn fmt.Errorf(\"control connection is reconnecting\")\n\t}\n\tdefer atomic.StoreInt32(&c.reconnecting, 0)\n\n\terr := c.attemptReconnect()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"gocql: unable to reconnect control connection: %w\\n\", err)\n\t\tc.session.logger.Printf(err.Error())\n\t\treturn err\n\t}\n\n\terr = c.session.refreshRingNow()\n\tif err != nil {\n\t\tc.session.logger.Printf(\"gocql: unable to refresh ring: %v\\n\", err)\n\t}\n\n\terr = 
c.session.metadataDescriber.refreshAllSchema()\n\tif err != nil {\n\t\tc.session.logger.Printf(\"gocql: unable to refresh the schema: %v\\n\", err)\n\t}\n\treturn nil\n}\n\nfunc (c *controlConn) attemptReconnect() error {\n\thosts := c.session.hostSource.getHostsList()\n\thosts = shuffleHosts(hosts)\n\n\t// keep the old behavior of connecting to the old host first by moving it to\n\t// the front of the slice\n\tch := c.getConn()\n\tif ch != nil {\n\t\tfor i := range hosts {\n\t\t\tif hosts[i].Equal(ch.host) {\n\t\t\t\thosts[0], hosts[i] = hosts[i], hosts[0]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tch.conn.Close()\n\t}\n\n\terr := c.attemptReconnectToAnyOfHosts(hosts)\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tc.session.logger.Printf(\"gocql: unable to connect to any ring node: %v\\n\", err)\n\tc.session.logger.Printf(\"gocql: control falling back to initial contact points.\\n\")\n\t// Fallback to initial contact points, as it may be the case that all known initialHosts\n\t// changed their IPs while keeping the same hostname(s).\n\tinitialHosts, resolvErr := resolveInitialEndpoints(c.session.cfg.DNSResolver, c.session.cfg.Hosts, c.session.cfg.Port, c.session.logger)\n\tif resolvErr != nil {\n\t\treturn fmt.Errorf(\"resolve contact points' hostnames: %v\", resolvErr)\n\t}\n\n\treturn c.attemptReconnectToAnyOfHosts(initialHosts)\n}\n\nfunc (c *controlConn) attemptReconnectToAnyOfHosts(hosts []*HostInfo) error {\n\tfor _, host := range hosts {\n\t\tconn, err := c.session.connect(c.session.ctx, host, c)\n\t\tif err != nil {\n\t\t\tif c.session.cfg.ConvictionPolicy.AddFailure(err, host) {\n\t\t\t\tc.session.handleNodeDown(host.ConnectAddress(), host.Port())\n\t\t\t}\n\t\t\tc.session.logger.Printf(\"gocql: unable to dial control conn %v:%v: %v\\n\", host.ConnectAddress(), host.Port(), err)\n\t\t\tcontinue\n\t\t}\n\t\terr = c.setupConn(conn)\n\t\tif err != nil {\n\t\t\tc.session.logger.Printf(\"gocql: unable setup control conn %v:%v: %v\\n\", host.ConnectAddress(), 
host.Port(), err)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\t\tconn.finalizeConnection()\n\t\tc.session.publishEvent(&events.ControlConnectionRecreatedEvent{\n\t\t\tNewHost: events.HostInfo{\n\t\t\t\tHost:   host.ConnectAddress(),\n\t\t\t\tPort:   host.Port(),\n\t\t\t\tHostID: host.HostID(),\n\t\t\t},\n\t\t})\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"unable to connect to any known node: %v\", hosts)\n}\n\nfunc (c *controlConn) HandleError(conn *Conn, err error, closed bool) {\n\tif !closed {\n\t\treturn\n\t}\n\n\toldConn := c.getConn()\n\n\t// If connection has long gone, and not been attempted for awhile,\n\t// it's possible to have oldConn as nil here (#1297).\n\tif oldConn != nil && oldConn.conn != conn {\n\t\treturn\n\t}\n\n\tgo c.reconnect()\n}\n\nfunc (c *controlConn) getConn() *connHost {\n\treturn c.conn.Load().(*connHost)\n}\n\n// writeFrame sends frame w on the control connection and returns the parsed\n// response frame.\n//\n// NOTE: The returned frame must not retain any byte-slice references to the\n// framer's read buffer, because the framer is released back to the pool\n// immediately after parseFrame returns (via defer). Frame types that use\n// readBytesCopy (e.g. 
SupportedFrame, AuthChallengeFrame, AuthSuccessFrame)\n// are safe; frame types that use readBytes and expose []byte fields would not\n// be safe and must not be returned from this function.\nfunc (c *controlConn) writeFrame(w frameBuilder) (frame, error) {\n\tch := c.getConn()\n\tif ch == nil {\n\t\treturn nil, errNoControl\n\t}\n\n\tframer, err := ch.conn.exec(context.Background(), w, nil, c.session.cfg.MetadataSchemaRequestTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer framer.Release()\n\n\treturn framer.parseFrame()\n}\n\n// query will return nil if the connection is closed or nil\nfunc (c *controlConn) querySystem(statement string, values ...any) (iter *Iter) {\n\tconn := c.getConn().conn.(*Conn)\n\treturn c.runQuery(c.session.Query(statement+conn.usingTimeoutClause, values...).\n\t\tConsistency(One).\n\t\tSetRequestTimeout(conn.systemRequestTimeout).\n\t\tRoutingKey([]byte{}).\n\t\tTrace(nil))\n}\n\n// query will return nil if the connection is closed or nil\nfunc (c *controlConn) query(statement string, values ...any) (iter *Iter) {\n\treturn c.runQuery(c.session.Query(statement, values...).Consistency(One).RoutingKey([]byte{}).Trace(nil))\n}\n\n// query will return nil if the connection is closed or nil\nfunc (c *controlConn) runQuery(qry *Query) (iter *Iter) {\n\tfor {\n\t\tch := c.getConn()\n\t\tqry.conn = ch.conn\n\t\titer = ch.conn.executeQuery(context.TODO(), qry)\n\n\t\tif debug.Enabled && iter.err != nil {\n\t\t\tc.session.logger.Printf(\"control: error executing %q: %v\\n\", qry.stmt, iter.err)\n\t\t}\n\n\t\tqry.AddAttempts(1, ch.host)\n\t\tif iter.err == nil || !c.retry.Attempt(qry) {\n\t\t\tbreak\n\t\t}\n\t\titer.finalize(true)\n\t}\n\n\treturn\n}\n\nfunc (c *controlConn) awaitSchemaAgreement() error {\n\tch := c.getConn()\n\treturn (&Iter{err: ch.conn.awaitSchemaAgreement(context.TODO())}).err\n}\n\nfunc (c *controlConn) close() {\n\tif atomic.CompareAndSwapInt32(&c.state, controlConnStarted, controlConnClosing) {\n\t\tc.quit <- 
struct{}{}\n\t}\n\n\tch := c.getConn()\n\tif ch != nil {\n\t\tch.conn.Close()\n\t}\n}\n\nvar errNoControl = errors.New(\"gocql: no control connection available\")\n"
  },
  {
    "path": "control_integration_test.go",
    "content": "//go:build integration\n// +build integration\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\t\"testing\"\n)\n\n// unixSocketDialer is a special dialer which connects only to the maintenance_socket.\ntype unixSocketDialer struct {\n\tdialer     net.Dialer\n\tsocketPath string\n}\n\nfunc (d unixSocketDialer) DialContext(_ context.Context, _, _ string) (net.Conn, error) {\n\treturn d.dialer.Dial(\"unix\", d.socketPath)\n}\n\nfunc TestUnixSockets(t *testing.T) {\n\tt.Parallel()\n\n\tsocketFiles := getClusterSocketFile()\n\tif len(socketFiles) == 0 {\n\t\tt.Skip(\"this test needs path to socket file provided into -cluster-socket cli option\")\n\t}\n\n\tc := createCluster()\n\tc.NumConns = 1\n\tc.DisableInitialHostLookup = true\n\tc.ProtoVersion = protoVersion3\n\tc.ReconnectInterval = 0\n\tc.WriteCoalesceWaitTime = 0\n\n\tc.Events.DisableNodeStatusEvents = true\n\tc.Events.DisableTopologyEvents = true\n\tc.Events.DisableSchemaEvents = true\n\n\td := net.Dialer{\n\t\tTimeout: c.Timeout,\n\t}\n\tif c.SocketKeepalive > 0 {\n\t\td.KeepAlive = c.SocketKeepalive\n\t}\n\n\tc.Dialer = unixSocketDialer{\n\t\tdialer:     d,\n\t\tsocketPath: socketFiles[0],\n\t}\n\n\tsess, err := c.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to create session: %v\", err)\n\t}\n\n\tdefer sess.Close()\n\n\tkeyspace := testKeyspaceName(t)\n\n\terr = createTable(sess, `DROP KEYSPACE IF EXISTS `+keyspace)\n\tif err != nil {\n\t\tt.Fatal(\"unable to drop keyspace if exists:\", err)\n\t}\n\n\terr = createTable(sess, fmt.Sprintf(`CREATE KEYSPACE %s\n\tWITH replication = {\n\t\t'class' : 'NetworkTopologyStrategy',\n\t\t'replication_factor' : 1\n\t}`, keyspace))\n\tif err != nil {\n\t\tt.Fatal(\"unable to create keyspace:\", err)\n\t}\n}\n"
  },
  {
    "path": "control_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"net\"\n\t\"testing\"\n\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n)\n\nfunc TestHostInfo_Lookup(t *testing.T) {\n\tt.Parallel()\n\n\tresolver := NewSimpleDNSResolver(true)\n\n\ttests := [...]struct {\n\t\taddr string\n\t\tip   net.IP\n\t}{\n\t\t{\"127.0.0.1\", net.IPv4(127, 0, 0, 1)},\n\t\t{\"localhost\", net.IPv4(127, 0, 0, 1)}, // TODO: this may be host dependant\n\t}\n\n\tfor i, test := range tests {\n\t\thosts, err := resolveInitialEndpoint(resolver, test.addr, 1)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\n\t\thost := hosts[0]\n\t\tif !host.ConnectAddress().Equal(test.ip) {\n\t\t\tt.Errorf(\"expected ip %v got %v for addr %q\", test.ip, host.ConnectAddress(), test.addr)\n\t\t}\n\t}\n}\n\nfunc TestParseProtocol(t *testing.T) {\n\tt.Parallel()\n\n\ttests := [...]struct {\n\t\terr   
error\n\t\tproto int\n\t}{\n\t\t{\n\t\t\terr: &protocolError{\n\t\t\t\tframe: frm.ErrorFrame{\n\t\t\t\t\tCode:    0x10,\n\t\t\t\t\tMessage: \"Invalid or unsupported protocol version (5); the lowest supported version is 3 and the greatest is 4\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tproto: protoVersion4,\n\t\t},\n\t\t{\n\t\t\terr: &protocolError{\n\t\t\t\tframe: frm.ErrorFrame{\n\t\t\t\t\tFrameHeader: frm.FrameHeader{\n\t\t\t\t\t\tVersion: 0x83,\n\t\t\t\t\t},\n\t\t\t\t\tCode:    0x10,\n\t\t\t\t\tMessage: \"Invalid or unsupported protocol version: 5\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tproto: protoVersion3,\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tif proto := parseProtocolFromError(test.err); proto != test.proto {\n\t\t\tt.Errorf(\"%d: exepcted proto %d got %d\", i, test.proto, proto)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "cqltypes.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2012, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\ntype Duration struct {\n\tMonths      int32\n\tDays        int32\n\tNanoseconds int64\n}\n"
  },
  {
    "path": "debounce/refresh_deboucer.go",
    "content": "package debounce\n\nimport (\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tRingRefreshDebounceTime = 1 * time.Second\n)\n\n// debounces requests to call a refresh function (currently used for ring refresh). It also supports triggering a refresh immediately.\ntype RefreshDebouncer struct {\n\tbroadcaster  *errorBroadcaster\n\ttimer        *time.Timer\n\trefreshNowCh chan struct{}\n\tquit         chan struct{}\n\trefreshFn    func() error\n\tinterval     time.Duration\n\tmu           sync.Mutex\n\tstopped      bool\n}\n\nfunc NewRefreshDebouncer(interval time.Duration, refreshFn func() error) *RefreshDebouncer {\n\td := &RefreshDebouncer{\n\t\tstopped:      false,\n\t\tbroadcaster:  nil,\n\t\trefreshNowCh: make(chan struct{}, 1),\n\t\tquit:         make(chan struct{}),\n\t\tinterval:     interval,\n\t\ttimer:        time.NewTimer(interval),\n\t\trefreshFn:    refreshFn,\n\t}\n\td.timer.Stop()\n\tgo d.flusher()\n\treturn d\n}\n\n// debounces a request to call the refresh function\nfunc (d *RefreshDebouncer) Debounce() {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\tif d.stopped {\n\t\treturn\n\t}\n\td.timer.Reset(d.interval)\n}\n\n// requests an immediate refresh which will cancel pending refresh requests\nfunc (d *RefreshDebouncer) RefreshNow() <-chan error {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\tif d.broadcaster == nil {\n\t\td.broadcaster = newErrorBroadcaster()\n\t\tselect {\n\t\tcase d.refreshNowCh <- struct{}{}:\n\t\tdefault:\n\t\t\t// already a refresh pending\n\t\t}\n\t}\n\treturn d.broadcaster.newListener()\n}\n\nfunc (d *RefreshDebouncer) flusher() {\n\tfor {\n\t\tselect {\n\t\tcase <-d.refreshNowCh:\n\t\tcase <-d.timer.C:\n\t\tcase <-d.quit:\n\t\t}\n\t\td.mu.Lock()\n\t\tif d.stopped {\n\t\t\tif d.broadcaster != nil {\n\t\t\t\td.broadcaster.stop()\n\t\t\t\td.broadcaster = nil\n\t\t\t}\n\t\t\td.timer.Stop()\n\t\t\td.mu.Unlock()\n\t\t\treturn\n\t\t}\n\n\t\t// make sure both request channels are cleared before we refresh\n\t\tselect {\n\t\tcase 
<-d.refreshNowCh:\n\t\tdefault:\n\t\t}\n\n\t\td.timer.Stop()\n\t\tselect {\n\t\tcase <-d.timer.C:\n\t\tdefault:\n\t\t}\n\n\t\tcurBroadcaster := d.broadcaster\n\t\td.broadcaster = nil\n\t\td.mu.Unlock()\n\n\t\terr := d.refreshFn()\n\t\tif curBroadcaster != nil {\n\t\t\tcurBroadcaster.broadcast(err)\n\t\t}\n\t}\n}\n\nfunc (d *RefreshDebouncer) Stop() {\n\td.mu.Lock()\n\tif d.stopped {\n\t\td.mu.Unlock()\n\t\treturn\n\t}\n\td.stopped = true\n\td.mu.Unlock()\n\td.quit <- struct{}{} // sync with flusher\n\tclose(d.quit)\n}\n\n// broadcasts an error to multiple channels (listeners)\ntype errorBroadcaster struct {\n\tlisteners []chan<- error\n\tmu        sync.Mutex\n}\n\nfunc newErrorBroadcaster() *errorBroadcaster {\n\treturn &errorBroadcaster{\n\t\tlisteners: nil,\n\t\tmu:        sync.Mutex{},\n\t}\n}\n\nfunc (b *errorBroadcaster) newListener() <-chan error {\n\tch := make(chan error, 1)\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\tb.listeners = append(b.listeners, ch)\n\treturn ch\n}\n\nfunc (b *errorBroadcaster) broadcast(err error) {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\tcurListeners := b.listeners\n\tif len(curListeners) > 0 {\n\t\tb.listeners = nil\n\t} else {\n\t\treturn\n\t}\n\n\tfor _, listener := range curListeners {\n\t\tlistener <- err\n\t\tclose(listener)\n\t}\n}\n\nfunc (b *errorBroadcaster) stop() {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\tif len(b.listeners) == 0 {\n\t\treturn\n\t}\n\tfor _, listener := range b.listeners {\n\t\tclose(listener)\n\t}\n\tb.listeners = nil\n}\n"
  },
  {
    "path": "debounce/refresh_debouncer_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage debounce\n\nimport (\n\t\"errors\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n)\n\n// This test sends debounce requests and waits until the refresh function is called (which should happen when the timer elapses).\nfunc TestRefreshDebouncer_MultipleEvents(t *testing.T) {\n\tconst numberOfEvents = 10\n\tchannel := make(chan int, numberOfEvents) // should never use more than 1 but allow for more to possibly detect bugs\n\tfn := func() error {\n\t\tchannel <- 0\n\t\treturn nil\n\t}\n\tbeforeEvents := time.Now()\n\twg := sync.WaitGroup{}\n\td := NewRefreshDebouncer(2*time.Second, fn)\n\tdefer d.Stop()\n\tfor i := 0; i < numberOfEvents; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\td.Debounce()\n\t\t}()\n\t}\n\twg.Wait()\n\ttimeoutCh := time.After(2500 * time.Millisecond) // extra time to avoid flakiness\n\tselect {\n\tcase <-channel:\n\tcase <-timeoutCh:\n\t\tt.Fatalf(\"timeout elapsed without flush function being called\")\n\t}\n\tafterFunctionCall := time.Now()\n\n\t// use 1.5 seconds instead of 2 seconds to avoid timer precision issues\n\tif afterFunctionCall.Sub(beforeEvents) < 1500*time.Millisecond {\n\t\tt.Fatalf(\"function was called after %v ms instead of ~2 seconds\", afterFunctionCall.Sub(beforeEvents).Milliseconds())\n\t}\n\n\t// wait another 2 seconds and check if function was called again\n\ttime.Sleep(2500 * time.Millisecond)\n\tif len(channel) > 0 {\n\t\tt.Fatalf(\"function was called more than once\")\n\t}\n}\n\n// This test:\n//\n//\t1 - Sends debounce requests when test starts\n//\t2 - Calls refreshNow() before the timer elapsed (which stops the timer) about 1.5 seconds after test starts\n//\n// The end result should be 1 refresh function call when refreshNow() is called.\nfunc TestRefreshDebouncer_RefreshNow(t *testing.T) {\n\tconst numberOfEvents = 10\n\tchannel := make(chan int, numberOfEvents) // should never use more than 1 but allow for more to 
possibly detect bugs\n\tfn := func() error {\n\t\tchannel <- 0\n\t\treturn nil\n\t}\n\tbeforeEvents := time.Now()\n\teventsWg := sync.WaitGroup{}\n\td := NewRefreshDebouncer(2*time.Second, fn)\n\tdefer d.Stop()\n\tfor i := 0; i < numberOfEvents; i++ {\n\t\teventsWg.Add(1)\n\t\tgo func() {\n\t\t\tdefer eventsWg.Done()\n\t\t\td.Debounce()\n\t\t}()\n\t}\n\n\trefreshNowWg := sync.WaitGroup{}\n\trefreshNowWg.Add(1)\n\tgo func() {\n\t\tdefer refreshNowWg.Done()\n\t\ttime.Sleep(1500 * time.Millisecond)\n\t\td.RefreshNow()\n\t}()\n\n\teventsWg.Wait()\n\tselect {\n\tcase <-channel:\n\t\tt.Fatalf(\"function was called before the expected time\")\n\tdefault:\n\t}\n\n\trefreshNowWg.Wait()\n\n\ttimeoutCh := time.After(200 * time.Millisecond) // allow for 200ms of delay to prevent flakiness\n\tselect {\n\tcase <-channel:\n\tcase <-timeoutCh:\n\t\tt.Fatalf(\"timeout elapsed without flush function being called\")\n\t}\n\tafterFunctionCall := time.Now()\n\n\t// use 1 second instead of 1.5s to avoid timer precision issues\n\tif afterFunctionCall.Sub(beforeEvents) < 1000*time.Millisecond {\n\t\tt.Fatalf(\"function was called after %v ms instead of ~1.5 seconds\", afterFunctionCall.Sub(beforeEvents).Milliseconds())\n\t}\n\n\t// wait some time and check if function was called again\n\ttime.Sleep(2500 * time.Millisecond)\n\tif len(channel) > 0 {\n\t\tt.Fatalf(\"function was called more than once\")\n\t}\n}\n\n// This test:\n//\n//\t1 - Sends debounce requests when test starts\n//\t2 - Calls refreshNow() before the timer elapsed (which stops the timer) about 1 second after test starts\n//\t3 - Sends more debounce requests (which resets the timer with a 3-second interval) about 2 seconds after test starts\n//\n// The end result should be 2 refresh function calls:\n//\n//\t1 - When refreshNow() is called (1 second after the test starts)\n//\t2 - When the timer elapses after the second \"wave\" of debounce requests (5 seconds after the test starts)\nfunc 
TestRefreshDebouncer_EventsAfterRefreshNow(t *testing.T) {\n\tconst numberOfEvents = 10\n\tchannel := make(chan int, numberOfEvents) // should never use more than 2 but allow for more to possibly detect bugs\n\tfn := func() error {\n\t\tchannel <- 0\n\t\treturn nil\n\t}\n\tbeforeEvents := time.Now()\n\twg := sync.WaitGroup{}\n\td := NewRefreshDebouncer(3*time.Second, fn)\n\tdefer d.Stop()\n\tfor i := 0; i < numberOfEvents; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\td.Debounce()\n\t\t\ttime.Sleep(2000 * time.Millisecond)\n\t\t\td.Debounce()\n\t\t}()\n\t}\n\n\tgo func() {\n\t\ttime.Sleep(1 * time.Second)\n\t\td.RefreshNow()\n\t}()\n\n\twg.Wait()\n\ttimeoutCh := time.After(1500 * time.Millisecond) // extra 500ms to prevent flakiness\n\tselect {\n\tcase <-channel:\n\tcase <-timeoutCh:\n\t\tt.Fatalf(\"timeout elapsed without flush function being called after refreshNow()\")\n\t}\n\tafterFunctionCall := time.Now()\n\n\t// use 500ms instead of 1s to avoid timer precision issues\n\tif afterFunctionCall.Sub(beforeEvents) < 500*time.Millisecond {\n\t\tt.Fatalf(\"function was called after %v ms instead of ~1 second\", afterFunctionCall.Sub(beforeEvents).Milliseconds())\n\t}\n\n\ttimeoutCh = time.After(4 * time.Second) // extra 1s to prevent flakiness\n\tselect {\n\tcase <-channel:\n\tcase <-timeoutCh:\n\t\tt.Fatalf(\"timeout elapsed without flush function being called after debounce requests\")\n\t}\n\tafterSecondFunctionCall := time.Now()\n\n\t// use 2.5s instead of 3s to avoid timer precision issues\n\tif afterSecondFunctionCall.Sub(afterFunctionCall) < 2500*time.Millisecond {\n\t\tt.Fatalf(\"function was called after %v ms instead of ~3 seconds\", afterSecondFunctionCall.Sub(afterFunctionCall).Milliseconds())\n\t}\n\n\tif len(channel) > 0 {\n\t\tt.Fatalf(\"function was called more than twice\")\n\t}\n}\n\nfunc TestErrorBroadcaster_MultipleListeners(t *testing.T) {\n\tb := newErrorBroadcaster()\n\tdefer b.stop()\n\tconst numberOfListeners = 
10\n\tvar listeners []<-chan error\n\tfor i := 0; i < numberOfListeners; i++ {\n\t\tlisteners = append(listeners, b.newListener())\n\t}\n\n\terr := errors.New(\"expected error\")\n\twg := sync.WaitGroup{}\n\tresult := atomic.Value{}\n\tfor _, listener := range listeners {\n\t\tcurrentListener := listener\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\treceivedErr, ok := <-currentListener\n\t\t\tif !ok {\n\t\t\t\tresult.Store(errors.New(\"listener was closed\"))\n\t\t\t} else if receivedErr != err {\n\t\t\t\tresult.Store(errors.New(\"expected received error to be the same as the one that was broadcasted\"))\n\t\t\t}\n\t\t}()\n\t}\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tb.broadcast(err)\n\t\tb.stop()\n\t}()\n\twg.Wait()\n\tif loadedVal := result.Load(); loadedVal != nil {\n\t\tt.Error(loadedVal.(error).Error())\n\t}\n}\n\nfunc TestErrorBroadcaster_StopWithoutBroadcast(t *testing.T) {\n\tvar b = newErrorBroadcaster()\n\tdefer b.stop()\n\tconst numberOfListeners = 10\n\tvar listeners []<-chan error\n\tfor i := 0; i < numberOfListeners; i++ {\n\t\tlisteners = append(listeners, b.newListener())\n\t}\n\n\twg := sync.WaitGroup{}\n\tresult := atomic.Value{}\n\tfor _, listener := range listeners {\n\t\tcurrentListener := listener\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t// broadcaster stopped, expect listener to be closed\n\t\t\t_, ok := <-currentListener\n\t\t\tif ok {\n\t\t\t\tresult.Store(errors.New(\"expected listener to be closed\"))\n\t\t\t}\n\t\t}()\n\t}\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\t// call stop without broadcasting anything to current listeners\n\t\tb.stop()\n\t}()\n\twg.Wait()\n\tif loadedVal := result.Load(); loadedVal != nil {\n\t\tt.Error(loadedVal.(error).Error())\n\t}\n}\n"
  },
  {
    "path": "debounce/simple_debouncer.go",
    "content": "package debounce\n\nimport (\n\t\"sync\"\n\t\"sync/atomic\"\n)\n\n// SimpleDebouncer is are tool for queuing immutable functions calls. It provides:\n// 1. Blocking simultaneous calls\n// 2. If there is no running call and no waiting call, then the current call go through\n// 3. If there is running call and no waiting call, then the current call go waiting\n// 4. If there is running call and waiting call, then the current call are voided\ntype SimpleDebouncer struct {\n\tm     sync.Mutex\n\tcount atomic.Int32\n}\n\n// NewSimpleDebouncer creates a new SimpleDebouncer.\nfunc NewSimpleDebouncer() *SimpleDebouncer {\n\treturn &SimpleDebouncer{}\n}\n\n// Debounce attempts to execute the function if the logic of the SimpleDebouncer allows it.\nfunc (d *SimpleDebouncer) Debounce(fn func()) bool {\n\tif d.count.Add(1) > 2 {\n\t\td.count.Add(-1)\n\t\treturn false\n\t}\n\td.m.Lock()\n\tfn()\n\td.count.Add(-1)\n\td.m.Unlock()\n\treturn true\n}\n"
  },
  {
    "path": "debounce/simple_debouncer_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage debounce\n\nimport (\n\t\"runtime\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n)\n\n// TestSimpleDebouncerRace tests SimpleDebouncer for the fact that it does not allow concurrent writing, reading.\nfunc TestSimpleDebouncerRace(t *testing.T) {\n\tt.Parallel()\n\n\toperations := 1000\n\truns := 100\n\tcount := 3\n\n\td := NewSimpleDebouncer()\n\tfor r := 0; r < runs; r++ {\n\t\tvar counter atomic.Int32\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(count)\n\n\t\tresults := make([]bool, count)\n\t\tfails := make([]bool, count)\n\t\tfor c := range results {\n\t\t\tresult := &results[c]\n\t\t\tfail := &fails[c]\n\n\t\t\tgo func() {\n\t\t\t\t*result = d.Debounce(func() {\n\t\t\t\t\tfor i := 0; i < operations; i++ {\n\t\t\t\t\t\tif counter.Add(1) != 1 {\n\t\t\t\t\t\t\t*fail = true\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttime.Sleep(time.Microsecond)\n\t\t\t\t\t\tcounter.Add(-1)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\n\t\t// check results\n\n\t\tfinished := 0\n\t\tfor i, done := range results {\n\t\t\tif done {\n\t\t\t\tfinished++\n\t\t\t}\n\t\t\tif fails[i] {\n\t\t\t\tt.Fatalf(\"Simultaneous execution detected\")\n\t\t\t}\n\t\t}\n\t\tif finished < 2 {\n\t\t\tt.Fatalf(\"In one run should be finished more than 2 `Debounce` method calls, but finished %d\", finished)\n\t\t}\n\t}\n}\n\n// TestDebouncerExtreme tests SimpleDebouncer in the conditions  fast multi `Debounce` method calls and fast execution of the `debounced function`.\nfunc TestDebouncerExtreme(t *testing.T) {\n\tt.Parallel()\n\n\ttype runResult struct {\n\t\texecutedN int32\n\t\tdone      bool\n\t}\n\n\truns := 10000\n\tcount := 20\n\n\td := NewSimpleDebouncer()\n\tvar wg sync.WaitGroup\n\tfor r := 0; r < runs; r++ {\n\t\tvar executionsC atomic.Int32\n\t\twg.Add(count)\n\n\t\tresults := make([]runResult, count)\n\n\t\tfor c := range results {\n\t\t\tresult := &results[c]\n\n\t\t\tgo func() {\n\t\t\t\tresult.done = 
d.Debounce(func() {\n\t\t\t\t\tresult.executedN = executionsC.Add(1)\n\t\t\t\t})\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\n\t\t// check results\n\t\tfinished := 0\n\t\tfor _, result := range results {\n\t\t\tif result.done {\n\t\t\t\tif result.executedN == 0 {\n\t\t\t\t\tt.Fatalf(\"Wrong execution detected: \\n%#v\", result)\n\t\t\t\t}\n\t\t\t\tfinished++\n\t\t\t}\n\t\t}\n\t\tif finished < 2 {\n\t\t\tt.Fatalf(\"In one run should be finished more than 2 `Debounce` method calls, but finished %d\", finished)\n\t\t}\n\t}\n}\n\n// TestSimpleDebouncerCount tests SimpleDebouncer for the fact that it pended only one function call.\nfunc TestSimpleDebouncerCount(t *testing.T) {\n\tt.Parallel()\n\n\tcalls := 10\n\n\t// Subtracting a one call that will be performed directly (not through goroutines)\n\tcalls--\n\n\td := NewSimpleDebouncer()\n\tvar prepared, start, done sync.WaitGroup\n\tprepared.Add(calls)\n\tstart.Add(1)\n\tdone.Add(calls)\n\n\tfinished := 0\n\tfor c := 0; c < calls; c++ {\n\t\tgo func() {\n\t\t\tprepared.Done()\n\t\t\tstart.Wait()\n\t\t\td.Debounce(func() {\n\t\t\t\tfinished++\n\t\t\t})\n\t\t\tdone.Done()\n\t\t}()\n\t}\n\td.Debounce(func() {\n\t\tprepared.Wait()\n\t\tstart.Done()\n\t\tfinished++\n\t\ttime.Sleep(time.Second)\n\t})\n\tdone.Wait()\n\n\t// check results\n\tif finished != 2 {\n\t\tt.Fatalf(\"Should be finished 2 `Debounce` method calls, but finished %d\", finished)\n\t}\n}\n\n// TestDebouncer tests that the debouncer allows only one function to execute at a time\nfunc TestSimpleDebouncer(t *testing.T) {\n\tt.Parallel()\n\n\td := NewSimpleDebouncer()\n\tvar executions int32\n\tstartedCh := make(chan struct{}, 1)\n\tdoneCh := make(chan struct{}, 1)\n\n\t// Function to increment executions\n\tfn := func() {\n\t\t<-startedCh // Simulate work\n\t\tatomic.AddInt32(&executions, 1)\n\t\t<-doneCh // Simulate work\n\t}\n\tt.Run(\"Case 1\", func(t *testing.T) {\n\t\t// Case 1: Normal single execution\n\t\tstartedCh <- struct{}{}\n\t\tdoneCh <- 
struct{}{}\n\t\td.Debounce(fn)\n\t\t// We expect that the function has only executed once due to debouncing\n\t\tif atomic.LoadInt32(&executions) != 1 {\n\t\t\tt.Errorf(\"Expected function to be executed only once, but got %d executions\", executions)\n\t\t}\n\t})\n\n\tatomic.StoreInt32(&executions, 0)\n\tt.Run(\"Case 2\", func(t *testing.T) {\n\t\t// Case 2: Debounce the function multiple times at row when body is started\n\t\tgo d.Debounce(fn)\n\t\tstartedCh <- struct{}{}\n\t\t// Wait until first call execution started\n\t\twaitTillChannelIsEmpty(startedCh)\n\t\t// Call function twice, due to debounce only one should be executed\n\t\tgo d.Debounce(fn)\n\t\tgo d.Debounce(fn)\n\t\t// Let first call to complete\n\t\tdoneCh <- struct{}{}\n\t\t// Let second call to complete\n\t\tstartedCh <- struct{}{}\n\t\tdoneCh <- struct{}{}\n\t\t// Make sure second call is completed\n\t\twaitTillChannelIsEmpty(doneCh)\n\t\t// We expect that the function has only executed once due to debouncing\n\t\tif atomic.LoadInt32(&executions) != 2 {\n\t\t\tt.Errorf(\"Expected function to be executed twice, but got %d executions\", executions)\n\t\t}\n\t})\n}\nfunc waitTillChannelIsEmpty(ch chan struct{}) {\n\tfor {\n\t\tif len(ch) == 0 {\n\t\t\treturn\n\t\t}\n\t\truntime.Gosched()\n\t}\n}\n"
  },
  {
    "path": "dial.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// HostDialer allows customizing connection to cluster nodes.\ntype HostDialer interface {\n\t// DialHost establishes a connection to the host.\n\t// The returned connection must be directly usable for CQL protocol,\n\t// specifically DialHost is responsible also for setting up the TLS session if needed.\n\t// DialHost should disable write coalescing if the returned net.Conn does not support writev.\n\t// As of Go 1.18, only plain TCP connections support writev, TLS sessions should disable coalescing.\n\t// You can use WrapTLS helper function if you don't need to override the TLS setup.\n\tDialHost(ctx context.Context, host *HostInfo) (*DialedHost, error)\n}\n\n// DialedHost contains information about established connection to a host.\ntype DialedHost struct {\n\t// Conn used to 
communicate with the server.\n\tConn net.Conn\n\n\t// DisableCoalesce disables write coalescing for the Conn.\n\t// If true, the effect is the same as if WriteCoalesceWaitTime was configured to 0.\n\tDisableCoalesce bool\n}\n\n// defaultHostDialer dials host in a default way.\ntype defaultHostDialer struct {\n\tdialer    Dialer\n\ttlsConfig *tls.Config\n}\n\nfunc (hd *defaultHostDialer) DialHost(ctx context.Context, host *HostInfo) (*DialedHost, error) {\n\tip := host.ConnectAddress()\n\tport := host.Port()\n\n\tif !validIpAddr(ip) {\n\t\treturn nil, fmt.Errorf(\"host missing connect ip address: %v\", ip)\n\t} else if port == 0 {\n\t\treturn nil, fmt.Errorf(\"host missing port: %v\", port)\n\t}\n\n\taddr := net.JoinHostPort(ip.String(), strconv.Itoa(port))\n\ttranslatedInfo := host.getTranslatedConnectionInfo()\n\tif translatedInfo != nil {\n\t\taddr = translatedInfo.CQL.ToNetAddr()\n\t}\n\n\tconn, err := hd.dialer.DialContext(ctx, \"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn WrapTLS(ctx, conn, addr, hd.tlsConfig)\n}\n\nfunc tlsConfigForAddr(tlsConfig *tls.Config, addr string) *tls.Config {\n\t// the TLS config is safe to be reused by connections but it must not\n\t// be modified after being used.\n\tif !tlsConfig.InsecureSkipVerify && tlsConfig.ServerName == \"\" {\n\t\tcolonPos := strings.LastIndex(addr, \":\")\n\t\tif colonPos == -1 {\n\t\t\tcolonPos = len(addr)\n\t\t}\n\t\thostname := addr[:colonPos]\n\t\t// clone config to avoid modifying the shared one.\n\t\ttlsConfig = tlsConfig.Clone()\n\t\ttlsConfig.ServerName = hostname\n\t}\n\treturn tlsConfig\n}\n\n// WrapTLS optionally wraps a net.Conn connected to addr with the given tlsConfig.\n// If the tlsConfig is nil, conn is not wrapped into a TLS session, so is insecure.\n// If the tlsConfig does not have server name set, it is updated based on the default gocql rules.\nfunc WrapTLS(ctx context.Context, conn net.Conn, addr string, tlsConfig *tls.Config) (*DialedHost, error) {\n\tif 
tlsConfig != nil {\n\t\ttlsConfig := tlsConfigForAddr(tlsConfig, addr)\n\t\ttconn := tls.Client(conn, tlsConfig)\n\t\tif err := tconn.HandshakeContext(ctx); err != nil {\n\t\t\tconn.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\tconn = tconn\n\t}\n\n\treturn &DialedHost{\n\t\tConn:            conn,\n\t\tDisableCoalesce: tlsConfig != nil, // write coalescing can't use writev when the connection is wrapped.\n\t}, nil\n}\n"
  },
  {
    "path": "dialer/recorder/recorder.go",
    "content": "package recorder\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/dialer\"\n)\n\nfunc NewRecordDialer(dir string) *RecordDialer {\n\treturn &RecordDialer{\n\t\tdir: dir,\n\t}\n}\n\ntype RecordDialer struct {\n\tdir string\n\tnet.Dialer\n}\n\nfunc (d *RecordDialer) DialContext(ctx context.Context, network, addr string) (conn net.Conn, err error) {\n\tfmt.Println(\"Dial Context Record Dialer\")\n\tsourcePort := gocql.ScyllaGetSourcePort(ctx)\n\tfmt.Println(\"Source port: \", sourcePort)\n\tdialerWithLocalAddr := d.Dialer\n\tdialerWithLocalAddr.LocalAddr, err = net.ResolveTCPAddr(network, fmt.Sprintf(\":%d\", sourcePort))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\tconn, err = dialerWithLocalAddr.DialContext(ctx, network, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewConnectionRecorder(path.Join(d.dir, fmt.Sprintf(\"%s-%d\", addr, sourcePort)), conn)\n}\n\nfunc NewConnectionRecorder(fname string, conn net.Conn) (net.Conn, error) {\n\tfd_writes, err := os.OpenFile(fname+\"Writes\", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfd_reads, err2 := os.OpenFile(fname+\"Reads\", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)\n\tif err2 != nil {\n\t\treturn nil, err2\n\t}\n\treturn &ConnectionRecorder{fd_writes: fd_writes, fd_reads: fd_reads, orig: conn, write_record: FrameWriter{new: true}, read_record: FrameWriter{new: true}}, nil\n}\n\ntype FrameWriter struct {\n\trecord    dialer.Record\n\tto_record int\n\tnew       bool\n}\n\nfunc (f *FrameWriter) Write(b []byte, n int, file *os.File) (err error) {\n\tif f.new {\n\t\tf.to_record = -1\n\t\tf.record = dialer.Record{}\n\t}\n\n\trecorded_ealier := len(f.record.Data)\n\tf.record.Data = append(f.record.Data, b[:n]...)\n\n\tif f.to_record == -1 && len(f.record.Data) >= 9 {\n\t\tf.to_record = 9 + 
int(f.record.Data[5+0])<<24 | int(f.record.Data[6])<<16 | int(f.record.Data[7])<<8 | int(f.record.Data[8]) - recorded_ealier\n\t\tf.record.StreamID = int(f.record.Data[2])<<8 | int(f.record.Data[3])\n\t} else if f.to_record == -1 {\n\t\treturn err\n\t}\n\n\tf.to_record = f.to_record - n\n\tif f.to_record <= 0 {\n\t\tf.new = true\n\t\t// Write JSON record to file\n\t\tjsonData, marshalErr := json.Marshal(f.record)\n\t\tif marshalErr != nil {\n\t\t\treturn fmt.Errorf(\"failed to encode JSON record: %w\", marshalErr)\n\t\t}\n\t\t_, writeErr := file.Write(append(jsonData, '\\n'))\n\t\tif writeErr != nil {\n\t\t\treturn fmt.Errorf(\"failed to record: %w\", writeErr)\n\t\t}\n\t}\n\treturn err\n}\n\ntype ConnectionRecorder struct {\n\tfd_writes    *os.File\n\tfd_reads     *os.File\n\torig         net.Conn\n\tread_record  FrameWriter\n\twrite_record FrameWriter\n}\n\nfunc (c *ConnectionRecorder) Read(b []byte) (n int, err error) {\n\tn, err = c.orig.Read(b)\n\tif err != nil && err != io.EOF {\n\t\treturn n, err\n\t}\n\n\treturn n, c.read_record.Write(b, n, c.fd_reads)\n}\n\nfunc (c *ConnectionRecorder) Write(b []byte) (n int, err error) {\n\tn, err = c.orig.Write(b)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, c.write_record.Write(b, n, c.fd_writes)\n}\n\nfunc (c ConnectionRecorder) Close() error {\n\tif err := c.fd_writes.Close(); err != nil {\n\t\treturn fmt.Errorf(\"failed to close the file: %w\", err)\n\t}\n\tif err := c.fd_reads.Close(); err != nil {\n\t\treturn fmt.Errorf(\"failed to close the file: %w\", err)\n\t}\n\treturn c.orig.Close()\n}\n\nfunc (c ConnectionRecorder) LocalAddr() net.Addr {\n\treturn c.orig.LocalAddr()\n}\n\nfunc (c ConnectionRecorder) RemoteAddr() net.Addr {\n\treturn c.orig.RemoteAddr()\n}\n\nfunc (c ConnectionRecorder) SetDeadline(t time.Time) error {\n\treturn c.orig.SetDeadline(t)\n}\n\nfunc (c ConnectionRecorder) SetReadDeadline(t time.Time) error {\n\treturn c.orig.SetReadDeadline(t)\n}\n\nfunc (c ConnectionRecorder) 
SetWriteDeadline(t time.Time) error {\n\treturn c.orig.SetWriteDeadline(t)\n}\n"
  },
  {
    "path": "dialer/replayer/replayer.go",
    "content": "package replayer\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/dialer\"\n)\n\nfunc NewReplayDialer(dir string) *ReplayDialer {\n\treturn &ReplayDialer{\n\t\tdir: dir,\n\t}\n}\n\ntype ReplayDialer struct {\n\tdir string\n\tnet.Dialer\n}\n\nfunc (d *ReplayDialer) DialContext(ctx context.Context, network, addr string) (conn net.Conn, err error) {\n\tsourcePort := gocql.ScyllaGetSourcePort(ctx)\n\treturn NewConnectionReplayer(path.Join(d.dir, fmt.Sprintf(\"%s-%d\", addr, sourcePort)))\n}\n\nfunc NewConnectionReplayer(fname string) (net.Conn, error) {\n\tframes, err := loadResponseFramesFromFiles(fname+\"Reads\", fname+\"Writes\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ConnectionReplayer{frames: frames, frameIdsToReplay: []int{}, streamIdsToReplay: []int{}, frameIdx: 0, frameResponsePosition: 0, gotRequest: make(chan struct{}, 1)}, nil\n}\n\ntype ConnectionReplayer struct {\n\tgotRequest            chan struct{}\n\tframes                []*FrameRecorded\n\tframeIdsToReplay      []int\n\tstreamIdsToReplay     []int\n\tframeIdx              int\n\tframeResponsePosition int\n\tclosed                bool\n}\n\nfunc (c *ConnectionReplayer) frameStreamID() int {\n\treturn c.streamIdsToReplay[c.frameIdx]\n}\n\nfunc (c *ConnectionReplayer) getPendingFrame() *FrameRecorded {\n\tif c.frameIdx < 0 || c.frameIdx >= len(c.frameIdsToReplay) {\n\t\treturn nil\n\t}\n\tframeId := c.frameIdsToReplay[c.frameIdx]\n\tif frameId < 0 || frameId >= len(c.frames) {\n\t\treturn nil\n\t}\n\treturn c.frames[frameId]\n}\n\nfunc (c *ConnectionReplayer) pushStreamIDToReplay(b []byte, idx int) {\n\tif b[0] > 0x02 {\n\t\tc.streamIdsToReplay = append(c.streamIdsToReplay, int(b[2])<<8|int(b[3]))\n\t} else {\n\t\tc.streamIdsToReplay = append(c.streamIdsToReplay, int(b[2]))\n\t}\n\tc.frameIdsToReplay = append(c.frameIdsToReplay, 
idx)\n\n\tselect {\n\tcase c.gotRequest <- struct{}{}:\n\tdefault:\n\t}\n}\n\nfunc replaceFrameStreamID(b []byte, stream int) {\n\tif b[0] > 0x02 {\n\t\tb[2] = byte(stream >> 8)\n\t\tb[3] = byte(stream)\n\t} else {\n\t\tb[2] = byte(stream)\n\t}\n}\n\nfunc (c *ConnectionReplayer) Read(b []byte) (n int, err error) {\n\tframe := c.getPendingFrame()\n\tfor frame == nil {\n\t\t<-c.gotRequest\n\t\tframe = c.getPendingFrame()\n\t}\n\tif c.Closed() {\n\t\treturn 0, io.EOF\n\t}\n\tresponse := frame.Response[c.frameResponsePosition:]\n\n\tif len(b) < len(response) {\n\t\tcopy(b, response[:len(b)])\n\t\tc.frameResponsePosition = c.frameResponsePosition + len(b)\n\t\treturn len(b), err\n\t}\n\n\tcopy(b, response)\n\tif c.frameResponsePosition == 0 {\n\t\treplaceFrameStreamID(b, c.frameStreamID())\n\t}\n\n\tc.frameIdx = c.frameIdx + 1\n\tc.frameResponsePosition = 0\n\treturn len(response), err\n}\n\nfunc (c *ConnectionReplayer) Write(b []byte) (n int, err error) {\n\twriteHash := dialer.GetFrameHash(b)\n\n\tfor i, q := range c.frames {\n\t\tif q.Hash == writeHash {\n\t\t\tc.pushStreamIDToReplay(b, i)\n\t\t\treturn len(b), nil\n\t\t}\n\t}\n\tpanic(fmt.Errorf(\"unable to find a response to replay\"))\n}\n\nfunc (c *ConnectionReplayer) Close() error {\n\tclose(c.gotRequest)\n\tc.closed = true\n\treturn nil\n}\n\nfunc (c *ConnectionReplayer) Closed() bool {\n\treturn c.closed\n}\n\ntype MockAddr struct {\n\tnetwork string\n\taddress string\n}\n\nfunc (m *MockAddr) Network() string {\n\treturn m.network\n}\n\nfunc (m *MockAddr) String() string {\n\treturn m.address\n}\n\nfunc (c ConnectionReplayer) LocalAddr() net.Addr {\n\treturn &MockAddr{\n\t\tnetwork: \"tcp\",\n\t\taddress: \"10.0.0.1:54321\",\n\t}\n}\n\nfunc (c ConnectionReplayer) RemoteAddr() net.Addr {\n\treturn &MockAddr{\n\t\tnetwork: \"tcp\",\n\t\taddress: \"192.168.1.100:12345\",\n\t}\n}\n\nfunc (c ConnectionReplayer) SetDeadline(t time.Time) error {\n\treturn nil\n}\n\nfunc (c ConnectionReplayer) SetReadDeadline(t 
time.Time) error {\n\treturn nil\n}\n\nfunc (c ConnectionReplayer) SetWriteDeadline(t time.Time) error {\n\treturn nil\n}\n\nfunc loadFramesFromFile(filename string) (map[int]dialer.Record, error) {\n\trecords := make(map[int]dialer.Record)\n\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to open file %s: %w\", filename, err)\n\t}\n\tdefer file.Close()\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tvar record dialer.Record\n\t\tif err := json.Unmarshal(scanner.Bytes(), &record); err != nil {\n\t\t\tfmt.Printf(\"Error decoding JSON in %s: %s\\n\", filename, err)\n\t\t\tcontinue\n\t\t}\n\t\trecords[record.StreamID] = record\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading file %s: %w\", filename, err)\n\t}\n\treturn records, nil\n}\n\nfunc loadResponseFramesFromFiles(read_file, write_file string) ([]*FrameRecorded, error) {\n\tread_records, err := loadFramesFromFile(read_file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twrite_records, err := loadFramesFromFile(write_file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar frames = []*FrameRecorded{}\n\tfor streamID, record1 := range read_records {\n\t\tif record2, exists := write_records[streamID]; exists {\n\t\t\tframes = append(frames, &FrameRecorded{Response: record1.Data, Hash: dialer.GetFrameHash(record2.Data)})\n\t\t}\n\t}\n\treturn frames, nil\n}\n\ntype FrameRecorded struct {\n\tResponse []byte\n\tHash     int64\n}\n"
  },
  {
    "path": "dialer/utils.go",
    "content": "package dialer\n\nimport (\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n\t\"github.com/gocql/gocql/internal/murmur\"\n)\n\ntype Record struct {\n\tData     []byte `json:\"data\"`\n\tStreamID int    `json:\"stream_id\"`\n}\n\ntype frameOp byte\n\nconst (\n\t// header ops\n\topError         frameOp = 0x00\n\topStartup       frameOp = 0x01\n\topReady         frameOp = 0x02\n\topAuthenticate  frameOp = 0x03\n\topOptions       frameOp = 0x05\n\topSupported     frameOp = 0x06\n\topQuery         frameOp = 0x07\n\topResult        frameOp = 0x08\n\topPrepare       frameOp = 0x09\n\topExecute       frameOp = 0x0A\n\topRegister      frameOp = 0x0B\n\topEvent         frameOp = 0x0C\n\topBatch         frameOp = 0x0D\n\topAuthChallenge frameOp = 0x0E\n\topAuthResponse  frameOp = 0x0F\n\topAuthSuccess   frameOp = 0x10\n)\n\nfunc addBytes(frame []byte, index int) int {\n\tbytesLength := int(frame[index+0])<<24 | int(frame[index+1])<<16 | int(frame[index+2])<<8 | int(frame[index+3])\n\tindex = index + 4\n\tif bytesLength > 0 {\n\t\tindex = index + bytesLength\n\t}\n\treturn index\n}\n\nfunc addQueryParams(frame []byte, index int) int {\n\t//use consistency\n\tindex = index + 2\n\n\t//use query flags\n\tvar flags byte\n\tif frame[0] > 0x04 {\n\t\tflags = frame[index+3]\n\t\tindex = index + 4\n\t} else {\n\t\tflags = frame[index]\n\t\tindex = index + 1\n\t}\n\n\tnames := false\n\n\t// protoV3 specific things\n\tif frame[0] > 0x02 {\n\t\tif flags&frm.FlagValues == frm.FlagValues && flags&frm.FlagWithNameValues == frm.FlagWithNameValues {\n\t\t\tnames = true\n\t\t}\n\t}\n\n\tif flags&frm.FlagValues == frm.FlagValues {\n\t\tvaluesLen := int(frame[index])<<8 | int(frame[index+1])\n\t\tindex = index + 2\n\n\t\tfor i := 0; i < valuesLen; i++ {\n\t\t\tif names {\n\t\t\t\tstringLenght := int(frame[index])<<8 | int(frame[index+1])\n\t\t\t\tindex = index + 2 + stringLenght\n\t\t\t}\n\n\t\t\tindex = addBytes(frame, index)\n\t\t}\n\t}\n\n\tif flags&frm.FlagPageSize == 
frm.FlagPageSize {\n\t\tindex = index + 4\n\t}\n\n\tif flags&frm.FlagWithPagingState == frm.FlagWithPagingState {\n\t\tindex = addBytes(frame, index)\n\t}\n\n\tif flags&frm.FlagWithSerialConsistency == frm.FlagWithSerialConsistency {\n\t\tindex = index + 2\n\t}\n\n\t// do not use timelaps and keyspace\n\treturn index\n}\n\nfunc addHeader(index int) int {\n\treturn index + 8\n}\n\nfunc addCustomPayload(frame []byte, index int, p int) int {\n\tcustomPayloadLenght := int(frame[8+p])<<8 | int(frame[9+p])\n\tif customPayloadLenght > 0 {\n\t\tindex = index + 2\n\t}\n\tfor i := 0; i < customPayloadLenght; i++ {\n\t\tstringLenght := int(frame[index])<<8 | int(frame[index+1])\n\t\tindex = index + 2 + stringLenght\n\t\tindex = addBytes(frame, index)\n\t}\n\n\treturn index\n}\n\nfunc GetFrameHash(frame []byte) int64 {\n\tvar p int\n\tif frame[0] > 0x02 {\n\t\tp = 1\n\t\tstreamID1 := frame[2]\n\t\tstreamID2 := frame[3]\n\t\tdefer func() {\n\t\t\tframe[2] = streamID1\n\t\t\tframe[3] = streamID2\n\t\t}()\n\t\tframe[2] = byte('0')\n\t\tframe[3] = byte('0')\n\t} else {\n\t\tp = 0\n\t\tstreamID1 := frame[2]\n\t\tdefer func() {\n\t\t\tframe[2] = streamID1\n\t\t}()\n\t\tframe[2] = byte('0')\n\t}\n\tswitch frame[3+p] {\n\tcase byte(opStartup):\n\t\treturn murmur.Murmur3H1(frame[:8+p])\n\tcase byte(opPrepare):\n\t\treturn murmur.Murmur3H1(frame)\n\tcase byte(opAuthResponse):\n\t\treturn murmur.Murmur3H1(frame)\n\tcase byte(opQuery):\n\t\tindex := addHeader(p)\n\t\tif frame[1]&frm.FlagCustomPayload == frm.FlagCustomPayload {\n\t\t\tindex = addCustomPayload(frame, index, p)\n\t\t}\n\t\tendIndex := index\n\t\tendIndex = addQueryParams(frame, endIndex)\n\t\treturn murmur.Murmur3H1(frame[index:endIndex])\n\tcase byte(opExecute):\n\t\tindex := addHeader(p)\n\t\tif frame[1]&frm.FlagCustomPayload == frm.FlagCustomPayload {\n\t\t\tindex = addCustomPayload(frame, index, p)\n\t\t}\n\n\t\tendIndex := index\n\n\t\tpreparedIDLen := int(frame[index])<<8 | int(frame[index+1])\n\t\tendIndex = endIndex 
+ 2 + preparedIDLen\n\t\tif frame[0] > 0x01 {\n\t\t\tendIndex = addQueryParams(frame, endIndex)\n\t\t} else {\n\t\t\tvaluesLen := int(frame[index])<<8 | int(frame[index+1])\n\t\t\tindex = index + 2\n\t\t\tfor i := 0; i < valuesLen; i++ {\n\t\t\t\tindex = addBytes(frame, index)\n\t\t\t}\n\t\t\tindex = index + 2\n\t\t}\n\t\treturn murmur.Murmur3H1(frame[index:endIndex])\n\tcase byte(opBatch):\n\t\treturn murmur.Murmur3H1(frame)\n\tcase byte(opOptions):\n\t\treturn murmur.Murmur3H1(frame)\n\tcase byte(opRegister):\n\t\treturn murmur.Murmur3H1(frame)\n\tdefault:\n\t\treturn murmur.Murmur3H1(frame)\n\t}\n}\n"
  },
  {
    "path": "dns_test.go",
    "content": "//go:build integration\n// +build integration\n\npackage gocql\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"testing\"\n)\n\ntype mockDNSResolver struct {\n\tlock sync.RWMutex\n\tdata map[string][]net.IP\n}\n\nfunc newMockDNSResolver() *mockDNSResolver {\n\treturn &mockDNSResolver{\n\t\tdata: make(map[string][]net.IP),\n\t\tlock: sync.RWMutex{},\n\t}\n}\n\nfunc (r *mockDNSResolver) LookupIP(host string) ([]net.IP, error) {\n\tr.lock.RLock()\n\tdefer r.lock.RUnlock()\n\tips, _ := r.data[host]\n\tif len(ips) == 0 {\n\t\treturn nil, &net.DNSError{Err: errors.New(\"no IP addresses\").Error(), Name: host}\n\t}\n\treturn ips, nil\n}\n\nfunc (r *mockDNSResolver) Update(host string, ips ...net.IP) {\n\tr.lock.Lock()\n\tr.data[host] = ips\n\tdefer r.lock.Unlock()\n}\n\nfunc (r *mockDNSResolver) Delete(hosts ...string) {\n\tr.lock.Lock()\n\tfor _, host := range hosts {\n\t\tdelete(r.data, host)\n\t}\n\tdefer r.lock.Unlock()\n}\n\nfunc MustIP(ip string) net.IP {\n\tout := net.ParseIP(ip)\n\tif out == nil {\n\t\tpanic(\"failed to parse IP: \" + ip)\n\t}\n\treturn out\n}\n\nfunc TestDNS(t *testing.T) {\n\tt.Parallel()\n\n\tcheckIfSessionWorking := func(t *testing.T, cluster *ClusterConfig, hosts []string) {\n\t\ts, err := cluster.CreateSession()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create session: %v\", err)\n\t\t}\n\t\tdefer s.Close()\n\n\t\terr = s.refreshRingNow()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to refresh ring: %v\", err)\n\t\t}\n\n\t\terr = s.Query(\"select * from system.peers\").Exec()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to execute query: %v\", err)\n\t\t}\n\t\tringHosts := s.hostSource.getHostsList()\n\t\tif len(ringHosts) != len(hosts) {\n\t\t\tt.Fatalf(\"wrong number of hosts: got %d, want %d\", len(ringHosts), len(hosts))\n\t\t}\n\t}\n\n\tOneDNSPerNode := func(c *ClusterConfig) {\n\t\tr := newMockDNSResolver()\n\t\tvar dnsRecords []string\n\t\tfor id, host := range c.Hosts {\n\t\t\tdns := 
fmt.Sprintf(\"node%d.cluster.local\", id+1)\n\t\t\tdnsRecords = append(dnsRecords, dns)\n\t\t\tr.Update(dns, MustIP(host))\n\t\t}\n\t\tc.DNSResolver = r\n\t\tc.Hosts = dnsRecords\n\t}\n\n\tOneDNSPerCluster := func(c *ClusterConfig) {\n\t\tr := newMockDNSResolver()\n\t\tvar hostIPs []net.IP\n\t\tfor _, host := range c.Hosts {\n\t\t\thostIPs = append(hostIPs, MustIP(host))\n\t\t}\n\t\tr.Update(\"cluster.local\", hostIPs...)\n\t\tc.DNSResolver = r\n\t\tc.Hosts = []string{\"cluster.local\"}\n\t}\n\n\tOneDNSPerClusterFirstBroken := func(c *ClusterConfig) {\n\t\tr := newMockDNSResolver()\n\t\tvar hostIPs []net.IP\n\t\tfor _, host := range c.Hosts {\n\t\t\thostIPs = append(hostIPs, MustIP(host))\n\t\t}\n\t\thostIPs[0] = MustIP(\"0.0.0.0\")\n\t\tr.Update(\"cluster.local\", hostIPs...)\n\t\tc.DNSResolver = r\n\t\tc.Hosts = []string{\"cluster.local\"}\n\t}\n\n\tWithAddressTranslator := func(c *ClusterConfig) {\n\t\tvar toAddresses []net.IP\n\t\tvar fromAddresses []net.IP\n\t\tvar clusterHosts []string\n\t\tfor _, host := range c.Hosts {\n\t\t\tip := MustIP(host)\n\n\t\t\tvar fromAddress net.IP\n\t\t\tif ip.To4().String() == ip.String() {\n\t\t\t\tip = ip.To4()\n\t\t\t\tfromAddress = net.IPv4(ip[0], ip[1], ip[2]+1, ip[3])\n\t\t\t} else {\n\t\t\t\tfromAddress = net.IP{ip[0], ip[1], ip[2], ip[3], ip[4], ip[5], ip[6], ip[7], ip[8], ip[9], ip[10], ip[11], ip[12] + 1, ip[13], ip[14], ip[15]}\n\t\t\t}\n\t\t\ttoAddresses = append(toAddresses, ip)\n\t\t\tfromAddresses = append(fromAddresses, fromAddress)\n\t\t\tclusterHosts = append(clusterHosts, fromAddress.String())\n\t\t}\n\n\t\tc.AddressTranslator = AddressTranslatorFunc(func(addr net.IP, port int) (net.IP, int) {\n\t\t\tfor id, host := range fromAddresses {\n\t\t\t\tif host.Equal(addr) {\n\t\t\t\t\treturn toAddresses[id], port\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, host := range toAddresses {\n\t\t\t\tif host.Equal(addr) {\n\t\t\t\t\treturn addr, port\n\t\t\t\t}\n\t\t\t}\n\t\t\tpanic(\"failed to translate 
address\")\n\t\t})\n\t\tc.Hosts = clusterHosts\n\t}\n\n\ttestCases := []struct {\n\t\tname        string\n\t\tclusterMods []func(*ClusterConfig)\n\t}{\n\t\t{\n\t\t\tname:        \"OneDNSPerNode\",\n\t\t\tclusterMods: []func(*ClusterConfig){OneDNSPerNode},\n\t\t},\n\t\t{\n\t\t\tname:        \"OneDNSPerCluster\",\n\t\t\tclusterMods: []func(*ClusterConfig){OneDNSPerCluster},\n\t\t},\n\t\t{\n\t\t\tname:        \"AddressTranslator+OneDNSPerNode\",\n\t\t\tclusterMods: []func(*ClusterConfig){WithAddressTranslator, OneDNSPerNode},\n\t\t},\n\t\t{\n\t\t\tname:        \"AddressTranslator+OneDNSPerCluster\",\n\t\t\tclusterMods: []func(*ClusterConfig){WithAddressTranslator, OneDNSPerCluster},\n\t\t},\n\t\t{\n\t\t\tname:        \"OneDNSPerClusterFirstBroken\",\n\t\t\tclusterMods: []func(*ClusterConfig){OneDNSPerClusterFirstBroken},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tcluster := createCluster(tc.clusterMods...)\n\t\t\tcheckIfSessionWorking(t, cluster, getClusterHosts())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "doc.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\n// Package gocql implements a fast and robust Cassandra driver for the\n// Go programming language.\n//\n// # Connecting to the cluster\n//\n// Pass a list of initial node IP addresses to NewCluster to create a new cluster configuration:\n//\n//\tcluster := gocql.NewCluster(\"192.168.1.1\", \"192.168.1.2\", \"192.168.1.3\")\n//\n// Port can be specified as part of the address, the above is equivalent to:\n//\n//\tcluster := gocql.NewCluster(\"192.168.1.1:9042\", \"192.168.1.2:9042\", \"192.168.1.3:9042\")\n//\n// It is recommended to use the value set in the Cassandra config for broadcast_address or listen_address,\n// an IP address not a domain name. This is because events from Cassandra will use the configured IP\n// address, which is used to index connected hosts. 
If the domain name specified resolves to more than 1 IP address\n// then the driver may connect multiple times to the same host, and will not mark the node being down or up from events.\n//\n// Then you can customize more options (see ClusterConfig):\n//\n//\tcluster.Keyspace = \"example\"\n//\tcluster.Consistency = gocql.Quorum\n//\tcluster.ProtoVersion = protoVersion4\n//\n// The driver tries to automatically detect the protocol version to use if not set, but you might want to set the\n// protocol version explicitly, as it's not defined which version will be used in certain situations (for example\n// during upgrade of the cluster when some of the nodes support different set of protocol versions than other nodes).\n//\n// The driver advertises the module name and version in the STARTUP message, so servers are able to detect the version.\n// If you use replace directive in go.mod, the driver will send information about the replacement module instead.\n//\n// When ready, create a session from the configuration. Don't forget to Close the session once you are done with it:\n//\n//\tsession, err := cluster.CreateSession()\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tdefer session.Close()\n//\n// # Authentication\n//\n// CQL protocol uses a SASL-based authentication mechanism and so consists of an exchange of server challenges and\n// client response pairs. 
The details of the exchanged messages depend on the authenticator used.\n//\n// To use authentication, set ClusterConfig.Authenticator or ClusterConfig.AuthProvider.\n//\n// PasswordAuthenticator is provided to use for username/password authentication:\n//\n//\t cluster := gocql.NewCluster(\"192.168.1.1\", \"192.168.1.2\", \"192.168.1.3\")\n//\t cluster.Authenticator = gocql.PasswordAuthenticator{\n//\t\t\tUsername: \"user\",\n//\t\t\tPassword: \"password\",\n//\t }\n//\t session, err := cluster.CreateSession()\n//\t if err != nil {\n//\t \treturn err\n//\t }\n//\t defer session.Close()\n//\n// By default, PasswordAuthenticator will attempt to authenticate regardless of what implementation the server returns\n// in its AUTHENTICATE message as its authenticator, (e.g. org.apache.cassandra.auth.PasswordAuthenticator).  If you\n// wish to restrict this you may use PasswordAuthenticator.AllowedAuthenticators:\n//\n//\t cluster.Authenticator = gocql.PasswordAuthenticator {\n//\t\t\tUsername:              \"user\",\n//\t\t\tPassword:              \"password\",\n//\t\t\tAllowedAuthenticators: []string{\"org.apache.cassandra.auth.PasswordAuthenticator\"},\n//\t }\n//\n// # Transport layer security\n//\n// It is possible to secure traffic between the client and server with TLS.\n//\n// To use TLS, set the ClusterConfig.SslOpts field. SslOptions embeds *tls.Config so you can set that directly.\n// There are also helpers to load keys/certificates from files.\n//\n// Warning: Due to historical reasons, the SslOptions is insecure by default, so you need to set EnableHostVerification\n// to true if no Config is set. 
Most users should set SslOptions.Config to a *tls.Config.\n// SslOptions and Config.InsecureSkipVerify interact as follows:\n//\n//\tConfig.InsecureSkipVerify | EnableHostVerification | Result\n//\tConfig is nil             | false                  | do not verify host\n//\tConfig is nil             | true                   | verify host\n//\tfalse                     | false                  | verify host\n//\ttrue                      | false                  | do not verify host\n//\tfalse                     | true                   | verify host\n//\ttrue                      | true                   | verify host\n//\n// For example:\n//\n//\tcluster := gocql.NewCluster(\"192.168.1.1\", \"192.168.1.2\", \"192.168.1.3\")\n//\tcluster.SslOpts = &gocql.SslOptions{\n//\t\tEnableHostVerification: true,\n//\t}\n//\tsession, err := cluster.CreateSession()\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tdefer session.Close()\n//\n// # Data-center awareness and query routing\n//\n// To route queries to local DC first, use DCAwareRoundRobinPolicy. 
For example, if the datacenter you\n// want to primarily connect is called dc1 (as configured in the database):\n//\n//\tcluster := gocql.NewCluster(\"192.168.1.1\", \"192.168.1.2\", \"192.168.1.3\")\n//\tcluster.PoolConfig.HostSelectionPolicy = gocql.DCAwareRoundRobinPolicy(\"dc1\")\n//\n// The driver can route queries to nodes that hold data replicas based on partition key (preferring local DC).\n//\n//\tcluster := gocql.NewCluster(\"192.168.1.1\", \"192.168.1.2\", \"192.168.1.3\")\n//\tcluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(gocql.DCAwareRoundRobinPolicy(\"dc1\"))\n//\n// Note that TokenAwareHostPolicy can take options such as gocql.ShuffleReplicas and gocql.NonLocalReplicasFallback.\n//\n// We recommend running with a token aware host policy in production for maximum performance.\n//\n// The driver can only use token-aware routing for queries where all partition key columns are query parameters.\n// For example, instead of\n//\n//\tsession.Query(\"select value from mytable where pk1 = 'abc' AND pk2 = ?\", \"def\")\n//\n// use\n//\n//\tsession.Query(\"select value from mytable where pk1 = ? 
AND pk2 = ?\", \"abc\", \"def\")\n//\n// # Rack-level awareness\n//\n// The DCAwareRoundRobinPolicy can be replaced with RackAwareRoundRobinPolicy, which takes two parameters, datacenter and rack.\n//\n// Instead of dividing hosts with two tiers (local datacenter and remote datacenters) it divides hosts into three\n// (the local rack, the rest of the local datacenter, and everything else).\n//\n// For example, to route queries to a specific rack within a datacenter:\n//\n//\tcluster := gocql.NewCluster(\"192.168.1.1\", \"192.168.1.2\", \"192.168.1.3\")\n//\tcluster.PoolConfig.HostSelectionPolicy = gocql.RackAwareRoundRobinPolicy(\"dc1\", \"rack1\")\n//\n// RackAwareRoundRobinPolicy can be combined with TokenAwareHostPolicy in the same way as DCAwareRoundRobinPolicy:\n//\n//\tcluster := gocql.NewCluster(\"192.168.1.1\", \"192.168.1.2\", \"192.168.1.3\")\n//\tcluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(gocql.RackAwareRoundRobinPolicy(\"dc1\", \"rack1\"))\n//\n// # AWS-specific considerations\n//\n// When using rack-aware policies with AWS, note that Availability Zone (AZ) names like \"us-east-1a\" are not consistent\n// between different AWS accounts. The same physical AZ may have different names in different accounts.\n//\n// For consistent rack-aware routing in AWS, you should use AZ IDs instead of AZ names. 
AZ IDs (e.g., \"use1-az1\") are\n// consistent identifiers across AWS accounts for the same physical location.\n//\n// To configure your Cassandra or ScyllaDB nodes with AZ IDs, you can retrieve the AZ ID using AWS CLI or APIs.\n// For more information, see AWS documentation on AZ IDs: https://docs.aws.amazon.com/ram/latest/userguide/working-with-az-ids.html\n//\n// Example configuration for AWS using AZ IDs:\n//\n//\tcluster := gocql.NewCluster(\"192.168.1.1\", \"192.168.1.2\", \"192.168.1.3\")\n//\tcluster.PoolConfig.HostSelectionPolicy = gocql.RackAwareRoundRobinPolicy(\"us-east-1\", \"use1-az1\")\n//\n// # Executing queries\n//\n// Create queries with Session.Query. Query values must not be reused between different executions and must not be\n// modified after starting execution of the query.\n//\n// To execute a query without reading results, use Query.Exec:\n//\n//\t err := session.Query(`INSERT INTO tweet (timeline, id, text) VALUES (?, ?, ?)`,\n//\t\t\t\"me\", gocql.TimeUUID(), \"hello world\").WithContext(ctx).Exec()\n//\n// Single row can be read by calling Query.Scan:\n//\n//\t err := session.Query(`SELECT id, text FROM tweet WHERE timeline = ? 
LIMIT 1`,\n//\t\t\t\"me\").WithContext(ctx).Consistency(gocql.One).Scan(&id, &text)\n//\n// Multiple rows can be read using Iter.Scanner:\n//\n//\t scanner := session.Query(`SELECT id, text FROM tweet WHERE timeline = ?`,\n//\t \t\"me\").WithContext(ctx).Iter().Scanner()\n//\t for scanner.Next() {\n//\t \tvar (\n//\t \t\tid gocql.UUID\n//\t\t\ttext string\n//\t \t)\n//\t \terr = scanner.Scan(&id, &text)\n//\t \tif err != nil {\n//\t \t\tlog.Fatal(err)\n//\t \t}\n//\t \tfmt.Println(\"Tweet:\", id, text)\n//\t }\n//\t // scanner.Err() closes the iterator, so scanner nor iter should be used afterwards.\n//\t if err := scanner.Err(); err != nil {\n//\t \tlog.Fatal(err)\n//\t }\n//\n// See Example for complete example.\n//\n// # Prepared statements\n//\n// The driver automatically prepares DML queries (SELECT/INSERT/UPDATE/DELETE/BATCH statements) and maintains a cache\n// of prepared statements.\n// CQL protocol does not support preparing other query types.\n//\n// When using CQL protocol >= 4, it is possible to use gocql.UnsetValue as the bound value of a column.\n// This will cause the database to ignore writing the column.\n// The main advantage is the ability to keep the same prepared statement even when you don't\n// want to update some fields, where before you needed to make another prepared statement.\n//\n// # Executing multiple queries concurrently\n//\n// Session is safe to use from multiple goroutines, so to execute multiple concurrent queries, just execute them\n// from several worker goroutines. 
Gocql provides synchronously-looking API (as recommended for Go APIs) and the queries\n// are executed asynchronously at the protocol level.\n//\n//\tresults := make(chan error, 2)\n//\tgo func() {\n//\t\tresults <- session.Query(`INSERT INTO tweet (timeline, id, text) VALUES (?, ?, ?)`,\n//\t\t\t\"me\", gocql.TimeUUID(), \"hello world 1\").Exec()\n//\t}()\n//\tgo func() {\n//\t\tresults <- session.Query(`INSERT INTO tweet (timeline, id, text) VALUES (?, ?, ?)`,\n//\t\t\t\"me\", gocql.TimeUUID(), \"hello world 2\").Exec()\n//\t}()\n//\n// # Nulls\n//\n// Null values are unmarshalled as zero value of the type. If you need to distinguish for example between text\n// column being null and empty string, you can unmarshal into *string variable instead of string.\n//\n//\tvar text *string\n//\terr := scanner.Scan(&text)\n//\tif err != nil {\n//\t\t// handle error\n//\t}\n//\tif text != nil {\n//\t\t// not null\n//\t} else {\n//\t\t// null\n//\t}\n//\n// See Example_nulls for full example.\n//\n// # Reusing slices\n//\n// The driver reuses backing memory of slices when unmarshalling. This is an optimization so that a buffer does not\n// need to be allocated for every processed row. However, you need to be careful when storing the slices to other\n// memory structures.\n//\n//\tscanner := session.Query(`SELECT myints FROM table WHERE pk = ?`, \"key\").WithContext(ctx).Iter().Scanner()\n//\tvar myInts []int\n//\tfor scanner.Next() {\n//\t\t// This scan reuses backing store of myInts for each row.\n//\t\terr = scanner.Scan(&myInts)\n//\t\tif err != nil {\n//\t\t\tlog.Fatal(err)\n//\t\t}\n//\t}\n//\n// When you want to save the data for later use, pass a new slice every time. 
A common pattern is to declare the\n// slice variable within the scanner loop:\n//\n//\tscanner := session.Query(`SELECT myints FROM table WHERE pk = ?`, \"key\").WithContext(ctx).Iter().Scanner()\n//\tfor scanner.Next() {\n//\t\tvar myInts []int\n//\t\t// This scan always gets pointer to fresh myInts slice, so does not reuse memory.\n//\t\terr = scanner.Scan(&myInts)\n//\t\tif err != nil {\n//\t\t\tlog.Fatal(err)\n//\t\t}\n//\t}\n//\n// # Paging\n//\n// The driver supports paging of results with automatic prefetch, see ClusterConfig.PageSize, Session.SetPrefetch,\n// Query.PageSize, and Query.Prefetch.\n//\n// It is also possible to control the paging manually with Query.PageState (this disables automatic prefetch).\n// Manual paging is useful if you want to store the page state externally, for example in a URL to allow users\n// browse pages in a result. You might want to sign/encrypt the paging state when exposing it externally since\n// it contains data from primary keys.\n//\n// Paging state is specific to the CQL protocol version and the exact query used. It is meant as opaque state that\n// should not be modified. If you send paging state from different query or protocol version, then the behaviour\n// is not defined (you might get unexpected results or an error from the server). For example, do not send paging state\n// returned by node using protocol version 3 to a node using protocol version 4. Also, when using protocol version 4,\n// paging state between Cassandra 2.2 and 3.0 is incompatible (https://issues.apache.org/jira/browse/CASSANDRA-10880).\n//\n// The driver does not check whether the paging state is from the same protocol version/statement.\n// You might want to validate yourself as this could be a problem if you store paging state externally.\n// For example, if you store paging state in a URL, the URLs might become broken when you upgrade your cluster.\n//\n// Call Query.PageState(nil) to fetch just the first page of the query results. 
Pass the page state returned by\n// Iter.PageState to Query.PageState of a subsequent query to get the next page. If the length of slice returned\n// by Iter.PageState is zero, there are no more pages available (or an error occurred).\n//\n// Using too low values of PageSize will negatively affect performance, a value below 100 is probably too low.\n// While Cassandra returns exactly PageSize items (except for last page) in a page currently, the protocol authors\n// explicitly reserved the right to return smaller or larger amount of items in a page for performance reasons, so don't\n// rely on the page having the exact count of items.\n//\n// See Example_paging for an example of manual paging.\n//\n// # Dynamic list of columns\n//\n// There are certain situations when you don't know the list of columns in advance, mainly when the query is supplied\n// by the user. Iter.Columns, Iter.RowData, Iter.MapScan and Iter.SliceMap can be used to handle this case.\n//\n// See Example_dynamicColumns.\n//\n// # Batches\n//\n// The CQL protocol supports sending batches of DML statements (INSERT/UPDATE/DELETE) and so does gocql.\n// Use Session.Batch to create a new batch and then fill-in details of individual queries.\n// Then execute the batch with Session.ExecuteBatch.\n//\n// Logged batches ensure atomicity, either all or none of the operations in the batch will succeed, but they have\n// overhead to ensure this property.\n// Unlogged batches don't have the overhead of logged batches, but don't guarantee atomicity.\n// Updates of counters are handled specially by Cassandra so batches of counter updates have to use CounterBatch type.\n// A counter batch can only contain statements to update counters.\n//\n// For unlogged batches it is recommended to send only single-partition batches (i.e. 
all statements in the batch should\n// involve only a single partition).\n// Multi-partition batch needs to be split by the coordinator node and re-sent to\n// correct nodes.\n// With single-partition batches you can send the batch directly to the node for the partition without incurring the\n// additional network hop.\n//\n// It is also possible to pass entire BEGIN BATCH .. APPLY BATCH statement to Query.Exec.\n// There are differences how those are executed.\n// BEGIN BATCH statement passed to Query.Exec is prepared as a whole in a single statement.\n// Session.ExecuteBatch prepares individual statements in the batch.\n// If you have variable-length batches using the same statement, using Session.ExecuteBatch is more efficient.\n//\n// See Example_batch for an example.\n//\n// # Lightweight transactions\n//\n// Query.ScanCAS or Query.MapScanCAS can be used to execute a single-statement lightweight transaction (an\n// INSERT/UPDATE .. IF statement) and reading its result. See example for Query.MapScanCAS.\n//\n// Multiple-statement lightweight transactions can be executed as a logged batch that contains at least one conditional\n// statement. All the conditions must return true for the batch to be applied. You can use Session.ExecuteBatchCAS and\n// Session.MapExecuteBatchCAS when executing the batch to learn about the result of the LWT. See example for\n// Session.MapExecuteBatchCAS.\n//\n// # Retries and speculative execution\n//\n// Queries can be marked as idempotent. Marking the query as idempotent tells the driver that the query can be executed\n// multiple times without affecting its result. 
Non-idempotent queries are not eligible for retrying nor speculative\n// execution.\n//\n// Idempotent queries are retried in case of errors based on the configured RetryPolicy.\n// If the query is LWT and the configured RetryPolicy additionally implements LWTRetryPolicy\n// interface, then the policy will be cast to LWTRetryPolicy and used this way.\n//\n// Queries can be retried even before they fail by setting a SpeculativeExecutionPolicy. The policy can\n// cause the driver to retry on a different node if the query is taking longer than a specified delay even before the\n// driver receives an error or timeout from the server. When a query is speculatively executed, the original execution\n// is still executing. The two parallel executions of the query race to return a result, the first received result will\n// be returned.\n//\n// # User-defined types\n//\n// UDTs can be mapped (un)marshaled from/to map[string]any or a Go struct (or a type implementing\n// UDTUnmarshaler, UDTMarshaler, Unmarshaler or Marshaler interfaces).\n//\n// For structs, cql tag can be used to specify the CQL field name to be mapped to a struct field:\n//\n//\ttype MyUDT struct {\n//\t\tFieldA int32 `cql:\"a\"`\n//\t\tFieldB string `cql:\"b\"`\n//\t}\n//\n// See Example_userDefinedTypesMap, Example_userDefinedTypesStruct, ExampleUDTMarshaler, ExampleUDTUnmarshaler.\n//\n// # Metrics and tracing\n//\n// It is possible to provide observer implementations that could be used to gather metrics:\n//\n//   - QueryObserver for monitoring individual queries.\n//   - BatchObserver for monitoring batch queries.\n//   - ConnectObserver for monitoring new connections from the driver to the database.\n//   - FrameHeaderObserver for monitoring individual protocol frames.\n//\n// CQL protocol also supports tracing of queries. When enabled, the database will write information about\n// internal events that happened during execution of the query. 
You can use Query.Trace to request tracing and receive\n// the session ID that the database used to store the trace information in system_traces.sessions and\n// system_traces.events tables. NewTraceWriter returns an implementation of Tracer that writes the events to a writer.\n// Gathering trace information might be essential for debugging and optimizing queries, but writing traces has overhead,\n// so this feature should not be used on production systems with very high load unless you know what you are doing.\n// There is also a new implementation of Tracer - TracerEnhanced, that is intended to be more reliable and convenient to use.\n// It has functionality to check if trace is ready to be extracted and only actually gets it if requested which makes\n// the impact on performance smaller.\npackage gocql // import \"github.com/gocql/gocql\"\n"
  },
  {
    "path": "docs/Makefile",
    "content": "SHELL=bash\n# Global variables\n# You can set these variables from the command line.\nUV            = uv\nSPHINXOPTS    = -j auto\nSPHINXBUILD   = $(UV) run sphinx-build\nPAPER         =\nBUILDDIR      = _build\nSOURCEDIR     = source\n\n# Internal variables\nPAPEROPT_a4     = -D latex_paper_size=a4\nPAPEROPT_letter = -D latex_paper_size=letter\nALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR)\nTESTSPHINXOPTS  = $(ALLSPHINXOPTS) -W --keep-going\n\n.PHONY: all\nall: dirhtml\n\n# Setup commands\n.PHONY: setupenv\nsetupenv:\n\t@while IFS= read -r line; do \\\n      if [[ \"$${line}\" =~ ^[[:space:]]*requires[[:space:]]*= ]]; then\\\n        content=\"$${line#*=}\";\\\n        content=\"$${content//[[:space:]]/}\";\\\n        content=\"$${content#[}\";\\\n        content=\"$${content%]}\";\\\n        IFS=',' read -ra items <<< \"$$content\";\\\n        for item in \"$${items[@]}\"; do\\\n          item=\"$${item%\\\"}\";\\\n          item=\"$${item#\\\"}\";\\\n          pip install -q uv;\\\n        done;\\\n      fi;\\\n    done < pyproject.toml\n\n.PHONY: setup\nsetup:\n\t$(UV) sync\n\n.PHONY: update\nupdate:\n\t$(UV) sync --upgrade\n\n# Clean commands\n.PHONY: pristine\npristine: clean\n\tgit clean -dfX\n\n.PHONY: clean\nclean:\n\trm -rf $(BUILDDIR)/*\n\n# Generate output commands\n.PHONY: dirhtml\ndirhtml: setup\n\t$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/dirhtml.\"\n\n.PHONY: singlehtml\nsinglehtml: setup\n\t$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml\n\t@echo\n\t@echo \"Build finished. The HTML page is in $(BUILDDIR)/singlehtml.\"\n\n.PHONY: epub\nepub: setup\n\t$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub\n\t@echo\n\t@echo \"Build finished. 
The epub file is in $(BUILDDIR)/epub.\"\n\n.PHONY: epub3\nepub3: setup\n\t$(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3\n\t@echo\n\t@echo \"Build finished. The epub3 file is in $(BUILDDIR)/epub3.\"\n\n.PHONY: multiversion\nmultiversion: setup\n\t$(UV) run sphinx-multiversion source $(BUILDDIR)/dirhtml\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/dirhtml.\"\n\n.PHONY: redirects\nredirects: setup\n\t$(UV) run redirects-cli fromfile --yaml-file _utils/redirects.yaml --output-dir $(BUILDDIR)/dirhtml\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/dirhtml.\"\n\n# Preview commands\n.PHONY: preview\npreview: setup\n\t$(UV) run sphinx-autobuild -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml --port 5500\n\n.PHONY: multiversionpreview\nmultiversionpreview: multiversion\n\t$(UV) run python -m http.server 5500 --directory $(BUILDDIR)/dirhtml\n\n# Test commands\n.PHONY: test\ntest: setup\n\t$(SPHINXBUILD) -b dirhtml $(TESTSPHINXOPTS) $(BUILDDIR)/dirhtml\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/dirhtml.\"\n\n.PHONY: linkcheck\nlinkcheck: setup\n\t$(SPHINXBUILD) -b linkcheck $(SOURCEDIR) $(BUILDDIR)/linkcheck\n"
  },
  {
    "path": "docs/_utils/redirects.yaml",
    "content": "### a dictionary of redirects\n#old path: new path\n#\n\n# removing redirection html script files\n# test: /\n\n# /stable/test-redirect.html: /stable/index.html\n"
  },
  {
    "path": "docs/pyproject.toml",
    "content": "[project]\nname = \"sphinx-docs\"\ndescription = \"ScyllaDB Documentation\"\nversion = \"0.1.0\"\nrequires-python = \">=3.11\"\ndependencies = [\n    \"pygments>=2.19.2\",\n    \"sphinx-scylladb-theme>=1.9.1\",\n    \"myst-parser>=5.0.0\",\n    \"sphinx-autobuild>=2025.4.8\",\n    \"sphinx>=9.0\",\n    \"sphinx-multiversion-scylla>=0.3.7\",\n    \"sphinx-sitemap>=2.9.0\",\n    \"redirects_cli>=0.1.3\",\n]"
  },
  {
    "path": "docs/source/conf.py",
    "content": "# -*- coding: utf-8 -*-\nimport warnings\nfrom datetime import date\n\nfrom sphinx_scylladb_theme.utils import multiversion_regex_builder\n\n# -- Global variables\n\n# Builds documentation for the following tags and branches.\nTAGS = []\nBRANCHES = [\n    \"master\",\n]\n# Sets the latest version.\nLATEST_VERSION = \"master\"\n# Set which versions are not released yet.\nUNSTABLE_VERSIONS = []\n# Set which versions are deprecated\nDEPRECATED_VERSIONS = [\"\"]\n# Sets custom build.\nFLAGS = []\n\n# -- General configuration ------------------------------------------\n\n# Add any Sphinx extension module names here, as strings.\nextensions = [\n    \"sphinx.ext.autodoc\",\n    \"sphinx.ext.todo\",\n    \"sphinx.ext.mathjax\",\n    \"sphinx.ext.githubpages\",\n    \"sphinx.ext.extlinks\",\n    \"sphinx_sitemap\",\n    \"sphinx_scylladb_theme\",\n    \"sphinx_multiversion\",  # optional\n    \"myst_parser\",  # optional\n]\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"ScyllaDB gocql driver\"\ncopyright = str(date.today().year) + \" ScyllaDB\"\nauthor = \"ScyllaDB Project Contributors\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"**/_partials\", \".venv\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# List of substitutions\nrst_prolog = \"\"\"\n.. 
|rst| replace:: restructuredText\n\"\"\"\n\n# -- Options for myst parser ----------------------------------------\nmyst_enable_extensions = [\"colon_fence\"]\n\n# -- Options for not found extension --------------------------------\n\n# Template used to render the 404.html generated by this extension.\nnotfound_template = \"404.html\"\n\n# Prefix added to all the URLs generated in the 404 page.\nnotfound_urls_prefix = \"\"\n\n# -- Options for sitemap extension ----------------------------------\n\nsitemap_url_scheme = \"/stable/{link}\"\n\n# -- Options for multiversion extension -----------------------------\n\n# Whitelist pattern for tags\nsmv_tag_whitelist = multiversion_regex_builder(TAGS)\n# Whitelist pattern for branches\nsmv_branch_whitelist = multiversion_regex_builder(BRANCHES)\n# Defines which version is considered to be the latest stable version.\nsmv_latest_version = LATEST_VERSION\n# Defines the new name for the latest version.\nsmv_rename_latest_version = \"stable\"\n# Whitelist pattern for remotes (set to None to use local branches only)\nsmv_remote_whitelist = r\"^origin$\"\n# Pattern for released versions\nsmv_released_pattern = r\"^tags/.*$\"\n# Format for versioned output directories inside the build directory\nsmv_outputdir_format = \"{ref.name}\"\n# -- Options for HTML output ----------------------------------------\n\n# The theme to use for pages.\nhtml_theme = \"sphinx_scylladb_theme\"\n# html_theme_path = [\"../..\"]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further.  
For a list of options available for the theme, see the\n# documentation.\nhtml_theme_options = {\n    \"conf_py_path\": \"docs/source/\",\n    \"hide_edit_this_page_button\": \"false\",\n    \"hide_feedback_buttons\": \"false\",\n    \"github_issues_repository\": \"scylladb/gocql\",\n    \"github_repository\": \"scylladb/gocql\",\n    \"site_description\": \"ScyllaDB gocql driver.\",\n    \"hide_version_dropdown\": [],\n    \"zendesk_tag\": \"gq6ltsh3nfex3cnwfy4aj9\",\n    \"versions_unstable\": UNSTABLE_VERSIONS,\n    \"versions_deprecated\": DEPRECATED_VERSIONS,\n}\n\n# Last updated format\nhtml_last_updated_fmt = \"%d %b %Y\"\n\n# Custom sidebar templates, maps document names to template names.\nhtml_sidebars = {\"**\": [\"side-nav.html\"]}\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ScyllaDocumentationdoc\"\n\n# URL which points to the root of the HTML documentation.\nhtml_baseurl = \"https://gocql-driver.docs.scylladb.com\"\n\n# Dictionary of values to pass into the template engine’s context for all pages\nhtml_context = {\"html_baseurl\": html_baseurl}\n\n# -- Initialize Sphinx ----------------------------------------------\n\n\ndef setup(sphinx):\n    warnings.filterwarnings(\n        action=\"ignore\",\n        category=UserWarning,\n        message=r\".*Container node skipped.*\",\n    )\n"
  },
  {
    "path": "docs/source/index.rst",
    "content": "=====================\nScyllaDB gocql driver\n=====================\n\nLorem ipsum.\n\n.. toctree::\n\n    sample-page"
  },
  {
    "path": "docs/source/sample-page.rst",
    "content": "===========\nSample page\n===========\n\nLorem ipsum."
  },
  {
    "path": "errors.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n)\n\n// See CQL Binary Protocol v5, section 8 for more details.\n// https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec\nconst (\n\t// ErrCodeServer indicates unexpected error on server-side.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1246-L1247\n\tErrCodeServer = 0x0000\n\t// ErrCodeProtocol indicates a protocol violation by some client message.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1248-L1250\n\tErrCodeProtocol = 0x000A\n\t// ErrCodeCredentials indicates missing required authentication.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1251-L1254\n\tErrCodeCredentials = 0x0100\n\t// ErrCodeUnavailable indicates unavailable 
error.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1255-L1265\n\tErrCodeUnavailable = 0x1000\n\t// ErrCodeOverloaded returned in case of request on overloaded node coordinator.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1266-L1267\n\tErrCodeOverloaded = 0x1001\n\t// ErrCodeBootstrapping returned from the coordinator node in bootstrapping phase.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1268-L1269\n\tErrCodeBootstrapping = 0x1002\n\t// ErrCodeTruncate indicates truncation exception.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1270\n\tErrCodeTruncate = 0x1003\n\t// ErrCodeWriteTimeout returned in case of timeout during the request write.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1271-L1304\n\tErrCodeWriteTimeout = 0x1100\n\t// ErrCodeReadTimeout returned in case of timeout during the request read.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1305-L1321\n\tErrCodeReadTimeout = 0x1200\n\t// ErrCodeReadFailure indicates request read error which is not covered by ErrCodeReadTimeout.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1322-L1340\n\tErrCodeReadFailure = 0x1300\n\t// ErrCodeFunctionFailure indicates an error in user-defined function.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1341-L1347\n\tErrCodeFunctionFailure = 0x1400\n\t// ErrCodeWriteFailure indicates request write error which is not covered by ErrCodeWriteTimeout.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1348-L1385\n\tErrCodeWriteFailure = 0x1500\n\t// ErrCodeCDCWriteFailure is defined, but not yet documented in CQLv5 protocol.\n\t//\n\t// See 
https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1386\n\tErrCodeCDCWriteFailure = 0x1600\n\t// ErrCodeCASWriteUnknown indicates only partially completed CAS operation.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1387-L1397\n\tErrCodeCASWriteUnknown = 0x1700\n\t// ErrCodeSyntax indicates the syntax error in the query.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1399\n\tErrCodeSyntax = 0x2000\n\t// ErrCodeUnauthorized indicates access rights violation by user on performed operation.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1400-L1401\n\tErrCodeUnauthorized = 0x2100\n\t// ErrCodeInvalid indicates invalid query error which is not covered by ErrCodeSyntax.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1402\n\tErrCodeInvalid = 0x2200\n\t// ErrCodeConfig indicates the configuration error.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1403\n\tErrCodeConfig = 0x2300\n\t// ErrCodeAlreadyExists is returned for the requests creating the existing keyspace/table.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1404-L1413\n\tErrCodeAlreadyExists = 0x2400\n\t// ErrCodeUnprepared returned from the host for prepared statement which is unknown.\n\t//\n\t// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1414-L1417\n\tErrCodeUnprepared = 0x2500\n)\n\ntype RequestError interface {\n\tCode() int\n\tMessage() string\n\tError() string\n}\n\ntype RequestErrUnavailable struct {\n\tfrm.ErrorFrame\n\tConsistency Consistency\n\tRequired    int\n\tAlive       int\n}\n\nfunc (e *RequestErrUnavailable) String() string {\n\treturn fmt.Sprintf(\"[request_error_unavailable consistency=%s required=%d alive=%d]\", e.Consistency, e.Required, 
e.Alive)\n}\n\ntype ErrorMap map[string]uint16\n\ntype RequestErrWriteTimeout struct {\n\tWriteType string\n\tfrm.ErrorFrame\n\tReceived    int\n\tBlockFor    int\n\tConsistency Consistency\n}\n\ntype RequestErrWriteFailure struct {\n\tErrorMap  ErrorMap\n\tWriteType string\n\tfrm.ErrorFrame\n\tReceived    int\n\tBlockFor    int\n\tNumFailures int\n\tConsistency Consistency\n}\n\ntype RequestErrCDCWriteFailure struct {\n\tfrm.ErrorFrame\n}\n\ntype RequestErrReadTimeout struct {\n\tfrm.ErrorFrame\n\tReceived    int\n\tBlockFor    int\n\tConsistency Consistency\n\tDataPresent byte\n}\n\ntype RequestErrAlreadyExists struct {\n\tKeyspace string\n\tTable    string\n\tfrm.ErrorFrame\n}\n\ntype RequestErrUnprepared struct {\n\tStatementId []byte\n\tfrm.ErrorFrame\n}\n\ntype RequestErrReadFailure struct {\n\tErrorMap ErrorMap\n\tfrm.ErrorFrame\n\tReceived    int\n\tBlockFor    int\n\tNumFailures int\n\tConsistency Consistency\n\tDataPresent bool\n}\n\ntype RequestErrFunctionFailure struct {\n\tKeyspace string\n\tFunction string\n\tArgTypes []string\n\tfrm.ErrorFrame\n}\n\n// RequestErrCASWriteUnknown is distinct error for ErrCodeCasWriteUnknown.\n//\n// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1387-L1397\ntype RequestErrCASWriteUnknown struct {\n\tfrm.ErrorFrame\n\tConsistency Consistency\n\tReceived    int\n\tBlockFor    int\n}\n\ntype UnknownServerError struct {\n\tfrm.ErrorFrame\n}\n\ntype OpType uint8\n\nconst (\n\tOpTypeRead  OpType = 0\n\tOpTypeWrite OpType = 1\n)\n\ntype RequestErrRateLimitReached struct {\n\tfrm.ErrorFrame\n\tOpType                OpType\n\tRejectedByCoordinator bool\n}\n\nfunc (e *RequestErrRateLimitReached) String() string {\n\tvar opType string\n\tif e.OpType == OpTypeRead {\n\t\topType = \"Read\"\n\t} else if e.OpType == OpTypeWrite {\n\t\topType = \"Write\"\n\t} else {\n\t\topType = \"Other\"\n\t}\n\treturn fmt.Sprintf(\"[request_error_rate_limit_reached OpType=%s RejectedByCoordinator=%t]\", opType, 
e.RejectedByCoordinator)\n}\n"
  },
  {
    "path": "errors_test.go",
    "content": "//go:build integration\n// +build integration\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestErrorsParse(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int primary key)`, table)); err != nil {\n\t\tt.Fatal(\"create:\", err)\n\t}\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int primary key)`, table)); err == nil {\n\t\tt.Fatal(\"Should have gotten already exists error from cassandra server.\")\n\t} else {\n\t\te := &RequestErrAlreadyExists{}\n\t\tif errors.As(err, &e) {\n\t\t\tif e.Table != table {\n\t\t\t\tt.Fatalf(\"expected error table to be %q but was %q\", table, e.Table)\n\t\t\t}\n\t\t} else {\n\t\t\tt.Fatalf(\"expected to get RequestErrAlreadyExists instead got %T\", 
e)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "events/event_converter.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage events\n\nimport (\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n)\n\n// FrameToEvent converts an internal frame to a public Event interface.\n// This function has access to internal frame types and can perform\n// type-safe conversions.\n// Returns nil if the frame is not an event frame.\nfunc FrameToEvent(f any) Event {\n\tif f == nil {\n\t\treturn nil\n\t}\n\n\tswitch frame := f.(type) {\n\tcase *frm.TopologyChangeEventFrame:\n\t\treturn &TopologyChangeEvent{\n\t\t\tChange: frame.Change,\n\t\t\tHost:   frame.Host,\n\t\t\tPort:   frame.Port,\n\t\t}\n\n\tcase *frm.StatusChangeEventFrame:\n\t\treturn &StatusChangeEvent{\n\t\t\tChange: frame.Change,\n\t\t\tHost:   frame.Host,\n\t\t\tPort:   frame.Port,\n\t\t}\n\n\tcase *frm.SchemaChangeKeyspace:\n\t\treturn &SchemaChangeKeyspaceEvent{\n\t\t\tChange:   frame.Change,\n\t\t\tKeyspace: frame.Keyspace,\n\t\t}\n\n\tcase *frm.SchemaChangeTable:\n\t\treturn &SchemaChangeTableEvent{\n\t\t\tChange:   frame.Change,\n\t\t\tKeyspace: frame.Keyspace,\n\t\t\tTable:    frame.Object,\n\t\t}\n\n\tcase *frm.SchemaChangeType:\n\t\treturn &SchemaChangeTypeEvent{\n\t\t\tChange:   
frame.Change,\n\t\t\tKeyspace: frame.Keyspace,\n\t\t\tTypeName: frame.Object,\n\t\t}\n\n\tcase *frm.SchemaChangeFunction:\n\t\treturn &SchemaChangeFunctionEvent{\n\t\t\tChange:    frame.Change,\n\t\t\tKeyspace:  frame.Keyspace,\n\t\t\tFunction:  frame.Name,\n\t\t\tArguments: frame.Args,\n\t\t}\n\n\tcase *frm.SchemaChangeAggregate:\n\t\treturn &SchemaChangeAggregateEvent{\n\t\t\tChange:    frame.Change,\n\t\t\tKeyspace:  frame.Keyspace,\n\t\t\tAggregate: frame.Name,\n\t\t\tArguments: frame.Args,\n\t\t}\n\tcase *frm.ClientRoutesChanged:\n\t\treturn &ClientRoutesChangedEvent{\n\t\t\tChangeType:    frame.ChangeType,\n\t\t\tConnectionIDs: frame.ConnectionIDs,\n\t\t\tHostIDs:       frame.HostIDs,\n\t\t}\n\tdefault:\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "events/event_converter_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage events_test\n\nimport (\n\t\"net\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql/events\"\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n)\n\nfunc TestFrameToEvent_TopologyChange(t *testing.T) {\n\tframe := &frm.TopologyChangeEventFrame{\n\t\tChange: \"NEW_NODE\",\n\t\tHost:   net.ParseIP(\"192.168.1.1\"),\n\t\tPort:   9042,\n\t}\n\n\tevent := events.FrameToEvent(frame)\n\tif event == nil {\n\t\tt.Fatal(\"FrameToEvent returned nil\")\n\t}\n\n\ttopologyEvent, ok := event.(*events.TopologyChangeEvent)\n\tif !ok {\n\t\tt.Fatalf(\"Expected *TopologyChangeEvent, got %T\", event)\n\t}\n\n\tif topologyEvent.Change != \"NEW_NODE\" {\n\t\tt.Errorf(\"Change = %v, want NEW_NODE\", topologyEvent.Change)\n\t}\n\tif !topologyEvent.Host.Equal(net.ParseIP(\"192.168.1.1\")) {\n\t\tt.Errorf(\"Host = %v, want 192.168.1.1\", topologyEvent.Host)\n\t}\n\tif topologyEvent.Port != 9042 {\n\t\tt.Errorf(\"Port = %v, want 9042\", topologyEvent.Port)\n\t}\n\tif topologyEvent.Type() != events.ClusterEventTypeTopologyChange {\n\t\tt.Errorf(\"Type() = %v, want EventTypeTopologyChange\", 
topologyEvent.Type())\n\t}\n}\n\nfunc TestFrameToEvent_StatusChange(t *testing.T) {\n\tframe := &frm.StatusChangeEventFrame{\n\t\tChange: \"UP\",\n\t\tHost:   net.ParseIP(\"192.168.1.2\"),\n\t\tPort:   9042,\n\t}\n\n\tevent := events.FrameToEvent(frame)\n\tif event == nil {\n\t\tt.Fatal(\"FrameToEvent returned nil\")\n\t}\n\n\tstatusEvent, ok := event.(*events.StatusChangeEvent)\n\tif !ok {\n\t\tt.Fatalf(\"Expected *StatusChangeEvent, got %T\", event)\n\t}\n\n\tif statusEvent.Change != \"UP\" {\n\t\tt.Errorf(\"Change = %v, want UP\", statusEvent.Change)\n\t}\n\tif !statusEvent.Host.Equal(net.ParseIP(\"192.168.1.2\")) {\n\t\tt.Errorf(\"Host = %v, want 192.168.1.2\", statusEvent.Host)\n\t}\n\tif statusEvent.Port != 9042 {\n\t\tt.Errorf(\"Port = %v, want 9042\", statusEvent.Port)\n\t}\n\tif statusEvent.Type() != events.ClusterEventTypeStatusChange {\n\t\tt.Errorf(\"Type() = %v, want EventTypeStatusChange\", statusEvent.Type())\n\t}\n}\n\nfunc TestFrameToEvent_SchemaChangeKeyspace(t *testing.T) {\n\tframe := &frm.SchemaChangeKeyspace{\n\t\tChange:   \"CREATED\",\n\t\tKeyspace: \"test_keyspace\",\n\t}\n\n\tevent := events.FrameToEvent(frame)\n\tif event == nil {\n\t\tt.Fatal(\"FrameToEvent returned nil\")\n\t}\n\n\tschemaEvent, ok := event.(*events.SchemaChangeKeyspaceEvent)\n\tif !ok {\n\t\tt.Fatalf(\"Expected *SchemaChangeKeyspaceEvent, got %T\", event)\n\t}\n\n\tif schemaEvent.Change != \"CREATED\" {\n\t\tt.Errorf(\"Change = %v, want CREATED\", schemaEvent.Change)\n\t}\n\tif schemaEvent.Keyspace != \"test_keyspace\" {\n\t\tt.Errorf(\"Keyspace = %v, want test_keyspace\", schemaEvent.Keyspace)\n\t}\n\tif schemaEvent.Type() != events.ClusterEventTypeSchemaChangeKeyspace {\n\t\tt.Errorf(\"Type() = %v, want EventTypeSchemaChangeKeyspace\", schemaEvent.Type())\n\t}\n}\n\nfunc TestFrameToEvent_SchemaChangeTable(t *testing.T) {\n\tframe := &frm.SchemaChangeTable{\n\t\tChange:   \"UPDATED\",\n\t\tKeyspace: \"test_keyspace\",\n\t\tObject:   \"test_table\",\n\t}\n\n\tevent := 
events.FrameToEvent(frame)\n\tif event == nil {\n\t\tt.Fatal(\"FrameToEvent returned nil\")\n\t}\n\n\tschemaEvent, ok := event.(*events.SchemaChangeTableEvent)\n\tif !ok {\n\t\tt.Fatalf(\"Expected *SchemaChangeTableEvent, got %T\", event)\n\t}\n\n\tif schemaEvent.Change != \"UPDATED\" {\n\t\tt.Errorf(\"Change = %v, want UPDATED\", schemaEvent.Change)\n\t}\n\tif schemaEvent.Keyspace != \"test_keyspace\" {\n\t\tt.Errorf(\"Keyspace = %v, want test_keyspace\", schemaEvent.Keyspace)\n\t}\n\tif schemaEvent.Table != \"test_table\" {\n\t\tt.Errorf(\"Table = %v, want test_table\", schemaEvent.Table)\n\t}\n\tif schemaEvent.Type() != events.ClusterEventTypeSchemaChangeTable {\n\t\tt.Errorf(\"Type() = %v, want EventTypeSchemaChangeTable\", schemaEvent.Type())\n\t}\n}\n\nfunc TestFrameToEvent_SchemaChangeType(t *testing.T) {\n\tframe := &frm.SchemaChangeType{\n\t\tChange:   \"DROPPED\",\n\t\tKeyspace: \"test_keyspace\",\n\t\tObject:   \"test_type\",\n\t}\n\n\tevent := events.FrameToEvent(frame)\n\tif event == nil {\n\t\tt.Fatal(\"FrameToEvent returned nil\")\n\t}\n\n\tschemaEvent, ok := event.(*events.SchemaChangeTypeEvent)\n\tif !ok {\n\t\tt.Fatalf(\"Expected *SchemaChangeTypeEvent, got %T\", event)\n\t}\n\n\tif schemaEvent.Change != \"DROPPED\" {\n\t\tt.Errorf(\"Change = %v, want DROPPED\", schemaEvent.Change)\n\t}\n\tif schemaEvent.Keyspace != \"test_keyspace\" {\n\t\tt.Errorf(\"Keyspace = %v, want test_keyspace\", schemaEvent.Keyspace)\n\t}\n\tif schemaEvent.TypeName != \"test_type\" {\n\t\tt.Errorf(\"TypeName = %v, want test_type\", schemaEvent.TypeName)\n\t}\n\tif schemaEvent.Type() != events.ClusterEventTypeSchemaChangeType {\n\t\tt.Errorf(\"Type() = %v, want EventTypeSchemaChangeType\", schemaEvent.Type())\n\t}\n}\n\nfunc TestFrameToEvent_SchemaChangeFunction(t *testing.T) {\n\tframe := &frm.SchemaChangeFunction{\n\t\tChange:   \"CREATED\",\n\t\tKeyspace: \"test_keyspace\",\n\t\tName:     \"test_function\",\n\t\tArgs:     []string{\"int\", \"text\"},\n\t}\n\n\tevent := 
events.FrameToEvent(frame)\n\tif event == nil {\n\t\tt.Fatal(\"FrameToEvent returned nil\")\n\t}\n\n\tschemaEvent, ok := event.(*events.SchemaChangeFunctionEvent)\n\tif !ok {\n\t\tt.Fatalf(\"Expected *SchemaChangeFunctionEvent, got %T\", event)\n\t}\n\n\tif schemaEvent.Change != \"CREATED\" {\n\t\tt.Errorf(\"Change = %v, want CREATED\", schemaEvent.Change)\n\t}\n\tif schemaEvent.Keyspace != \"test_keyspace\" {\n\t\tt.Errorf(\"Keyspace = %v, want test_keyspace\", schemaEvent.Keyspace)\n\t}\n\tif schemaEvent.Function != \"test_function\" {\n\t\tt.Errorf(\"Function = %v, want test_function\", schemaEvent.Function)\n\t}\n\tif len(schemaEvent.Arguments) != 2 {\n\t\tt.Errorf(\"len(Arguments) = %v, want 2\", len(schemaEvent.Arguments))\n\t}\n\tif schemaEvent.Type() != events.ClusterEventTypeSchemaChangeFunction {\n\t\tt.Errorf(\"Type() = %v, want EventTypeSchemaChangeFunction\", schemaEvent.Type())\n\t}\n}\n\nfunc TestFrameToEvent_SchemaChangeAggregate(t *testing.T) {\n\tframe := &frm.SchemaChangeAggregate{\n\t\tChange:   \"UPDATED\",\n\t\tKeyspace: \"test_keyspace\",\n\t\tName:     \"test_aggregate\",\n\t\tArgs:     []string{\"int\"},\n\t}\n\n\tevent := events.FrameToEvent(frame)\n\tif event == nil {\n\t\tt.Fatal(\"FrameToEvent returned nil\")\n\t}\n\n\tschemaEvent, ok := event.(*events.SchemaChangeAggregateEvent)\n\tif !ok {\n\t\tt.Fatalf(\"Expected *SchemaChangeAggregateEvent, got %T\", event)\n\t}\n\n\tif schemaEvent.Change != \"UPDATED\" {\n\t\tt.Errorf(\"Change = %v, want UPDATED\", schemaEvent.Change)\n\t}\n\tif schemaEvent.Keyspace != \"test_keyspace\" {\n\t\tt.Errorf(\"Keyspace = %v, want test_keyspace\", schemaEvent.Keyspace)\n\t}\n\tif schemaEvent.Aggregate != \"test_aggregate\" {\n\t\tt.Errorf(\"Aggregate = %v, want test_aggregate\", schemaEvent.Aggregate)\n\t}\n\tif len(schemaEvent.Arguments) != 1 {\n\t\tt.Errorf(\"len(Arguments) = %v, want 1\", len(schemaEvent.Arguments))\n\t}\n\tif schemaEvent.Type() != events.ClusterEventTypeSchemaChangeAggregate 
{\n\t\tt.Errorf(\"Type() = %v, want EventTypeSchemaChangeAggregate\", schemaEvent.Type())\n\t}\n}\n\nfunc TestFrameToEvent_Nil(t *testing.T) {\n\tevent := events.FrameToEvent(nil)\n\tif event != nil {\n\t\tt.Errorf(\"FrameToEvent(nil) = %v, want nil\", event)\n\t}\n}\n\nfunc TestFrameToEvent_NonEventFrame(t *testing.T) {\n\t// Test with a non-event frame type\n\tframe := &frm.ErrorFrame{}\n\tevent := events.FrameToEvent(frame)\n\tif event != nil {\n\t\tt.Errorf(\"FrameToEvent(non-event) = %v, want nil\", event)\n\t}\n}\n\nfunc TestFrameToEvent_ClientRoutesChanged(t *testing.T) {\n\tframe := &frm.ClientRoutesChanged{\n\t\tChangeType:    \"UPDATED\",\n\t\tConnectionIDs: []string{\"c1\", \"\"},\n\t\tHostIDs:       []string{},\n\t}\n\n\tevent := events.FrameToEvent(frame)\n\tif event == nil {\n\t\tt.Fatal(\"FrameToEvent returned nil\")\n\t}\n\n\tclientEvent, ok := event.(*events.ClientRoutesChangedEvent)\n\tif !ok {\n\t\tt.Fatalf(\"Expected *ClientRoutesChangedEvent, got %T\", event)\n\t}\n\n\tif clientEvent.ChangeType != \"UPDATED\" {\n\t\tt.Errorf(\"ChangeType = %v, want UPDATED\", clientEvent.ChangeType)\n\t}\n\tif len(clientEvent.ConnectionIDs) != 2 || clientEvent.ConnectionIDs[1] != \"\" {\n\t\tt.Errorf(\"ConnectionIDs = %v, want [c1 \\\"\\\"]\", clientEvent.ConnectionIDs)\n\t}\n\tif len(clientEvent.HostIDs) != 0 {\n\t\tt.Errorf(\"HostIDs = %v, want empty\", clientEvent.HostIDs)\n\t}\n\tif clientEvent.Type() != events.ClusterEventTypeClientRoutesChanged {\n\t\tt.Errorf(\"Type() = %v, want ClusterEventTypeClientRoutesChanged\", clientEvent.Type())\n\t}\n}\n"
  },
  {
    "path": "events/events.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n// Package events provides public event types for Cassandra/ScyllaDB server events.\n// These events are sent by the server to notify clients of topology changes, status changes,\n// and schema changes.\npackage events\n\nimport (\n\t\"fmt\"\n\t\"net\"\n)\n\n// EventType represents the type of event\ntype EventType int\n\nconst (\n\t// ClusterEventTypeTopologyChange represents a topology change event (NEW_NODE, REMOVED_NODE, MOVED_NODE)\n\tClusterEventTypeTopologyChange EventType = iota\n\t// ClusterEventTypeStatusChange represents a status change event (UP, DOWN)\n\tClusterEventTypeStatusChange\n\t// ClusterEventTypeSchemaChangeKeyspace represents a keyspace schema change\n\tClusterEventTypeSchemaChangeKeyspace\n\t// ClusterEventTypeSchemaChangeTable represents a table schema change\n\tClusterEventTypeSchemaChangeTable\n\t// ClusterEventTypeSchemaChangeType represents a UDT schema change\n\tClusterEventTypeSchemaChangeType\n\t// ClusterEventTypeSchemaChangeFunction represents a function schema change\n\tClusterEventTypeSchemaChangeFunction\n\t// ClusterEventTypeSchemaChangeAggregate represents an aggregate schema 
change\n\tClusterEventTypeSchemaChangeAggregate\n\t// ClusterEventTypeClientRoutesChanged represents an event of update of `system.client_routes` table\n\tClusterEventTypeClientRoutesChanged\n\t// SessionEventTypeControlConnectionRecreated is fired when the session loses its control connection to the cluster and has just re-established it.\n\tSessionEventTypeControlConnectionRecreated\n)\n\nfunc (t EventType) IsClusterEvent() bool {\n\tswitch t {\n\tcase ClusterEventTypeTopologyChange:\n\t\treturn true\n\tcase ClusterEventTypeStatusChange:\n\t\treturn true\n\tcase ClusterEventTypeSchemaChangeKeyspace:\n\t\treturn true\n\tcase ClusterEventTypeSchemaChangeTable:\n\t\treturn true\n\tcase ClusterEventTypeSchemaChangeType:\n\t\treturn true\n\tcase ClusterEventTypeSchemaChangeFunction:\n\t\treturn true\n\tcase ClusterEventTypeSchemaChangeAggregate:\n\t\treturn true\n\tcase ClusterEventTypeClientRoutesChanged:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (t EventType) String() string {\n\tswitch t {\n\tcase ClusterEventTypeTopologyChange:\n\t\treturn \"CLUSTER<TOPOLOGY_CHANGE>\"\n\tcase ClusterEventTypeStatusChange:\n\t\treturn \"CLUSTER<STATUS_CHANGE>\"\n\tcase ClusterEventTypeSchemaChangeKeyspace:\n\t\treturn \"CLUSTER<SCHEMA_CHANGE_KEYSPACE>\"\n\tcase ClusterEventTypeSchemaChangeTable:\n\t\treturn \"CLUSTER<SCHEMA_CHANGE_TABLE>\"\n\tcase ClusterEventTypeSchemaChangeType:\n\t\treturn \"CLUSTER<SCHEMA_CHANGE_TYPE>\"\n\tcase ClusterEventTypeSchemaChangeFunction:\n\t\treturn \"CLUSTER<SCHEMA_CHANGE_FUNCTION>\"\n\tcase ClusterEventTypeSchemaChangeAggregate:\n\t\treturn \"CLUSTER<SCHEMA_CHANGE_AGGREGATE>\"\n\tcase ClusterEventTypeClientRoutesChanged:\n\t\treturn \"CLUSTER<CLIENT_ROUTES_CHANGE>\"\n\tcase SessionEventTypeControlConnectionRecreated:\n\t\treturn \"SESSION<CONTROL_CONNECTION_RECREATED>\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"UNKNOWN(%d)\", t)\n\t}\n}\n\n// Event is the common interface for all event types\ntype Event interface {\n\t// Type 
returns the type of event\n\tType() EventType\n\t// String returns a string representation of the event\n\tString() string\n}\n\n// TopologyChangeEvent represents a topology change in the cluster\ntype TopologyChangeEvent struct {\n\t// Change is the type of topology change (NEW_NODE, REMOVED_NODE, MOVED_NODE)\n\tChange string\n\t// Host is the IP address of the node\n\tHost net.IP\n\t// Port is the port number\n\tPort int\n}\n\n// Type returns ClusterEventTypeTopologyChange\nfunc (e *TopologyChangeEvent) Type() EventType {\n\treturn ClusterEventTypeTopologyChange\n}\n\n// String returns a string representation of the event\nfunc (e *TopologyChangeEvent) String() string {\n\treturn fmt.Sprintf(\"TopologyChange{change=%s, host=%s, port=%d}\", e.Change, e.Host, e.Port)\n}\n\n// StatusChangeEvent represents a status change of a node\ntype StatusChangeEvent struct {\n\t// Change is the type of status change (UP, DOWN)\n\tChange string\n\t// Host is the IP address of the node\n\tHost net.IP\n\t// Port is the port number\n\tPort int\n}\n\n// Type returns ClusterEventTypeStatusChange\nfunc (e *StatusChangeEvent) Type() EventType {\n\treturn ClusterEventTypeStatusChange\n}\n\n// String returns a string representation of the event\nfunc (e *StatusChangeEvent) String() string {\n\treturn fmt.Sprintf(\"StatusChange{change=%s, host=%s, port=%d}\", e.Change, e.Host, e.Port)\n}\n\n// SchemaChangeKeyspaceEvent represents a keyspace schema change\ntype SchemaChangeKeyspaceEvent struct {\n\t// Change is the type of change (CREATED, UPDATED, DROPPED)\n\tChange string\n\t// Keyspace is the name of the keyspace\n\tKeyspace string\n}\n\n// Type returns ClusterEventTypeSchemaChangeKeyspace\nfunc (e *SchemaChangeKeyspaceEvent) Type() EventType {\n\treturn ClusterEventTypeSchemaChangeKeyspace\n}\n\n// String returns a string representation of the event\nfunc (e *SchemaChangeKeyspaceEvent) String() string {\n\treturn fmt.Sprintf(\"SchemaChangeKeyspace{change=%s, keyspace=%s}\", e.Change, 
e.Keyspace)\n}\n\n// SchemaChangeTableEvent represents a table schema change\ntype SchemaChangeTableEvent struct {\n\t// Change is the type of change (CREATED, UPDATED, DROPPED)\n\tChange string\n\t// Keyspace is the name of the keyspace\n\tKeyspace string\n\t// Table is the name of the table\n\tTable string\n}\n\n// Type returns ClusterEventTypeSchemaChangeTable\nfunc (e *SchemaChangeTableEvent) Type() EventType {\n\treturn ClusterEventTypeSchemaChangeTable\n}\n\n// String returns a string representation of the event\nfunc (e *SchemaChangeTableEvent) String() string {\n\treturn fmt.Sprintf(\"SchemaChangeTable{change=%s, keyspace=%s, table=%s}\", e.Change, e.Keyspace, e.Table)\n}\n\n// SchemaChangeTypeEvent represents a UDT (User Defined Type) schema change\ntype SchemaChangeTypeEvent struct {\n\t// Change is the type of change (CREATED, UPDATED, DROPPED)\n\tChange string\n\t// Keyspace is the name of the keyspace\n\tKeyspace string\n\t// TypeName is the name of the UDT\n\tTypeName string\n}\n\n// Type returns ClusterEventTypeSchemaChangeType\nfunc (e *SchemaChangeTypeEvent) Type() EventType {\n\treturn ClusterEventTypeSchemaChangeType\n}\n\n// String returns a string representation of the event\nfunc (e *SchemaChangeTypeEvent) String() string {\n\treturn fmt.Sprintf(\"SchemaChangeType{change=%s, keyspace=%s, type=%s}\", e.Change, e.Keyspace, e.TypeName)\n}\n\n// SchemaChangeFunctionEvent represents a function schema change\ntype SchemaChangeFunctionEvent struct {\n\t// Change is the type of change (CREATED, UPDATED, DROPPED)\n\tChange string\n\t// Keyspace is the name of the keyspace\n\tKeyspace string\n\t// Function is the name of the function\n\tFunction string\n\t// Arguments is the list of argument types\n\tArguments []string\n}\n\n// Type returns ClusterEventTypeSchemaChangeFunction\nfunc (e *SchemaChangeFunctionEvent) Type() EventType {\n\treturn ClusterEventTypeSchemaChangeFunction\n}\n\n// String returns a string representation of the event\nfunc (e 
*SchemaChangeFunctionEvent) String() string {\n\treturn fmt.Sprintf(\"SchemaChangeFunction{change=%s, keyspace=%s, function=%s, args=%v}\",\n\t\te.Change, e.Keyspace, e.Function, e.Arguments)\n}\n\n// SchemaChangeAggregateEvent represents an aggregate schema change\ntype SchemaChangeAggregateEvent struct {\n\t// Change is the type of change (CREATED, UPDATED, DROPPED)\n\tChange string\n\t// Keyspace is the name of the keyspace\n\tKeyspace string\n\t// Aggregate is the name of the aggregate\n\tAggregate string\n\t// Arguments is the list of argument types\n\tArguments []string\n}\n\n// Type returns ClusterEventTypeSchemaChangeAggregate\nfunc (e *SchemaChangeAggregateEvent) Type() EventType {\n\treturn ClusterEventTypeSchemaChangeAggregate\n}\n\n// String returns a string representation of the event\nfunc (e *SchemaChangeAggregateEvent) String() string {\n\treturn fmt.Sprintf(\"SchemaChangeAggregate{change=%s, keyspace=%s, aggregate=%s, args=%v}\",\n\t\te.Change, e.Keyspace, e.Aggregate, e.Arguments)\n}\n\n// ClientRoutesChangedEvent represents an update of the `system.client_routes` table\ntype ClientRoutesChangedEvent struct {\n\t// ChangeType is the type of change (UPDATED)\n\tChangeType string\n\t// List of connection ids involved into update\n\tConnectionIDs []string\n\t// List of host ids involved into update\n\tHostIDs []string\n}\n\n// Type returns ClusterEventTypeClientRoutesChanged\nfunc (e *ClientRoutesChangedEvent) Type() EventType {\n\treturn ClusterEventTypeClientRoutesChanged\n}\n\n// String returns a string representation of the event\nfunc (e *ClientRoutesChangedEvent) String() string {\n\treturn fmt.Sprintf(\"ConnectionMetadataChanged{changeType=%s, ConnectionIDs=%s, HostIDs=%s}\",\n\t\te.ChangeType, e.ConnectionIDs, e.HostIDs)\n}\n\ntype HostInfo struct {\n\tHostID string\n\tHost   net.IP\n\tPort   int\n}\n\n// String returns a string representation of the host info\nfunc (h *HostInfo) String() string {\n\treturn fmt.Sprintf(\"HostInfo{Host=%s, Port=%d, HostID=%s}\", h.Host, 
h.Port, h.HostID)\n}\n\n// ControlConnectionRecreatedEvent represents a control connection reconnection event.\ntype ControlConnectionRecreatedEvent struct {\n\tOldHost HostInfo\n\tNewHost HostInfo\n}\n\n// Type returns SessionEventTypeControlConnectionRecreated\nfunc (e *ControlConnectionRecreatedEvent) Type() EventType {\n\treturn SessionEventTypeControlConnectionRecreated\n}\n\n// String returns a string representation of the event\nfunc (e *ControlConnectionRecreatedEvent) String() string {\n\treturn fmt.Sprintf(\"ControlConnectionRecreatedEvent{OldHost=%s, NewHost=%s}\", e.OldHost.String(), e.NewHost.String())\n}\n"
  },
  {
    "path": "events/events_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage events\n\nimport (\n\t\"net\"\n\t\"testing\"\n)\n\nfunc TestTopologyChangeEvent(t *testing.T) {\n\tevent := &TopologyChangeEvent{\n\t\tChange: \"NEW_NODE\",\n\t\tHost:   net.ParseIP(\"192.168.1.1\"),\n\t\tPort:   9042,\n\t}\n\n\tif event.Type() != ClusterEventTypeTopologyChange {\n\t\tt.Errorf(\"Type() = %v, want %v\", event.Type(), ClusterEventTypeTopologyChange)\n\t}\n\n\tstr := event.String()\n\tif str == \"\" {\n\t\tt.Error(\"String() returned empty string\")\n\t}\n\tt.Logf(\"TopologyChangeEvent.String() = %s\", str)\n}\n\nfunc TestStatusChangeEvent(t *testing.T) {\n\tevent := &StatusChangeEvent{\n\t\tChange: \"UP\",\n\t\tHost:   net.ParseIP(\"192.168.1.2\"),\n\t\tPort:   9042,\n\t}\n\n\tif event.Type() != ClusterEventTypeStatusChange {\n\t\tt.Errorf(\"Type() = %v, want %v\", event.Type(), ClusterEventTypeStatusChange)\n\t}\n\n\tstr := event.String()\n\tif str == \"\" {\n\t\tt.Error(\"String() returned empty string\")\n\t}\n\tt.Logf(\"StatusChangeEvent.String() = %s\", str)\n}\n\nfunc TestSchemaChangeKeyspaceEvent(t *testing.T) {\n\tevent := &SchemaChangeKeyspaceEvent{\n\t\tChange:   
\"CREATED\",\n\t\tKeyspace: \"test_keyspace\",\n\t}\n\n\tif event.Type() != ClusterEventTypeSchemaChangeKeyspace {\n\t\tt.Errorf(\"Type() = %v, want %v\", event.Type(), ClusterEventTypeSchemaChangeKeyspace)\n\t}\n\n\tstr := event.String()\n\tif str == \"\" {\n\t\tt.Error(\"String() returned empty string\")\n\t}\n\tt.Logf(\"SchemaChangeKeyspaceEvent.String() = %s\", str)\n}\n\nfunc TestSchemaChangeTableEvent(t *testing.T) {\n\tevent := &SchemaChangeTableEvent{\n\t\tChange:   \"UPDATED\",\n\t\tKeyspace: \"test_keyspace\",\n\t\tTable:    \"test_table\",\n\t}\n\n\tif event.Type() != ClusterEventTypeSchemaChangeTable {\n\t\tt.Errorf(\"Type() = %v, want %v\", event.Type(), ClusterEventTypeSchemaChangeTable)\n\t}\n\n\tstr := event.String()\n\tif str == \"\" {\n\t\tt.Error(\"String() returned empty string\")\n\t}\n\tt.Logf(\"SchemaChangeTableEvent.String() = %s\", str)\n}\n\nfunc TestSchemaChangeTypeEvent(t *testing.T) {\n\tevent := &SchemaChangeTypeEvent{\n\t\tChange:   \"DROPPED\",\n\t\tKeyspace: \"test_keyspace\",\n\t\tTypeName: \"test_type\",\n\t}\n\n\tif event.Type() != ClusterEventTypeSchemaChangeType {\n\t\tt.Errorf(\"Type() = %v, want %v\", event.Type(), ClusterEventTypeSchemaChangeType)\n\t}\n\n\tstr := event.String()\n\tif str == \"\" {\n\t\tt.Error(\"String() returned empty string\")\n\t}\n\tt.Logf(\"SchemaChangeTypeEvent.String() = %s\", str)\n}\n\nfunc TestSchemaChangeFunctionEvent(t *testing.T) {\n\tevent := &SchemaChangeFunctionEvent{\n\t\tChange:    \"CREATED\",\n\t\tKeyspace:  \"test_keyspace\",\n\t\tFunction:  \"test_function\",\n\t\tArguments: []string{\"int\", \"text\"},\n\t}\n\n\tif event.Type() != ClusterEventTypeSchemaChangeFunction {\n\t\tt.Errorf(\"Type() = %v, want %v\", event.Type(), ClusterEventTypeSchemaChangeFunction)\n\t}\n\n\tstr := event.String()\n\tif str == \"\" {\n\t\tt.Error(\"String() returned empty string\")\n\t}\n\tt.Logf(\"SchemaChangeFunctionEvent.String() = %s\", str)\n}\n\nfunc TestSchemaChangeAggregateEvent(t *testing.T) 
{\n\tevent := &SchemaChangeAggregateEvent{\n\t\tChange:    \"UPDATED\",\n\t\tKeyspace:  \"test_keyspace\",\n\t\tAggregate: \"test_aggregate\",\n\t\tArguments: []string{\"int\"},\n\t}\n\n\tif event.Type() != ClusterEventTypeSchemaChangeAggregate {\n\t\tt.Errorf(\"Type() = %v, want %v\", event.Type(), ClusterEventTypeSchemaChangeAggregate)\n\t}\n\n\tstr := event.String()\n\tif str == \"\" {\n\t\tt.Error(\"String() returned empty string\")\n\t}\n\tt.Logf(\"SchemaChangeAggregateEvent.String() = %s\", str)\n}\n\nfunc TestClientRoutesChangedEvent(t *testing.T) {\n\tevent := &ClientRoutesChangedEvent{\n\t\tChangeType:    \"UPDATED\",\n\t\tConnectionIDs: []string{\"c1\"},\n\t\tHostIDs:       []string{},\n\t}\n\n\tif event.Type() != ClusterEventTypeClientRoutesChanged {\n\t\tt.Errorf(\"Type() = %v, want %v\", event.Type(), ClusterEventTypeClientRoutesChanged)\n\t}\n\n\tstr := event.String()\n\tif str == \"\" {\n\t\tt.Error(\"String() returned empty string\")\n\t}\n\tt.Logf(\"ClientRoutesChangedEvent.String() = %s\", str)\n}\n\nfunc TestEventInterface(t *testing.T) {\n\tevents := []Event{\n\t\t&TopologyChangeEvent{Change: \"NEW_NODE\", Host: net.ParseIP(\"127.0.0.1\"), Port: 9042},\n\t\t&StatusChangeEvent{Change: \"UP\", Host: net.ParseIP(\"127.0.0.2\"), Port: 9042},\n\t\t&SchemaChangeKeyspaceEvent{Change: \"CREATED\", Keyspace: \"ks\"},\n\t\t&SchemaChangeTableEvent{Change: \"UPDATED\", Keyspace: \"ks\", Table: \"tbl\"},\n\t\t&SchemaChangeTypeEvent{Change: \"DROPPED\", Keyspace: \"ks\", TypeName: \"typ\"},\n\t\t&SchemaChangeFunctionEvent{Change: \"CREATED\", Keyspace: \"ks\", Function: \"fn\", Arguments: []string{}},\n\t\t&SchemaChangeAggregateEvent{Change: \"UPDATED\", Keyspace: \"ks\", Aggregate: \"agg\", Arguments: []string{}},\n\t\t&ClientRoutesChangedEvent{ChangeType: \"UPDATED\", ConnectionIDs: []string{\"c1\"}, HostIDs: []string{}},\n\t}\n\n\tfor _, event := range events {\n\t\tif event.Type() < ClusterEventTypeTopologyChange || event.Type() > 
ClusterEventTypeClientRoutesChanged {\n\t\t\tt.Errorf(\"Invalid event type: %v\", event.Type())\n\t\t}\n\t\tif event.String() == \"\" {\n\t\t\tt.Errorf(\"Event.String() returned empty string for %T\", event)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "events.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/events\"\n\t\"github.com/gocql/gocql/internal/debug\"\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n)\n\ntype eventDebouncer struct {\n\tlogger   StdLogger\n\ttimer    *time.Timer\n\tcallback func([]frame)\n\tquit     chan struct{}\n\tname     string\n\tevents   []frame\n\tmu       sync.Mutex\n}\n\nfunc newEventDebouncer(name string, eventHandler func([]frame), logger StdLogger) *eventDebouncer {\n\te := &eventDebouncer{\n\t\tname:     name,\n\t\tquit:     make(chan struct{}),\n\t\ttimer:    time.NewTimer(eventDebounceTime),\n\t\tcallback: eventHandler,\n\t\tlogger:   logger,\n\t}\n\te.timer.Stop()\n\tgo e.flusher()\n\n\treturn e\n}\n\nfunc (e *eventDebouncer) stop() {\n\te.quit <- struct{}{} // sync with flusher\n\tclose(e.quit)\n}\n\nfunc (e *eventDebouncer) flusher() {\n\tfor {\n\t\tselect {\n\t\tcase 
<-e.timer.C:\n\t\t\te.mu.Lock()\n\t\t\te.flush()\n\t\t\te.mu.Unlock()\n\t\tcase <-e.quit:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nconst (\n\teventBufferSize   = 1000\n\teventDebounceTime = 1 * time.Second\n)\n\n// flush must be called with mu locked\nfunc (e *eventDebouncer) flush() {\n\tif len(e.events) == 0 {\n\t\treturn\n\t}\n\n\t// if the flush interval is faster than the callback then we will end up calling\n\t// the callback multiple times, probably a bad idea. In this case we could drop\n\t// frames?\n\tgo e.callback(e.events)\n\te.events = make([]frame, 0, eventBufferSize)\n}\n\nfunc (e *eventDebouncer) debounce(frame frame) {\n\te.mu.Lock()\n\te.timer.Reset(eventDebounceTime)\n\n\t// TODO: probably need a warning to track if this threshold is too low\n\tif len(e.events) < eventBufferSize {\n\t\te.events = append(e.events, frame)\n\t} else {\n\t\te.logger.Printf(\"%s: buffer full, dropping event frame: %s\", e.name, frame)\n\t}\n\n\te.mu.Unlock()\n}\n\nfunc (s *Session) publishEvent(event events.Event) {\n\tif s.eventBus == nil {\n\t\treturn\n\t}\n\n\tif !s.eventBus.PublishEvent(event) {\n\t\ts.logger.Printf(\"can't publish event, eventbus is full, increase Cluster.EventBusConfig.InputEventsQueueSize; event is dropped\")\n\t}\n}\n\nfunc (s *Session) handleEvent(frame frame) {\n\tif debug.Enabled {\n\t\ts.logger.Printf(\"gocql: handling frame: %v\\n\", frame)\n\t}\n\n\tif event := events.FrameToEvent(frame); event != nil {\n\t\ts.publishEvent(event)\n\t}\n\n\tswitch f := frame.(type) {\n\tcase *frm.SchemaChangeKeyspace, *frm.SchemaChangeFunction,\n\t\t*frm.SchemaChangeTable, *frm.SchemaChangeAggregate, *frm.SchemaChangeType:\n\n\t\ts.schemaEvents.debounce(frame)\n\tcase *frm.TopologyChangeEventFrame, *frm.StatusChangeEventFrame:\n\t\ts.nodeEvents.debounce(frame)\n\tcase *frm.ClientRoutesChanged:\n\t\tbreak\n\tdefault:\n\t\ts.logger.Printf(\"gocql: invalid event frame (%T): %v\\n\", f, f)\n\t}\n}\n\nfunc (s *Session) handleSchemaEvent(frames []frame) {\n\t// TODO: 
debounce events\n\tfor _, frame := range frames {\n\t\tswitch f := frame.(type) {\n\t\tcase *frm.SchemaChangeKeyspace:\n\t\t\ts.metadataDescriber.invalidateKeyspaceSchema(f.Keyspace)\n\t\t\ts.handleKeyspaceChange(f.Keyspace, f.Change)\n\t\tcase *frm.SchemaChangeTable:\n\t\t\ts.metadataDescriber.invalidateTableSchema(f.Keyspace, f.Object)\n\t\t\ts.handleTableChange(f.Keyspace, f.Object, f.Change)\n\t\tcase *frm.SchemaChangeAggregate:\n\t\t\ts.metadataDescriber.invalidateKeyspaceSchema(f.Keyspace)\n\t\tcase *frm.SchemaChangeFunction:\n\t\t\ts.metadataDescriber.invalidateKeyspaceSchema(f.Keyspace)\n\t\tcase *frm.SchemaChangeType:\n\t\t\ts.metadataDescriber.invalidateKeyspaceSchema(f.Keyspace)\n\t\t}\n\t}\n}\n\nfunc (s *Session) handleKeyspaceChange(keyspace, change string) {\n\ts.control.awaitSchemaAgreement()\n\tif change == \"DROPPED\" || change == \"UPDATED\" {\n\t\ts.metadataDescriber.RemoveTabletsWithKeyspace(keyspace)\n\t}\n\ts.policy.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: keyspace, Change: change})\n}\n\nfunc (s *Session) handleTableChange(keyspace, table, change string) {\n\tif change == \"DROPPED\" || change == \"UPDATED\" {\n\t\ts.metadataDescriber.RemoveTabletsWithTable(keyspace, table)\n\t}\n}\n\n// handleNodeEvent handles inbound status and topology change events.\n//\n// Status events are debounced by host IP; only the latest event is processed.\n//\n// Topology events are debounced by performing a single full topology refresh\n// whenever any topology event comes in.\n//\n// Processing topology change events before status change events ensures\n// that a NEW_NODE event is not dropped in favor of a newer UP event (which\n// would itself be dropped/ignored, as the node is not yet known).\nfunc (s *Session) handleNodeEvent(frames []frame) {\n\ttype nodeEvent struct {\n\t\tchange string\n\t\thost   net.IP\n\t\tport   int\n\t}\n\n\ttopologyEventReceived := false\n\t// status change events\n\tsEvents := make(map[string]*nodeEvent)\n\n\tfor _, frame := 
range frames {\n\t\tswitch f := frame.(type) {\n\t\tcase *frm.TopologyChangeEventFrame:\n\t\t\ttopologyEventReceived = true\n\t\tcase *frm.StatusChangeEventFrame:\n\t\t\tevent, ok := sEvents[f.Host.String()]\n\t\t\tif !ok {\n\t\t\t\tevent = &nodeEvent{change: f.Change, host: f.Host, port: f.Port}\n\t\t\t\tsEvents[f.Host.String()] = event\n\t\t\t}\n\t\t\tevent.change = f.Change\n\t\t}\n\t}\n\n\tif topologyEventReceived && !s.cfg.Events.DisableTopologyEvents {\n\t\ts.debounceRingRefresh()\n\t}\n\n\tfor _, f := range sEvents {\n\t\tif debug.Enabled {\n\t\t\ts.logger.Printf(\"gocql: dispatching status change event: %+v\\n\", f)\n\t\t}\n\n\t\t// ignore events we received if they were disabled\n\t\t// see https://github.com/apache/cassandra-gocql-driver/issues/1591\n\t\tswitch f.change {\n\t\tcase \"UP\":\n\t\t\tif !s.cfg.Events.DisableNodeStatusEvents {\n\t\t\t\ts.handleNodeUp(f.host, f.port)\n\t\t\t}\n\t\tcase \"DOWN\":\n\t\t\tif !s.cfg.Events.DisableNodeStatusEvents {\n\t\t\t\ts.handleNodeDown(f.host, f.port)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *Session) handleNodeUp(eventIp net.IP, eventPort int) {\n\tif debug.Enabled {\n\t\ts.logger.Printf(\"gocql: Session.handleNodeUp: %s:%d\\n\", eventIp.String(), eventPort)\n\t}\n\n\thost, ok := s.hostSource.getHostByIP(eventIp.String())\n\tif !ok {\n\t\ts.debounceRingRefresh()\n\t\treturn\n\t}\n\n\tif s.cfg.filterHost(host) {\n\t\treturn\n\t}\n\n\tif d := host.Version().nodeUpDelay(); d > 0 {\n\t\ttime.Sleep(d)\n\t}\n\ts.startPoolFill(host)\n}\n\nfunc (s *Session) startPoolFill(host *HostInfo) {\n\t// we let the pool call handleNodeConnected to change the host state\n\ts.pool.addHost(host)\n\ts.policy.AddHost(host)\n}\n\nfunc (s *Session) handleNodeConnected(host *HostInfo) {\n\tif debug.Enabled {\n\t\ts.logger.Printf(\"gocql: Session.handleNodeConnected: %s:%d\\n\", host.ConnectAddress(), host.Port())\n\t}\n\n\thost.setState(NodeUp)\n\n\tif !s.cfg.filterHost(host) {\n\t\ts.policy.HostUp(host)\n\t}\n}\n\nfunc (s *Session) 
handleNodeDown(ip net.IP, port int) {\n\tif debug.Enabled {\n\t\ts.logger.Printf(\"gocql: Session.handleNodeDown: %s:%d\\n\", ip.String(), port)\n\t}\n\n\thost, ok := s.hostSource.getHostByIP(ip.String())\n\tif ok {\n\t\thost.setState(NodeDown)\n\t\tif s.cfg.filterHost(host) {\n\t\t\treturn\n\t\t}\n\n\t\ts.policy.HostDown(host)\n\t\thostID := host.HostID()\n\t\ts.pool.removeHost(hostID)\n\t}\n}\n"
  },
  {
    "path": "events_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"net\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n)\n\nfunc TestEventDebounce(t *testing.T) {\n\tt.Parallel()\n\n\tconst eventCount = 150\n\tvar eventsSeen atomic.Int64\n\tdone := make(chan struct{}, 1)\n\n\tdebouncer := newEventDebouncer(\"testDebouncer\", func(events []frame) {\n\t\tif eventsSeen.Add(int64(len(events))) >= eventCount {\n\t\t\tselect {\n\t\t\tcase done <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}, &defaultLogger{})\n\tdefer debouncer.stop()\n\n\tfor i := 0; i < eventCount; i++ {\n\t\tdebouncer.debounce(&frm.StatusChangeEventFrame{\n\t\t\tChange: \"UP\",\n\t\t\tHost:   net.IPv4(127, 0, 0, 1),\n\t\t\tPort:   9042,\n\t\t})\n\t}\n\n\tselect {\n\tcase <-done:\n\tcase <-time.After(10 * time.Second):\n\t\tt.Fatalf(\"timed out waiting for events: saw %d of %d\", 
eventsSeen.Load(), eventCount)\n\t}\n\tif n := eventsSeen.Load(); n != eventCount {\n\t\tt.Fatalf(\"expected to see %d events but got %d\", eventCount, n)\n\t}\n}\n\n// TestEventDebounceMultipleFlushes verifies that the debouncer correctly\n// accumulates events across multiple flush cycles without panicking.\n// This is a regression test for a race where the callback could fire\n// more than once (due to timer re-fires), causing a negative WaitGroup\n// counter panic in the original test.\nfunc TestEventDebounceMultipleFlushes(t *testing.T) {\n\tt.Parallel()\n\n\tconst eventCount = 50\n\tvar eventsSeen atomic.Int64\n\tvar flushCount atomic.Int64\n\tfirstFlushDone := make(chan struct{}, 1)\n\tdone := make(chan struct{}, 1)\n\n\tdebouncer := newEventDebouncer(\"testDebouncerMulti\", func(events []frame) {\n\t\tif flushCount.Add(1) == 1 {\n\t\t\tselect {\n\t\t\tcase firstFlushDone <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t\tif eventsSeen.Add(int64(len(events))) >= eventCount {\n\t\t\tselect {\n\t\t\tcase done <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}, &defaultLogger{})\n\tdefer debouncer.stop()\n\n\t// Send events in two batches separated by more than eventDebounceTime\n\t// to force at least two separate flush cycles.\n\tfor i := 0; i < eventCount/2; i++ {\n\t\tdebouncer.debounce(&frm.StatusChangeEventFrame{\n\t\t\tChange: \"UP\",\n\t\t\tHost:   net.IPv4(127, 0, 0, 1),\n\t\t\tPort:   9042,\n\t\t})\n\t}\n\n\tselect {\n\tcase <-firstFlushDone:\n\tcase <-time.After(10 * time.Second):\n\t\tt.Fatalf(\"timed out waiting for first flush: saw %d events across %d flushes\", eventsSeen.Load(), flushCount.Load())\n\t}\n\n\tfor i := 0; i < eventCount/2; i++ {\n\t\tdebouncer.debounce(&frm.StatusChangeEventFrame{\n\t\t\tChange: \"UP\",\n\t\t\tHost:   net.IPv4(127, 0, 0, 1),\n\t\t\tPort:   9042,\n\t\t})\n\t}\n\n\tselect {\n\tcase <-done:\n\tcase <-time.After(10 * time.Second):\n\t\tt.Fatalf(\"timed out waiting for events: saw %d of %d\", eventsSeen.Load(), 
eventCount)\n\t}\n\tif n := eventsSeen.Load(); n != eventCount {\n\t\tt.Fatalf(\"expected to see %d events but got %d\", eventCount, n)\n\t}\n\tif f := flushCount.Load(); f < 2 {\n\t\tt.Fatalf(\"expected at least 2 flush cycles but got %d\", f)\n\t}\n}\n"
  },
  {
    "path": "events_unit_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/internal/tests/mock\"\n\t\"github.com/gocql/gocql/tablets\"\n\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n)\n\nvar (\n\ttypeVarchar = NativeType{proto: 4, typ: TypeVarchar}\n\ttypeBoolean = NativeType{proto: 4, typ: TypeBoolean}\n\ttypeDouble  = NativeType{proto: 4, typ: TypeDouble}\n\ttypeInt     = NativeType{proto: 4, typ: TypeInt}\n\ttypeMapSS   = CollectionType{\n\t\tNativeType: NativeType{proto: 4, typ: TypeMap},\n\t\tKey:        NativeType{proto: 4, typ: TypeVarchar},\n\t\tElem:       NativeType{proto: 4, typ: TypeVarchar},\n\t}\n\ttypeMapSB = CollectionType{\n\t\tNativeType: NativeType{proto: 4, typ: TypeMap},\n\t\tKey:        NativeType{proto: 4, typ: TypeVarchar},\n\t\tElem:       NativeType{proto: 4, typ: TypeBlob},\n\t}\n\ttypeSetS = CollectionType{\n\t\tNativeType: NativeType{proto: 4, typ: TypeSet},\n\t\tElem:       NativeType{proto: 4, typ: TypeVarchar},\n\t}\n)\n\nvar keyspaceMeta = resultMetadata{\n\tcolumns: []ColumnInfo{\n\t\t{Name: 
\"durable_writes\", TypeInfo: typeBoolean},\n\t\t{Name: \"replication\", TypeInfo: typeMapSS},\n\t},\n\tactualColCount: 2,\n\tcolCount:       2,\n}\n\nvar tableMeta = resultMetadata{\n\tcolumns: []ColumnInfo{\n\t\t{Name: \"table_name\", TypeInfo: typeVarchar},\n\t\t{Name: \"bloom_filter_fp_chance\", TypeInfo: typeDouble},\n\t\t{Name: \"caching\", TypeInfo: typeMapSS},\n\t\t{Name: \"comment\", TypeInfo: typeVarchar},\n\t\t{Name: \"compaction\", TypeInfo: typeMapSS},\n\t\t{Name: \"compression\", TypeInfo: typeMapSS},\n\t\t{Name: \"crc_check_chance\", TypeInfo: typeDouble},\n\t\t{Name: \"default_time_to_live\", TypeInfo: typeInt},\n\t\t{Name: \"gc_grace_seconds\", TypeInfo: typeInt},\n\t\t{Name: \"max_index_interval\", TypeInfo: typeInt},\n\t\t{Name: \"memtable_flush_period_in_ms\", TypeInfo: typeInt},\n\t\t{Name: \"min_index_interval\", TypeInfo: typeInt},\n\t\t{Name: \"speculative_retry\", TypeInfo: typeVarchar},\n\t\t{Name: \"flags\", TypeInfo: typeSetS},\n\t\t{Name: \"extensions\", TypeInfo: typeMapSB},\n\t},\n\tactualColCount: 15,\n\tcolCount:       15,\n}\n\nvar columnMeta = resultMetadata{\n\tcolumns: []ColumnInfo{\n\t\t{Name: \"table_name\", TypeInfo: typeVarchar},\n\t\t{Name: \"column_name\", TypeInfo: typeVarchar},\n\t\t{Name: \"clustering_order\", TypeInfo: typeVarchar},\n\t\t{Name: \"type\", TypeInfo: typeVarchar},\n\t\t{Name: \"kind\", TypeInfo: typeVarchar},\n\t\t{Name: \"position\", TypeInfo: typeInt},\n\t},\n\tactualColCount: 6,\n\tcolCount:       6,\n}\n\nfunc mustMarshal(info TypeInfo, value any) []byte {\n\tb, err := Marshal(info, value)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"mustMarshal(%v, %v): %v\", info, value, err))\n\t}\n\treturn b\n}\n\nfunc marshalRow(meta resultMetadata, values []any) [][]byte {\n\tif len(meta.columns) != len(values) {\n\t\tpanic(fmt.Sprintf(\"marshalRow: column count %d != value count %d\", len(meta.columns), len(values)))\n\t}\n\trow := make([][]byte, len(values))\n\tfor i, col := range meta.columns {\n\t\trow[i] = 
mustMarshal(col.TypeInfo, values[i])\n\t}\n\treturn row\n}\n\nfunc makeKeyspaceRow(durableWrites bool) [][]byte {\n\treplication := map[string]string{\n\t\t\"class\":              \"org.apache.cassandra.locator.SimpleStrategy\",\n\t\t\"replication_factor\": \"1\",\n\t}\n\treturn marshalRow(keyspaceMeta, []any{durableWrites, replication})\n}\n\nfunc makeTableRow(tableName string) [][]byte {\n\treturn marshalRow(tableMeta, []any{\n\t\ttableName,              // table_name\n\t\tfloat64(0.01),          // bloom_filter_fp_chance\n\t\tmap[string]string(nil), // caching\n\t\t\"\",                     // comment\n\t\tmap[string]string(nil), // compaction\n\t\tmap[string]string(nil), // compression\n\t\tfloat64(0),             // crc_check_chance\n\t\t0,                      // default_time_to_live\n\t\t0,                      // gc_grace_seconds\n\t\t0,                      // max_index_interval\n\t\t0,                      // memtable_flush_period_in_ms\n\t\t0,                      // min_index_interval\n\t\t\"\",                     // speculative_retry\n\t\t[]string(nil),          // flags\n\t\tmap[string][]byte(nil), // extensions\n\t})\n}\n\nfunc makeColumnRow(tableName, colName, kind string, position int) [][]byte {\n\treturn marshalRow(columnMeta, []any{\n\t\ttableName, // table_name\n\t\tcolName,   // column_name\n\t\t\"none\",    // clustering_order\n\t\t\"int\",     // type\n\t\tkind,      // kind\n\t\tposition,  // position\n\t})\n}\n\nfunc makeIter(meta resultMetadata, rows ...[][]byte) *Iter {\n\tif len(rows) == 0 {\n\t\treturn &Iter{}\n\t}\n\tvar allData [][]byte\n\tfor _, row := range rows {\n\t\tallData = append(allData, row...)\n\t}\n\treturn &Iter{\n\t\tmeta:    meta,\n\t\tframer:  &mock.MockFramer{Data: allData},\n\t\tnumRows: len(rows),\n\t}\n}\n\ntype tableInfo struct {\n\tname    string\n\tcolumns []columnInfo\n}\n\ntype columnInfo struct {\n\tname     string\n\tkind     string // \"partition_key\", \"clustering\", \"regular\"\n\tposition 
int\n}\n\ntype schemaDataMock struct {\n\tfakeControlConn\n\n\tmu                        sync.Mutex\n\tawaitSchemaAgreementCalls int\n\tqueries                   []queryRecord\n\n\tknownKeyspaces map[string][]tableInfo\n\tqueryDelay     time.Duration\n\tqueryError     error // if set, querySystem returns an Iter with this error\n}\n\nfunc (m *schemaDataMock) awaitSchemaAgreement() error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tm.awaitSchemaAgreementCalls++\n\treturn nil\n}\n\nfunc (m *schemaDataMock) query(statement string, values ...any) *Iter {\n\tm.mu.Lock()\n\tm.queries = append(m.queries, queryRecord{method: \"query\", stmt: statement})\n\tdelay := m.queryDelay\n\tm.mu.Unlock()\n\n\tif delay > 0 {\n\t\ttime.Sleep(delay)\n\t}\n\n\treturn &Iter{}\n}\n\nfunc (m *schemaDataMock) querySystem(statement string, values ...any) *Iter {\n\tm.mu.Lock()\n\tm.queries = append(m.queries, queryRecord{method: \"querySystem\", stmt: statement})\n\tdelay := m.queryDelay\n\tqueryErr := m.queryError\n\tm.mu.Unlock()\n\n\tif delay > 0 {\n\t\ttime.Sleep(delay)\n\t}\n\n\tif queryErr != nil {\n\t\treturn &Iter{err: queryErr}\n\t}\n\n\tif strings.HasPrefix(statement, \"SELECT durable_writes, replication FROM system_schema.keyspaces\") {\n\t\tksName, _ := values[0].(string)\n\t\tif _, ok := m.knownKeyspaces[ksName]; ok {\n\t\t\treturn makeIter(keyspaceMeta, makeKeyspaceRow(true))\n\t\t}\n\t\treturn &Iter{}\n\t}\n\n\tif strings.HasPrefix(statement, \"SELECT * FROM system_schema.tables WHERE keyspace_name = ?\") &&\n\t\t!strings.Contains(statement, \"AND table_name\") {\n\t\tksName, _ := values[0].(string)\n\t\ttables, ok := m.knownKeyspaces[ksName]\n\t\tif !ok || len(tables) == 0 {\n\t\t\treturn &Iter{}\n\t\t}\n\t\tvar rows [][]byte\n\t\tfor _, t := range tables {\n\t\t\trows = append(rows, makeTableRow(t.name)...)\n\t\t}\n\t\treturn &Iter{\n\t\t\tmeta:    tableMeta,\n\t\t\tframer:  &mock.MockFramer{Data: rows},\n\t\t\tnumRows: len(tables),\n\t\t}\n\t}\n\n\tif 
strings.HasPrefix(statement, \"SELECT * FROM system_schema.tables WHERE keyspace_name = ? AND table_name = ?\") {\n\t\tksName, _ := values[0].(string)\n\t\ttblName, _ := values[1].(string)\n\t\ttables, ok := m.knownKeyspaces[ksName]\n\t\tif ok {\n\t\t\tfor _, t := range tables {\n\t\t\t\tif t.name == tblName {\n\t\t\t\t\treturn makeIter(tableMeta, makeTableRow(t.name))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn &Iter{}\n\t}\n\n\tif strings.HasPrefix(statement, \"SELECT \"+columnMetadataColumns+\" FROM system_schema.columns WHERE keyspace_name = ?\") &&\n\t\t!strings.Contains(statement, \"AND table_name\") {\n\t\tksName, _ := values[0].(string)\n\t\ttables, ok := m.knownKeyspaces[ksName]\n\t\tif !ok {\n\t\t\treturn &Iter{}\n\t\t}\n\t\tvar rows [][]byte\n\t\tcount := 0\n\t\tfor _, t := range tables {\n\t\t\tfor _, c := range t.columns {\n\t\t\t\trows = append(rows, makeColumnRow(t.name, c.name, c.kind, c.position)...)\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t\tif count == 0 {\n\t\t\treturn &Iter{}\n\t\t}\n\t\treturn &Iter{\n\t\t\tmeta:    columnMeta,\n\t\t\tframer:  &mock.MockFramer{Data: rows},\n\t\t\tnumRows: count,\n\t\t}\n\t}\n\n\tif strings.HasPrefix(statement, \"SELECT \"+columnMetadataColumns+\" FROM system_schema.columns WHERE keyspace_name = ? 
AND table_name = ?\") {\n\t\tksName, _ := values[0].(string)\n\t\ttblName, _ := values[1].(string)\n\t\ttables, ok := m.knownKeyspaces[ksName]\n\t\tif !ok {\n\t\t\treturn &Iter{}\n\t\t}\n\t\tvar rows [][]byte\n\t\tcount := 0\n\t\tfor _, t := range tables {\n\t\t\tif t.name != tblName {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, c := range t.columns {\n\t\t\t\trows = append(rows, makeColumnRow(t.name, c.name, c.kind, c.position)...)\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t\tif count == 0 {\n\t\t\treturn &Iter{}\n\t\t}\n\t\treturn &Iter{\n\t\t\tmeta:    columnMeta,\n\t\t\tframer:  &mock.MockFramer{Data: rows},\n\t\t\tnumRows: count,\n\t\t}\n\t}\n\n\treturn &Iter{}\n}\n\nfunc (m *schemaDataMock) setQueryError(err error) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tm.queryError = err\n}\n\nfunc (m *schemaDataMock) resetQueries() {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tm.queries = nil\n}\n\nfunc (m *schemaDataMock) getStatements() []string {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tstmts := make([]string, len(m.queries))\n\tfor i, q := range m.queries {\n\t\tstmts[i] = q.stmt\n\t}\n\treturn stmts\n}\n\nfunc (m *schemaDataMock) getQueryCount() int {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\treturn len(m.queries)\n}\n\nfunc (m *schemaDataMock) getAwaitSchemaAgreementCalls() int {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\treturn m.awaitSchemaAgreementCalls\n}\n\nfunc newSchemaEventTestSession(control controlConnection, policy HostSelectionPolicy, keyspace string) *Session {\n\ts := &Session{\n\t\tcontrol: control,\n\t\tpolicy:  policy,\n\t\tlogger:  log.Default(),\n\t\tcfg:     ClusterConfig{Keyspace: keyspace},\n\t}\n\ts.hostSource = &ringDescriber{cfg: &s.cfg, logger: s.logger}\n\ts.metadataDescriber = &metadataDescriber{\n\t\tsession: s,\n\t\tmetadata: &Metadata{\n\t\t\ttabletsMetadata: tablets.NewCowTabletList(),\n\t\t},\n\t}\n\truntime.SetFinalizer(s, func(s *Session) {\n\t\ts.Close()\n\t})\n\treturn s\n}\n\nfunc newSchemaEventTestSessionWithMock(mockCtrl *schemaDataMock) 
*Session {\n\ts := newSchemaEventTestSession(mockCtrl, &trackingPolicy{}, \"\")\n\ts.useSystemSchema = true\n\ts.hasAggregatesAndFunctions = false\n\treturn s\n}\n\nfunc populateKeyspace(s *Session, ksName string, tableNames ...string) {\n\tks := &KeyspaceMetadata{\n\t\tName:          ksName,\n\t\tDurableWrites: true,\n\t\tTables:        make(map[string]*TableMetadata),\n\t}\n\tfor _, tbl := range tableNames {\n\t\tks.Tables[tbl] = &TableMetadata{\n\t\t\tKeyspace: ksName,\n\t\t\tName:     tbl,\n\t\t\tColumns: map[string]*ColumnMetadata{\n\t\t\t\t\"id\": {Keyspace: ksName, Table: tbl, Name: \"id\", Kind: ColumnPartitionKey},\n\t\t\t},\n\t\t}\n\t}\n\ts.metadataDescriber.metadata.keyspaceMetadata.set(ksName, ks)\n}\n\ntype trackingPolicy struct {\n\troundRobinHostPolicy\n\tmu                   sync.Mutex\n\tkeyspaceChangedCalls []KeyspaceUpdateEvent\n}\n\nfunc (t *trackingPolicy) KeyspaceChanged(event KeyspaceUpdateEvent) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tt.keyspaceChangedCalls = append(t.keyspaceChangedCalls, event)\n}\n\nfunc (t *trackingPolicy) getKeyspaceChangedCalls() []KeyspaceUpdateEvent {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tdst := make([]KeyspaceUpdateEvent, len(t.keyspaceChangedCalls))\n\tcopy(dst, t.keyspaceChangedCalls)\n\treturn dst\n}\n\ntype queryRecord struct {\n\tmethod string\n\tstmt   string\n}\n\nfunc addTestTablets(t *testing.T, session *Session, ksName, tblName string) {\n\tt.Helper()\n\tt1, err := tablets.TabletInfoBuilder{\n\t\tKeyspaceName: ksName,\n\t\tTableName:    tblName,\n\t\tFirstToken:   0,\n\t\tLastToken:    100,\n\t}.Build()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt2, err := tablets.TabletInfoBuilder{\n\t\tKeyspaceName: ksName,\n\t\tTableName:    tblName,\n\t\tFirstToken:   101,\n\t\tLastToken:    200,\n\t}.Build()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsession.metadataDescriber.AddTablet(t1)\n\tsession.metadataDescriber.AddTablet(t2)\n\tsession.metadataDescriber.metadata.tabletsMetadata.Flush()\n}\n\nfunc 
TestHandleSchemaEvent(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"cache_state\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\ttests := []struct {\n\t\t\tname           string\n\t\t\tkeyspaces      map[string][]string // ks → tables to pre-populate\n\t\t\ttablets        [][2]string         // (ks, table) pairs to add tablets for\n\t\t\tevent          frame\n\t\t\twantKsGone     []string    // keyspaces removed from cache\n\t\t\twantKsPresent  []string    // keyspaces still in cache\n\t\t\twantTblGone    [][2]string // (ks, table) removed from Tables map\n\t\t\twantTblPresent [][2]string // (ks, table) still in Tables map\n\t\t\twantTblInvalid [][2]string // (ks, table) in tablesInvalidated\n\t\t\twantTablets    int         // expected tablet count; -1 to skip check\n\t\t}{\n\t\t\t{\n\t\t\t\tname:        \"keyspace/CREATED clears cache\",\n\t\t\t\tkeyspaces:   map[string][]string{\"test_ks\": {\"tbl_a\", \"tbl_b\"}},\n\t\t\t\tevent:       &frm.SchemaChangeKeyspace{Change: \"CREATED\", Keyspace: \"test_ks\"},\n\t\t\t\twantKsGone:  []string{\"test_ks\"},\n\t\t\t\twantTablets: -1,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"keyspace/UPDATED clears cache and removes tablets\",\n\t\t\t\tkeyspaces:   map[string][]string{\"test_ks\": {\"tbl_a\"}},\n\t\t\t\ttablets:     [][2]string{{\"test_ks\", \"tbl_a\"}},\n\t\t\t\tevent:       &frm.SchemaChangeKeyspace{Change: \"UPDATED\", Keyspace: \"test_ks\"},\n\t\t\t\twantKsGone:  []string{\"test_ks\"},\n\t\t\t\twantTablets: 0,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"keyspace/DROPPED clears cache and removes tablets\",\n\t\t\t\tkeyspaces:   map[string][]string{\"test_ks\": {\"tbl_a\"}},\n\t\t\t\ttablets:     [][2]string{{\"test_ks\", \"tbl_a\"}},\n\t\t\t\tevent:       &frm.SchemaChangeKeyspace{Change: \"DROPPED\", Keyspace: \"test_ks\"},\n\t\t\t\twantKsGone:  []string{\"test_ks\"},\n\t\t\t\twantTablets: 0,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"keyspace/CREATED does not remove tablets\",\n\t\t\t\tkeyspaces:   
map[string][]string{\"test_ks\": {\"tbl_a\"}},\n\t\t\t\ttablets:     [][2]string{{\"test_ks\", \"tbl_a\"}},\n\t\t\t\tevent:       &frm.SchemaChangeKeyspace{Change: \"CREATED\", Keyspace: \"test_ks\"},\n\t\t\t\twantKsGone:  []string{\"test_ks\"},\n\t\t\t\twantTablets: 2,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:           \"table/CREATED invalidates table only\",\n\t\t\t\tkeyspaces:      map[string][]string{\"test_ks\": {\"tbl_a\", \"tbl_b\"}},\n\t\t\t\tevent:          &frm.SchemaChangeTable{Change: \"CREATED\", Keyspace: \"test_ks\", Object: \"tbl_a\"},\n\t\t\t\twantKsPresent:  []string{\"test_ks\"},\n\t\t\t\twantTblGone:    [][2]string{{\"test_ks\", \"tbl_a\"}},\n\t\t\t\twantTblPresent: [][2]string{{\"test_ks\", \"tbl_b\"}},\n\t\t\t\twantTblInvalid: [][2]string{{\"test_ks\", \"tbl_a\"}},\n\t\t\t\twantTablets:    -1,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:           \"table/UPDATED invalidates table and removes tablets\",\n\t\t\t\tkeyspaces:      map[string][]string{\"test_ks\": {\"tbl_a\", \"tbl_b\"}},\n\t\t\t\ttablets:        [][2]string{{\"test_ks\", \"tbl_a\"}},\n\t\t\t\tevent:          &frm.SchemaChangeTable{Change: \"UPDATED\", Keyspace: \"test_ks\", Object: \"tbl_a\"},\n\t\t\t\twantKsPresent:  []string{\"test_ks\"},\n\t\t\t\twantTblGone:    [][2]string{{\"test_ks\", \"tbl_a\"}},\n\t\t\t\twantTblPresent: [][2]string{{\"test_ks\", \"tbl_b\"}},\n\t\t\t\twantTablets:    0,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"table/DROPPED removes tablets\",\n\t\t\t\tkeyspaces:   map[string][]string{\"test_ks\": {\"tbl_a\"}},\n\t\t\t\ttablets:     [][2]string{{\"test_ks\", \"tbl_a\"}},\n\t\t\t\tevent:       &frm.SchemaChangeTable{Change: \"DROPPED\", Keyspace: \"test_ks\", Object: \"tbl_a\"},\n\t\t\t\twantTablets: 0,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"table/CREATED does not remove tablets\",\n\t\t\t\tkeyspaces:   map[string][]string{\"test_ks\": {\"tbl_a\"}},\n\t\t\t\ttablets:     [][2]string{{\"test_ks\", \"tbl_a\"}},\n\t\t\t\tevent:       &frm.SchemaChangeTable{Change: 
\"CREATED\", Keyspace: \"test_ks\", Object: \"tbl_a\"},\n\t\t\t\twantTablets: 2,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"type/CREATED clears entire keyspace\",\n\t\t\t\tkeyspaces:   map[string][]string{\"test_ks\": {\"tbl_a\", \"tbl_b\"}},\n\t\t\t\tevent:       &frm.SchemaChangeType{Change: \"CREATED\", Keyspace: \"test_ks\", Object: \"my_type\"},\n\t\t\t\twantKsGone:  []string{\"test_ks\"},\n\t\t\t\twantTablets: -1,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"function/CREATED clears entire keyspace\",\n\t\t\t\tkeyspaces:   map[string][]string{\"test_ks\": {\"tbl_a\"}},\n\t\t\t\tevent:       &frm.SchemaChangeFunction{Change: \"CREATED\", Keyspace: \"test_ks\", Name: \"fn\", Args: []string{\"int\"}},\n\t\t\t\twantKsGone:  []string{\"test_ks\"},\n\t\t\t\twantTablets: -1,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"aggregate/CREATED clears entire keyspace\",\n\t\t\t\tkeyspaces:   map[string][]string{\"test_ks\": {\"tbl_a\"}},\n\t\t\t\tevent:       &frm.SchemaChangeAggregate{Change: \"CREATED\", Keyspace: \"test_ks\", Name: \"agg\", Args: []string{\"int\"}},\n\t\t\t\twantKsGone:  []string{\"test_ks\"},\n\t\t\t\twantTablets: -1,\n\t\t\t},\n\t\t\t// Cross-isolation\n\t\t\t{\n\t\t\t\tname:           \"keyspace/DROPPED does not affect other keyspace\",\n\t\t\t\tkeyspaces:      map[string][]string{\"ks_a\": {\"tbl_a\"}, \"ks_b\": {\"tbl_b\"}},\n\t\t\t\tevent:          &frm.SchemaChangeKeyspace{Change: \"DROPPED\", Keyspace: \"ks_a\"},\n\t\t\t\twantKsGone:     []string{\"ks_a\"},\n\t\t\t\twantKsPresent:  []string{\"ks_b\"},\n\t\t\t\twantTblPresent: [][2]string{{\"ks_b\", \"tbl_b\"}},\n\t\t\t\twantTablets:    -1,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:           \"table/UPDATED does not affect other tables\",\n\t\t\t\tkeyspaces:      map[string][]string{\"test_ks\": {\"tbl_a\", \"tbl_b\", \"tbl_c\"}},\n\t\t\t\tevent:          &frm.SchemaChangeTable{Change: \"UPDATED\", Keyspace: \"test_ks\", Object: \"tbl_a\"},\n\t\t\t\twantKsPresent:  []string{\"test_ks\"},\n\t\t\t\twantTblGone:  
  [][2]string{{\"test_ks\", \"tbl_a\"}},\n\t\t\t\twantTblPresent: [][2]string{{\"test_ks\", \"tbl_b\"}, {\"test_ks\", \"tbl_c\"}},\n\t\t\t\twantTablets:    -1,\n\t\t\t},\n\t\t\t// Tablet cross-isolation\n\t\t\t{\n\t\t\t\tname:        \"table/DROPPED for different table keeps tablets\",\n\t\t\t\tkeyspaces:   map[string][]string{\"test_ks\": {\"tbl_a\", \"tbl_b\"}},\n\t\t\t\ttablets:     [][2]string{{\"test_ks\", \"tbl_a\"}},\n\t\t\t\tevent:       &frm.SchemaChangeTable{Change: \"DROPPED\", Keyspace: \"test_ks\", Object: \"tbl_b\"},\n\t\t\t\twantTablets: 2,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"keyspace/DROPPED for different keyspace keeps tablets\",\n\t\t\t\tkeyspaces:   map[string][]string{\"ks_a\": {\"tbl_a\"}, \"ks_b\": {\"tbl_b\"}},\n\t\t\t\ttablets:     [][2]string{{\"ks_a\", \"tbl_a\"}},\n\t\t\t\tevent:       &frm.SchemaChangeKeyspace{Change: \"DROPPED\", Keyspace: \"ks_b\"},\n\t\t\t\twantTablets: 2,\n\t\t\t},\n\t\t}\n\n\t\tfor _, tt := range tests {\n\t\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\t\t\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\t\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\t\t\tdefer s.Close()\n\t\t\t\tfor ks, tables := range tt.keyspaces {\n\t\t\t\t\tpopulateKeyspace(s, ks, tables...)\n\t\t\t\t}\n\t\t\t\tfor _, tb := range tt.tablets {\n\t\t\t\t\taddTestTablets(t, s, tb[0], tb[1])\n\t\t\t\t}\n\n\t\t\t\ts.handleSchemaEvent([]frame{tt.event})\n\t\t\t\ts.metadataDescriber.metadata.tabletsMetadata.Flush()\n\n\t\t\t\tfor _, ks := range tt.wantKsGone {\n\t\t\t\t\tif _, found := s.metadataDescriber.metadata.keyspaceMetadata.getKeyspace(ks); found {\n\t\t\t\t\t\tt.Errorf(\"keyspace %q should have been removed from cache\", ks)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, ks := range tt.wantKsPresent {\n\t\t\t\t\tif _, found := s.metadataDescriber.metadata.keyspaceMetadata.getKeyspace(ks); !found {\n\t\t\t\t\t\tt.Errorf(\"keyspace %q should still be in cache\", 
ks)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, pair := range tt.wantTblGone {\n\t\t\t\t\tks, _ := s.metadataDescriber.metadata.keyspaceMetadata.getKeyspace(pair[0])\n\t\t\t\t\tif ks != nil {\n\t\t\t\t\t\tif _, ok := ks.Tables[pair[1]]; ok {\n\t\t\t\t\t\t\tt.Errorf(\"table %s.%s should have been removed from Tables map\", pair[0], pair[1])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, pair := range tt.wantTblPresent {\n\t\t\t\t\tks, _ := s.metadataDescriber.metadata.keyspaceMetadata.getKeyspace(pair[0])\n\t\t\t\t\tif ks == nil {\n\t\t\t\t\t\tt.Errorf(\"keyspace %q not found when checking table %s\", pair[0], pair[1])\n\t\t\t\t\t} else if _, ok := ks.Tables[pair[1]]; !ok {\n\t\t\t\t\t\tt.Errorf(\"table %s.%s should still be in Tables map\", pair[0], pair[1])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, pair := range tt.wantTblInvalid {\n\t\t\t\t\tks, _ := s.metadataDescriber.metadata.keyspaceMetadata.getKeyspace(pair[0])\n\t\t\t\t\tif ks == nil {\n\t\t\t\t\t\tt.Errorf(\"keyspace %q not found when checking tablesInvalidated %s\", pair[0], pair[1])\n\t\t\t\t\t} else if _, ok := ks.tablesInvalidated[pair[1]]; !ok {\n\t\t\t\t\t\tt.Errorf(\"table %s.%s should be in tablesInvalidated\", pair[0], pair[1])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif tt.wantTablets >= 0 {\n\t\t\t\t\tif n := len(s.metadataDescriber.getTablets()); n != tt.wantTablets {\n\t\t\t\t\t\tt.Errorf(\"expected %d tablets, got %d\", tt.wantTablets, n)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"callbacks\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\ttests := []struct {\n\t\t\tname                string\n\t\t\tpopulateTables      []string\n\t\t\tevent               frame\n\t\t\twantSchemaAgreement int\n\t\t\twantKsChanged       []KeyspaceUpdateEvent\n\t\t}{\n\t\t\t{\n\t\t\t\tname:                \"keyspace event calls schema agreement and policy\",\n\t\t\t\tevent:               &frm.SchemaChangeKeyspace{Change: \"UPDATED\", Keyspace: \"test_ks\"},\n\t\t\t\twantSchemaAgreement: 
1,\n\t\t\t\twantKsChanged:       []KeyspaceUpdateEvent{{Keyspace: \"test_ks\", Change: \"UPDATED\"}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:                \"table event: no schema agreement, no policy callback\",\n\t\t\t\tpopulateTables:      []string{\"tbl_a\"},\n\t\t\t\tevent:               &frm.SchemaChangeTable{Change: \"CREATED\", Keyspace: \"test_ks\", Object: \"tbl_a\"},\n\t\t\t\twantSchemaAgreement: 0,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:                \"type event: no schema agreement, no policy callback\",\n\t\t\t\tevent:               &frm.SchemaChangeType{Change: \"CREATED\", Keyspace: \"test_ks\", Object: \"my_type\"},\n\t\t\t\twantSchemaAgreement: 0,\n\t\t\t},\n\t\t}\n\n\t\tfor _, tt := range tests {\n\t\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\t\t\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\t\t\tpolicy := &trackingPolicy{}\n\t\t\t\ts := newSchemaEventTestSession(ctrl, policy, \"\")\n\t\t\t\tdefer s.Close()\n\t\t\t\ts.useSystemSchema = true\n\t\t\t\tpopulateKeyspace(s, \"test_ks\", tt.populateTables...)\n\n\t\t\t\ts.handleSchemaEvent([]frame{tt.event})\n\n\t\t\t\tif got := ctrl.getAwaitSchemaAgreementCalls(); got != tt.wantSchemaAgreement {\n\t\t\t\t\tt.Fatalf(\"awaitSchemaAgreement: got %d, want %d\", got, tt.wantSchemaAgreement)\n\t\t\t\t}\n\t\t\t\tkc := policy.getKeyspaceChangedCalls()\n\t\t\t\tif len(tt.wantKsChanged) == 0 {\n\t\t\t\t\tif len(kc) != 0 {\n\t\t\t\t\t\tt.Fatalf(\"KeyspaceChanged should not be called, got %+v\", kc)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif len(kc) != len(tt.wantKsChanged) {\n\t\t\t\t\t\tt.Fatalf(\"KeyspaceChanged: got %d calls, want %d\", len(kc), len(tt.wantKsChanged))\n\t\t\t\t\t}\n\t\t\t\t\tfor i, want := range tt.wantKsChanged {\n\t\t\t\t\t\tif kc[i] != want {\n\t\t\t\t\t\t\tt.Errorf(\"KeyspaceChanged[%d]: got %+v, want %+v\", i, kc[i], want)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tfullRefresh := func(ksName string) map[string]int 
{\n\t\treturn map[string]int{\n\t\t\t\"SELECT durable_writes, replication FROM system_schema.keyspaces WHERE keyspace_name = ?\": 1,\n\t\t\t\"SELECT * FROM system_schema.tables WHERE keyspace_name = ?\":                              1,\n\t\t\t\"SELECT \" + columnMetadataColumns + \" FROM system_schema.columns WHERE keyspace_name = ?\": 1,\n\t\t\t\"SELECT * FROM system_schema.types WHERE keyspace_name = ?\":                               1,\n\t\t\t\"SELECT * FROM system_schema.indexes WHERE keyspace_name = ?\":                             1,\n\t\t\t\"SELECT * FROM system_schema.views WHERE keyspace_name = ?\":                               1,\n\t\t\tfmt.Sprintf(\"DESCRIBE KEYSPACE %s WITH INTERNALS\", ksName):                                1,\n\t\t}\n\t}\n\ttableRefresh := map[string]int{\n\t\t\"SELECT * FROM system_schema.tables WHERE keyspace_name = ? AND table_name = ?\":                              1,\n\t\t\"SELECT \" + columnMetadataColumns + \" FROM system_schema.columns WHERE keyspace_name = ? AND table_name = ?\": 1,\n\t\t\"SELECT * FROM system_schema.indexes WHERE keyspace_name = ? AND table_name = ?\":                             1,\n\t\t\"SELECT * FROM system_schema.views WHERE keyspace_name = ? AND base_table_name = ? 
ALLOW FILTERING\":          1,\n\t}\n\tnoQueries := map[string]int{}\n\n\tassertExpectedQueries := func(t *testing.T, ctrl *schemaDataMock, expected map[string]int) {\n\t\tt.Helper()\n\t\tstmts := ctrl.getStatements()\n\t\tif len(expected) == 0 {\n\t\t\tif len(stmts) != 0 {\n\t\t\t\tt.Errorf(\"expected 0 queries, got %d:\\n%s\", len(stmts), strings.Join(stmts, \"\\n\"))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\twantTotal := 0\n\t\tfor stmt, wantCount := range expected {\n\t\t\twantTotal += wantCount\n\t\t\tgotCount := 0\n\t\t\tfor _, s := range stmts {\n\t\t\t\tif s == stmt {\n\t\t\t\t\tgotCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif gotCount != wantCount {\n\t\t\t\tt.Errorf(\"query %q: got %d, want %d\", stmt, gotCount, wantCount)\n\t\t\t}\n\t\t}\n\t\tif len(stmts) != wantTotal {\n\t\t\tt.Errorf(\"total queries: got %d, want %d\\nqueries:\\n%s\",\n\t\t\t\tlen(stmts), wantTotal, strings.Join(stmts, \"\\n\"))\n\t\t}\n\t}\n\n\tt.Run(\"GetKeyspace\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\ttests := []struct {\n\t\t\tname                string\n\t\t\tknownKeyspaces      map[string][]tableInfo\n\t\t\tpopulateKs          map[string][]string\n\t\t\tdisableSystemSchema bool\n\t\t\tevent               frame // nil = no event\n\t\t\tgetKeyspace         string\n\t\t\twantError           bool\n\t\t\texpectedQueries     map[string]int // nil = skip check; empty = expect 0 queries\n\t\t\twantNoRequery       bool           // second identical call fires 0 queries\n\t\t}{\n\t\t\t{\n\t\t\t\tname: \"after keyspace event: refreshes and caches\",\n\t\t\t\tknownKeyspaces: map[string][]tableInfo{\n\t\t\t\t\t\"test_ks\": {{name: \"tbl_a\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}}},\n\t\t\t\t},\n\t\t\t\tpopulateKs:      map[string][]string{\"test_ks\": {\"tbl_a\"}},\n\t\t\t\tevent:           &frm.SchemaChangeKeyspace{Change: \"UPDATED\", Keyspace: \"test_ks\"},\n\t\t\t\tgetKeyspace:     \"test_ks\",\n\t\t\t\texpectedQueries: 
fullRefresh(\"test_ks\"),\n\t\t\t\twantNoRequery:   true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:            \"after table event: returns cached, no queries\",\n\t\t\t\tpopulateKs:      map[string][]string{\"test_ks\": {\"tbl_a\"}},\n\t\t\t\tevent:           &frm.SchemaChangeTable{Change: \"UPDATED\", Keyspace: \"test_ks\", Object: \"tbl_a\"},\n\t\t\t\tgetKeyspace:     \"test_ks\",\n\t\t\t\texpectedQueries: noQueries,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: \"after type event: refreshes and caches\",\n\t\t\t\tknownKeyspaces: map[string][]tableInfo{\n\t\t\t\t\t\"test_ks\": {{name: \"tbl_a\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}}},\n\t\t\t\t},\n\t\t\t\tpopulateKs:      map[string][]string{\"test_ks\": {\"tbl_a\"}},\n\t\t\t\tevent:           &frm.SchemaChangeType{Change: \"CREATED\", Keyspace: \"test_ks\", Object: \"my_type\"},\n\t\t\t\tgetKeyspace:     \"test_ks\",\n\t\t\t\texpectedQueries: fullRefresh(\"test_ks\"),\n\t\t\t\twantNoRequery:   true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:            \"uncached keyspace: refreshes and caches\",\n\t\t\t\tknownKeyspaces:  map[string][]tableInfo{\"new_ks\": {}},\n\t\t\t\tgetKeyspace:     \"new_ks\",\n\t\t\t\texpectedQueries: fullRefresh(\"new_ks\"),\n\t\t\t\twantNoRequery:   true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"unknown keyspace: returns error\",\n\t\t\t\tgetKeyspace: \"nonexistent\",\n\t\t\t\twantError:   true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:                \"useSystemSchema=false: returns error\",\n\t\t\t\tdisableSystemSchema: true,\n\t\t\t\tgetKeyspace:         \"test_ks\",\n\t\t\t\twantError:           true,\n\t\t\t},\n\t\t}\n\n\t\tfor _, tt := range tests {\n\t\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\t\t\t\tknownKs := tt.knownKeyspaces\n\t\t\t\tif knownKs == nil {\n\t\t\t\t\tknownKs = map[string][]tableInfo{}\n\t\t\t\t}\n\t\t\t\tctrl := &schemaDataMock{knownKeyspaces: knownKs}\n\t\t\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\t\t\tdefer 
s.Close()\n\t\t\t\tif tt.disableSystemSchema {\n\t\t\t\t\ts.useSystemSchema = false\n\t\t\t\t}\n\t\t\t\tfor ks, tables := range tt.populateKs {\n\t\t\t\t\tpopulateKeyspace(s, ks, tables...)\n\t\t\t\t}\n\t\t\t\tif tt.event != nil {\n\t\t\t\t\ts.handleSchemaEvent([]frame{tt.event})\n\t\t\t\t}\n\n\t\t\t\tctrl.resetQueries()\n\n\t\t\t\tks, err := s.metadataDescriber.GetKeyspace(tt.getKeyspace)\n\t\t\t\tif tt.wantError {\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tt.Fatal(\"expected error\")\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"GetKeyspace failed: %v\", err)\n\t\t\t\t}\n\t\t\t\tif ks.Name != tt.getKeyspace {\n\t\t\t\t\tt.Fatalf(\"expected keyspace %s, got %s\", tt.getKeyspace, ks.Name)\n\t\t\t\t}\n\t\t\t\tif tt.expectedQueries != nil {\n\t\t\t\t\tassertExpectedQueries(t, ctrl, tt.expectedQueries)\n\t\t\t\t}\n\t\t\t\tif tt.wantNoRequery {\n\t\t\t\t\tctrl.resetQueries()\n\t\t\t\t\t_, err = s.metadataDescriber.GetKeyspace(tt.getKeyspace)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatalf(\"second GetKeyspace failed: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tassertExpectedQueries(t, ctrl, noQueries)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"GetTable\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\ttests := []struct {\n\t\t\tname            string\n\t\t\tknownKeyspaces  map[string][]tableInfo\n\t\t\tpopulateKs      map[string][]string\n\t\t\tevent           frame\n\t\t\tgetTable        [2]string // [ks, table]\n\t\t\twantError       bool\n\t\t\texpectedQueries map[string]int // nil = skip check; empty = expect 0 queries\n\t\t\texpectNoRequery bool\n\t\t}{\n\t\t\t{\n\t\t\t\tname: \"after table event: refreshes only that table\",\n\t\t\t\tknownKeyspaces: map[string][]tableInfo{\n\t\t\t\t\t\"test_ks\": {\n\t\t\t\t\t\t{name: \"tbl_a\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}},\n\t\t\t\t\t\t{name: \"tbl_b\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 
0}}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tpopulateKs:      map[string][]string{\"test_ks\": {\"tbl_a\", \"tbl_b\"}},\n\t\t\t\tevent:           &frm.SchemaChangeTable{Change: \"UPDATED\", Keyspace: \"test_ks\", Object: \"tbl_a\"},\n\t\t\t\tgetTable:        [2]string{\"test_ks\", \"tbl_a\"},\n\t\t\t\texpectedQueries: tableRefresh,\n\t\t\t\texpectNoRequery: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:            \"after table event: other table returns cached directly\",\n\t\t\t\tpopulateKs:      map[string][]string{\"test_ks\": {\"tbl_a\", \"tbl_b\"}},\n\t\t\t\tevent:           &frm.SchemaChangeTable{Change: \"UPDATED\", Keyspace: \"test_ks\", Object: \"tbl_a\"},\n\t\t\t\tgetTable:        [2]string{\"test_ks\", \"tbl_b\"},\n\t\t\t\texpectedQueries: noQueries,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: \"after keyspace event: refreshes full keyspace\",\n\t\t\t\tknownKeyspaces: map[string][]tableInfo{\n\t\t\t\t\t\"test_ks\": {{name: \"tbl_a\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}}},\n\t\t\t\t},\n\t\t\t\tpopulateKs:      map[string][]string{\"test_ks\": {\"tbl_a\"}},\n\t\t\t\tevent:           &frm.SchemaChangeKeyspace{Change: \"UPDATED\", Keyspace: \"test_ks\"},\n\t\t\t\tgetTable:        [2]string{\"test_ks\", \"tbl_a\"},\n\t\t\t\texpectedQueries: fullRefresh(\"test_ks\"),\n\t\t\t\texpectNoRequery: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: \"after type event: refreshes full keyspace\",\n\t\t\t\tknownKeyspaces: map[string][]tableInfo{\n\t\t\t\t\t\"test_ks\": {{name: \"tbl_a\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}}},\n\t\t\t\t},\n\t\t\t\tpopulateKs:      map[string][]string{\"test_ks\": {\"tbl_a\"}},\n\t\t\t\tevent:           &frm.SchemaChangeType{Change: \"CREATED\", Keyspace: \"test_ks\", Object: \"my_type\"},\n\t\t\t\tgetTable:        [2]string{\"test_ks\", \"tbl_a\"},\n\t\t\t\texpectedQueries: fullRefresh(\"test_ks\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: \"unknown table: returns 
error\",\n\t\t\t\tknownKeyspaces: map[string][]tableInfo{\n\t\t\t\t\t\"test_ks\": {{name: \"tbl_a\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}}},\n\t\t\t\t},\n\t\t\t\tpopulateKs: map[string][]string{\"test_ks\": {\"tbl_a\"}},\n\t\t\t\tgetTable:   [2]string{\"test_ks\", \"nonexistent\"},\n\t\t\t\twantError:  true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:      \"unknown keyspace: returns error\",\n\t\t\t\tgetTable:  [2]string{\"nonexistent\", \"tbl_a\"},\n\t\t\t\twantError: true,\n\t\t\t},\n\t\t}\n\n\t\tfor _, tt := range tests {\n\t\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\t\t\t\tknownKs := tt.knownKeyspaces\n\t\t\t\tif knownKs == nil {\n\t\t\t\t\tknownKs = map[string][]tableInfo{}\n\t\t\t\t}\n\t\t\t\tctrl := &schemaDataMock{knownKeyspaces: knownKs}\n\t\t\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\t\t\tdefer s.Close()\n\t\t\t\tfor ks, tables := range tt.populateKs {\n\t\t\t\t\tpopulateKeyspace(s, ks, tables...)\n\t\t\t\t}\n\t\t\t\tif tt.event != nil {\n\t\t\t\t\ts.handleSchemaEvent([]frame{tt.event})\n\t\t\t\t}\n\n\t\t\t\tctrl.resetQueries()\n\n\t\t\t\ttbl, err := s.metadataDescriber.GetTable(tt.getTable[0], tt.getTable[1])\n\t\t\t\tif tt.wantError {\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tt.Fatal(\"expected error\")\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"GetTable failed: %v\", err)\n\t\t\t\t}\n\t\t\t\tif tbl.Name != tt.getTable[1] {\n\t\t\t\t\tt.Fatalf(\"expected table %s, got %s\", tt.getTable[1], tbl.Name)\n\t\t\t\t}\n\t\t\t\tif tt.expectedQueries != nil {\n\t\t\t\t\tassertExpectedQueries(t, ctrl, tt.expectedQueries)\n\t\t\t\t}\n\t\t\t\tif tt.expectNoRequery {\n\t\t\t\t\tctrl.resetQueries()\n\t\t\t\t\t_, err = s.metadataDescriber.GetTable(tt.getTable[0], tt.getTable[1])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatalf(\"second GetTable failed: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tassertExpectedQueries(t, ctrl, 
noQueries)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"batch/multiple_table_events\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: map[string][]tableInfo{\n\t\t\t\t\"test_ks\": {\n\t\t\t\t\t{name: \"tbl_a\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}},\n\t\t\t\t\t{name: \"tbl_b\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}},\n\t\t\t\t\t{name: \"tbl_c\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\tpopulateKeyspace(s, \"test_ks\", \"tbl_a\", \"tbl_b\", \"tbl_c\")\n\n\t\ts.handleSchemaEvent([]frame{\n\t\t\t&frm.SchemaChangeTable{Change: \"UPDATED\", Keyspace: \"test_ks\", Object: \"tbl_a\"},\n\t\t\t&frm.SchemaChangeTable{Change: \"UPDATED\", Keyspace: \"test_ks\", Object: \"tbl_b\"},\n\t\t})\n\n\t\tks, _ := s.metadataDescriber.metadata.keyspaceMetadata.getKeyspace(\"test_ks\")\n\t\tif _, ok := ks.Tables[\"tbl_a\"]; ok {\n\t\t\tt.Fatal(\"tbl_a should be invalidated\")\n\t\t}\n\t\tif _, ok := ks.Tables[\"tbl_b\"]; ok {\n\t\t\tt.Fatal(\"tbl_b should be invalidated\")\n\t\t}\n\t\tif _, ok := ks.Tables[\"tbl_c\"]; !ok {\n\t\t\tt.Fatal(\"tbl_c should still be cached\")\n\t\t}\n\n\t\tctrl.resetQueries()\n\n\t\t_, err := s.metadataDescriber.GetTable(\"test_ks\", \"tbl_a\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"GetTable(tbl_a) failed: %v\", err)\n\t\t}\n\t\tif ctrl.getQueryCount() == 0 {\n\t\t\tt.Fatal(\"expected queries for tbl_a\")\n\t\t}\n\n\t\tctrl.resetQueries()\n\n\t\t_, err = s.metadataDescriber.GetTable(\"test_ks\", \"tbl_b\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"GetTable(tbl_b) failed: %v\", err)\n\t\t}\n\t\tif ctrl.getQueryCount() == 0 {\n\t\t\tt.Fatal(\"expected queries for tbl_b\")\n\t\t}\n\n\t\tctrl.resetQueries()\n\n\t\t_, err = s.metadataDescriber.GetTable(\"test_ks\", \"tbl_c\")\n\t\tif err != nil 
{\n\t\t\tt.Fatalf(\"GetTable(tbl_c) failed: %v\", err)\n\t\t}\n\t\tif got := ctrl.getQueryCount(); got != 0 {\n\t\t\tt.Fatalf(\"tbl_c not invalidated, expected 0 queries, got %d\", got)\n\t\t}\n\t})\n\n\tt.Run(\"batch/mixed_keyspace_and_table_events\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: map[string][]tableInfo{\n\t\t\t\t\"test_ks\": {\n\t\t\t\t\t{name: \"tbl_a\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tpolicy := &trackingPolicy{}\n\t\ts := newSchemaEventTestSession(ctrl, policy, \"\")\n\t\tdefer s.Close()\n\t\ts.useSystemSchema = true\n\t\ts.hasAggregatesAndFunctions = false\n\t\tpopulateKeyspace(s, \"test_ks\", \"tbl_a\")\n\n\t\ts.handleSchemaEvent([]frame{\n\t\t\t&frm.SchemaChangeTable{Change: \"CREATED\", Keyspace: \"test_ks\", Object: \"tbl_a\"},\n\t\t\t&frm.SchemaChangeKeyspace{Change: \"UPDATED\", Keyspace: \"test_ks\"},\n\t\t})\n\n\t\tif got := ctrl.getAwaitSchemaAgreementCalls(); got != 1 {\n\t\t\tt.Fatalf(\"awaitSchemaAgreement: got %d, want 1\", got)\n\t\t}\n\t\tif _, found := s.metadataDescriber.metadata.keyspaceMetadata.getKeyspace(\"test_ks\"); found {\n\t\t\tt.Fatal(\"keyspace should have been removed from cache by keyspace event\")\n\t\t}\n\n\t\tctrl.resetQueries()\n\n\t\tks, err := s.metadataDescriber.GetKeyspace(\"test_ks\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"GetKeyspace failed: %v\", err)\n\t\t}\n\t\tif ks.Name != \"test_ks\" {\n\t\t\tt.Fatalf(\"expected test_ks, got %s\", ks.Name)\n\t\t}\n\t\tif got := ctrl.getQueryCount(); got == 0 {\n\t\t\tt.Fatal(\"expected queries after keyspace was cleared\")\n\t\t}\n\t})\n}\n\n// TestSchemaRefreshConcurrent validates that concurrent GetKeyspace/GetTable\n// calls for an uncached or invalidated keyspace result in only a single set\n// of schema queries, not one per caller.\nfunc TestSchemaRefreshConcurrent(t *testing.T) {\n\tt.Parallel()\n\n\tconst concurrency = 
10\n\n\tknownKeyspaces := map[string][]tableInfo{\n\t\t\"test_ks\": {\n\t\t\t{name: \"tbl_a\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}},\n\t\t},\n\t}\n\n\tfullRefreshCount := 7  // keyspace + tables + columns + types + indexes + views + DESCRIBE\n\ttableRefreshCount := 4 // tables + columns + indexes + views (filtered by table_name)\n\n\tt.Run(\"GetKeyspace/uncached\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: knownKeyspaces,\n\t\t\tqueryDelay:     10 * time.Millisecond,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\n\t\tvar wg sync.WaitGroup\n\t\tfor range concurrency {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, _ = s.metadataDescriber.GetKeyspace(\"test_ks\")\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\n\t\tif got := ctrl.getQueryCount(); got != fullRefreshCount {\n\t\t\tt.Errorf(\"expected %d queries (single full refresh), got %d\", fullRefreshCount, got)\n\t\t}\n\t})\n\n\tt.Run(\"GetKeyspace/after_invalidation\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: knownKeyspaces,\n\t\t\tqueryDelay:     10 * time.Millisecond,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\tpopulateKeyspace(s, \"test_ks\", \"tbl_a\")\n\n\t\ts.handleSchemaEvent([]frame{\n\t\t\t&frm.SchemaChangeKeyspace{Change: \"UPDATED\", Keyspace: \"test_ks\"},\n\t\t})\n\n\t\tctrl.resetQueries()\n\n\t\tvar wg sync.WaitGroup\n\t\tfor range concurrency {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, _ = s.metadataDescriber.GetKeyspace(\"test_ks\")\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\n\t\tif got := ctrl.getQueryCount(); got != fullRefreshCount {\n\t\t\tt.Errorf(\"expected %d queries (single full refresh), got %d\", fullRefreshCount, got)\n\t\t}\n\t})\n\n\tt.Run(\"GetTable/after_table_invalidation\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\tctrl := 
&schemaDataMock{\n\t\t\tknownKeyspaces: knownKeyspaces,\n\t\t\tqueryDelay:     10 * time.Millisecond,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\tpopulateKeyspace(s, \"test_ks\", \"tbl_a\")\n\n\t\ts.handleSchemaEvent([]frame{\n\t\t\t&frm.SchemaChangeTable{Change: \"UPDATED\", Keyspace: \"test_ks\", Object: \"tbl_a\"},\n\t\t})\n\n\t\tctrl.resetQueries()\n\n\t\tvar wg sync.WaitGroup\n\t\tfor range concurrency {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, _ = s.metadataDescriber.GetTable(\"test_ks\", \"tbl_a\")\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\n\t\tif got := ctrl.getQueryCount(); got != tableRefreshCount {\n\t\t\tt.Errorf(\"expected %d queries (single table refresh), got %d\", tableRefreshCount, got)\n\t\t}\n\t})\n\n\tt.Run(\"GetTable/stale_snapshot_after_refresh_does_not_refresh_twice\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: knownKeyspaces,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\tpopulateKeyspace(s, \"test_ks\", \"tbl_a\")\n\n\t\ts.handleSchemaEvent([]frame{\n\t\t\t&frm.SchemaChangeTable{Change: \"UPDATED\", Keyspace: \"test_ks\", Object: \"tbl_a\"},\n\t\t})\n\n\t\tstaleKeyspace, wasReloaded, err := s.metadataDescriber.getKeyspaceInternal(\"test_ks\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"getKeyspaceInternal returned unexpected error: %v\", err)\n\t\t}\n\t\tif _, found := staleKeyspace.Tables[\"tbl_a\"]; found {\n\t\t\tt.Fatal(\"expected stale keyspace snapshot to have invalidated table removed\")\n\t\t}\n\n\t\tif err := s.metadataDescriber.deduplicatedRefreshTable(\"test_ks\", \"tbl_a\"); err != nil {\n\t\t\tt.Fatalf(\"deduplicatedRefreshTable returned unexpected error: %v\", err)\n\t\t}\n\n\t\tctrl.resetQueries()\n\n\t\ttbl, refreshNeeded, err := s.metadataDescriber.getTableFromSnapshot(\"test_ks\", \"tbl_a\", staleKeyspace, wasReloaded)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"getTableFromSnapshot 
returned unexpected error: %v\", err)\n\t\t}\n\t\tif refreshNeeded {\n\t\t\tt.Fatal(\"expected latest published keyspace metadata to suppress an extra refresh\")\n\t\t}\n\t\tif tbl == nil || tbl.Name != \"tbl_a\" {\n\t\t\tt.Fatalf(\"unexpected table metadata: %#v\", tbl)\n\t\t}\n\t\tif got := ctrl.getQueryCount(); got != 0 {\n\t\t\tt.Fatalf(\"expected stale snapshot lookup to avoid extra queries, got %d\", got)\n\t\t}\n\t})\n}\n\n// TestConcurrentSchemaRefreshErrorHandling verifies that concurrent\n// GetKeyspace and GetTable calls behave correctly when the underlying\n// schema queries succeed or fail, including mixed scenarios where\n// errors are injected mid-flight.\nfunc TestConcurrentSchemaRefreshErrorHandling(t *testing.T) {\n\tt.Parallel()\n\n\tconst concurrency = 10\n\n\tdefaultTables := map[string][]tableInfo{\n\t\t\"test_ks\": {\n\t\t\t{name: \"tbl_a\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}},\n\t\t\t{name: \"tbl_b\", columns: []columnInfo{{name: \"pk\", kind: \"partition_key\", position: 0}}},\n\t\t},\n\t}\n\n\tt.Run(\"GetKeyspace/all_succeed\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: defaultTables,\n\t\t\tqueryDelay:     10 * time.Millisecond,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\n\t\tvar wg sync.WaitGroup\n\t\tresults := make([]*KeyspaceMetadata, concurrency)\n\t\terrs := make([]error, concurrency)\n\t\tfor i := range concurrency {\n\t\t\twg.Add(1)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tresults[idx], errs[idx] = s.metadataDescriber.GetKeyspace(\"test_ks\")\n\t\t\t}(i)\n\t\t}\n\t\twg.Wait()\n\n\t\tfor i := range concurrency {\n\t\t\tif errs[i] != nil {\n\t\t\t\tt.Errorf(\"goroutine %d: unexpected error: %v\", i, errs[i])\n\t\t\t}\n\t\t\tif results[i] == nil {\n\t\t\t\tt.Errorf(\"goroutine %d: got nil metadata\", i)\n\t\t\t} else if results[i].Name != \"test_ks\" {\n\t\t\t\tt.Errorf(\"goroutine %d: 
expected keyspace test_ks, got %s\", i, results[i].Name)\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"GetKeyspace/all_fail\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\tinjectedErr := fmt.Errorf(\"injected query failure\")\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: defaultTables,\n\t\t\tqueryDelay:     10 * time.Millisecond,\n\t\t\tqueryError:     injectedErr,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\n\t\tvar wg sync.WaitGroup\n\t\terrs := make([]error, concurrency)\n\t\tfor i := range concurrency {\n\t\t\twg.Add(1)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, errs[idx] = s.metadataDescriber.GetKeyspace(\"test_ks\")\n\t\t\t}(i)\n\t\t}\n\t\twg.Wait()\n\n\t\tfor i := range concurrency {\n\t\t\tif errs[i] == nil {\n\t\t\t\tt.Errorf(\"goroutine %d: expected error, got nil\", i)\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"GetKeyspace/fail_then_succeed\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\t// First wave fails, second wave succeeds — verifies singleflight\n\t\t// does not cache the error permanently.\n\t\tinjectedErr := fmt.Errorf(\"transient failure\")\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: defaultTables,\n\t\t\tqueryDelay:     10 * time.Millisecond,\n\t\t\tqueryError:     injectedErr,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\n\t\t// Wave 1: all fail.\n\t\tvar wg sync.WaitGroup\n\t\tfor range concurrency {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, _ = s.metadataDescriber.GetKeyspace(\"test_ks\")\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\n\t\t// Clear error and retry — should succeed.\n\t\tctrl.setQueryError(nil)\n\t\tctrl.resetQueries()\n\n\t\tks, err := s.metadataDescriber.GetKeyspace(\"test_ks\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"second attempt should succeed, got: %v\", err)\n\t\t}\n\t\tif ks.Name != \"test_ks\" {\n\t\t\tt.Fatalf(\"expected keyspace test_ks, got %s\", 
ks.Name)\n\t\t}\n\t})\n\n\tt.Run(\"GetKeyspace/nonexistent_keyspace\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: defaultTables,\n\t\t\tqueryDelay:     10 * time.Millisecond,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\n\t\tvar wg sync.WaitGroup\n\t\terrs := make([]error, concurrency)\n\t\tfor i := range concurrency {\n\t\t\twg.Add(1)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, errs[idx] = s.metadataDescriber.GetKeyspace(\"no_such_ks\")\n\t\t\t}(i)\n\t\t}\n\t\twg.Wait()\n\n\t\tfor i := range concurrency {\n\t\t\tif errs[i] == nil {\n\t\t\t\tt.Errorf(\"goroutine %d: expected ErrKeyspaceDoesNotExist, got nil\", i)\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"GetTable/all_succeed\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: defaultTables,\n\t\t\tqueryDelay:     10 * time.Millisecond,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\tpopulateKeyspace(s, \"test_ks\", \"tbl_a\")\n\t\ts.metadataDescriber.invalidateTableSchema(\"test_ks\", \"tbl_a\")\n\n\t\tvar wg sync.WaitGroup\n\t\tresults := make([]*TableMetadata, concurrency)\n\t\terrs := make([]error, concurrency)\n\t\tfor i := range concurrency {\n\t\t\twg.Add(1)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tresults[idx], errs[idx] = s.metadataDescriber.GetTable(\"test_ks\", \"tbl_a\")\n\t\t\t}(i)\n\t\t}\n\t\twg.Wait()\n\n\t\tfor i := range concurrency {\n\t\t\tif errs[i] != nil {\n\t\t\t\tt.Errorf(\"goroutine %d: unexpected error: %v\", i, errs[i])\n\t\t\t}\n\t\t\tif results[i] == nil {\n\t\t\t\tt.Errorf(\"goroutine %d: got nil table metadata\", i)\n\t\t\t} else if results[i].Name != \"tbl_a\" {\n\t\t\t\tt.Errorf(\"goroutine %d: expected tbl_a, got %s\", i, results[i].Name)\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"GetTable/all_fail\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\tinjectedErr := fmt.Errorf(\"injected table query 
failure\")\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: defaultTables,\n\t\t\tqueryError:     injectedErr,\n\t\t\tqueryDelay:     10 * time.Millisecond,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\tpopulateKeyspace(s, \"test_ks\", \"tbl_a\")\n\t\ts.metadataDescriber.invalidateTableSchema(\"test_ks\", \"tbl_a\")\n\n\t\tvar wg sync.WaitGroup\n\t\terrs := make([]error, concurrency)\n\t\tfor i := range concurrency {\n\t\t\twg.Add(1)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, errs[idx] = s.metadataDescriber.GetTable(\"test_ks\", \"tbl_a\")\n\t\t\t}(i)\n\t\t}\n\t\twg.Wait()\n\n\t\tfor i := range concurrency {\n\t\t\tif errs[i] == nil {\n\t\t\t\tt.Errorf(\"goroutine %d: expected error, got nil\", i)\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"GetTable/fail_then_succeed\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\tinjectedErr := fmt.Errorf(\"transient table failure\")\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: defaultTables,\n\t\t\tqueryDelay:     10 * time.Millisecond,\n\t\t\tqueryError:     injectedErr,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\tpopulateKeyspace(s, \"test_ks\", \"tbl_a\")\n\t\ts.metadataDescriber.invalidateTableSchema(\"test_ks\", \"tbl_a\")\n\n\t\t// Wave 1: all fail.\n\t\tvar wg sync.WaitGroup\n\t\tfor range concurrency {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, _ = s.metadataDescriber.GetTable(\"test_ks\", \"tbl_a\")\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\n\t\t// Clear error, re-invalidate (the failed refresh may have left\n\t\t// tablesInvalidated in an inconsistent state), and retry.\n\t\tctrl.setQueryError(nil)\n\t\tctrl.resetQueries()\n\t\ts.metadataDescriber.invalidateTableSchema(\"test_ks\", \"tbl_a\")\n\n\t\ttbl, err := s.metadataDescriber.GetTable(\"test_ks\", \"tbl_a\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"second attempt should succeed, got: %v\", err)\n\t\t}\n\t\tif tbl.Name != \"tbl_a\" 
{\n\t\t\tt.Fatalf(\"expected tbl_a, got %s\", tbl.Name)\n\t\t}\n\t})\n\n\tt.Run(\"GetTable/nonexistent_table\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: defaultTables,\n\t\t\tqueryDelay:     10 * time.Millisecond,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\tpopulateKeyspace(s, \"test_ks\", \"tbl_a\")\n\n\t\tvar wg sync.WaitGroup\n\t\terrs := make([]error, concurrency)\n\t\tfor i := range concurrency {\n\t\t\twg.Add(1)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, errs[idx] = s.metadataDescriber.GetTable(\"test_ks\", \"no_such_table\")\n\t\t\t}(i)\n\t\t}\n\t\twg.Wait()\n\n\t\tfor i := range concurrency {\n\t\t\tif errs[i] == nil {\n\t\t\t\tt.Errorf(\"goroutine %d: expected ErrNotFound, got nil\", i)\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"GetKeyspace_and_GetTable/concurrent_mixed\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\t// Exercises the interplay between concurrent keyspace and table\n\t\t// refreshes hitting the singleflight groups simultaneously.\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: defaultTables,\n\t\t\tqueryDelay:     5 * time.Millisecond,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\n\t\tvar wg sync.WaitGroup\n\t\tksErrs := make([]error, concurrency)\n\t\ttblErrs := make([]error, concurrency)\n\t\tfor i := range concurrency {\n\t\t\twg.Add(2)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, ksErrs[idx] = s.metadataDescriber.GetKeyspace(\"test_ks\")\n\t\t\t}(i)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, tblErrs[idx] = s.metadataDescriber.GetTable(\"test_ks\", \"tbl_a\")\n\t\t\t}(i)\n\t\t}\n\t\twg.Wait()\n\n\t\tfor i := range concurrency {\n\t\t\tif ksErrs[i] != nil {\n\t\t\t\tt.Errorf(\"GetKeyspace goroutine %d: unexpected error: %v\", i, ksErrs[i])\n\t\t\t}\n\t\t\tif tblErrs[i] != nil {\n\t\t\t\tt.Errorf(\"GetTable goroutine %d: unexpected error: %v\", i, 
tblErrs[i])\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"GetTable/different_tables_concurrent\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\t// Two different tables invalidated concurrently: each gets its own\n\t\t// singleflight key, so both refresh independently.\n\t\tctrl := &schemaDataMock{\n\t\t\tknownKeyspaces: defaultTables,\n\t\t\tqueryDelay:     5 * time.Millisecond,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\tpopulateKeyspace(s, \"test_ks\", \"tbl_a\", \"tbl_b\")\n\t\ts.metadataDescriber.invalidateTableSchema(\"test_ks\", \"tbl_a\")\n\t\ts.metadataDescriber.invalidateTableSchema(\"test_ks\", \"tbl_b\")\n\n\t\tvar wg sync.WaitGroup\n\t\taErrs := make([]error, concurrency)\n\t\tbErrs := make([]error, concurrency)\n\t\tfor i := range concurrency {\n\t\t\twg.Add(2)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, aErrs[idx] = s.metadataDescriber.GetTable(\"test_ks\", \"tbl_a\")\n\t\t\t}(i)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, bErrs[idx] = s.metadataDescriber.GetTable(\"test_ks\", \"tbl_b\")\n\t\t\t}(i)\n\t\t}\n\t\twg.Wait()\n\n\t\tfor i := range concurrency {\n\t\t\tif aErrs[i] != nil {\n\t\t\t\tt.Errorf(\"tbl_a goroutine %d: unexpected error: %v\", i, aErrs[i])\n\t\t\t}\n\t\t\tif bErrs[i] != nil {\n\t\t\t\tt.Errorf(\"tbl_b goroutine %d: unexpected error: %v\", i, bErrs[i])\n\t\t\t}\n\t\t}\n\n\t\t// Verify both tables are now cached.\n\t\tfor _, name := range []string{\"tbl_a\", \"tbl_b\"} {\n\t\t\ttbl, err := s.metadataDescriber.GetTable(\"test_ks\", name)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"GetTable(%s) after refresh: %v\", name, err)\n\t\t\t} else if tbl.Name != name {\n\t\t\t\tt.Errorf(\"expected %s, got %s\", name, tbl.Name)\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"GetTable/different_tables_one_fails\", func(t *testing.T) {\n\t\tt.Parallel()\n\t\t// tbl_a exists in the mock, tbl_x does not — concurrent refreshes\n\t\t// for both: one succeeds, one gets ErrNotFound.\n\t\tctrl := 
&schemaDataMock{\n\t\t\tknownKeyspaces: defaultTables,\n\t\t\tqueryDelay:     5 * time.Millisecond,\n\t\t}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\tpopulateKeyspace(s, \"test_ks\", \"tbl_a\", \"tbl_x\")\n\t\ts.metadataDescriber.invalidateTableSchema(\"test_ks\", \"tbl_a\")\n\t\ts.metadataDescriber.invalidateTableSchema(\"test_ks\", \"tbl_x\")\n\n\t\tvar wg sync.WaitGroup\n\t\taErrs := make([]error, concurrency)\n\t\txErrs := make([]error, concurrency)\n\t\tfor i := range concurrency {\n\t\t\twg.Add(2)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, aErrs[idx] = s.metadataDescriber.GetTable(\"test_ks\", \"tbl_a\")\n\t\t\t}(i)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, xErrs[idx] = s.metadataDescriber.GetTable(\"test_ks\", \"tbl_x\")\n\t\t\t}(i)\n\t\t}\n\t\twg.Wait()\n\n\t\tfor i := range concurrency {\n\t\t\tif aErrs[i] != nil {\n\t\t\t\tt.Errorf(\"tbl_a goroutine %d: unexpected error: %v\", i, aErrs[i])\n\t\t\t}\n\t\t\tif xErrs[i] == nil {\n\t\t\t\tt.Errorf(\"tbl_x goroutine %d: expected ErrNotFound, got nil\", i)\n\t\t\t}\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "example_batch_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/gocql/gocql\"\n)\n\n// Example_batch demonstrates how to execute a batch of statements.\nfunc Example_batch() {\n\t/* The example assumes the following CQL was used to setup the keyspace:\n\tcreate keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\tcreate table example.batches(pk int, ck int, description text, PRIMARY KEY(pk, ck));\n\t*/\n\tcluster := gocql.NewCluster(\"localhost:9042\")\n\tcluster.Keyspace = \"example\"\n\tcluster.ProtoVersion = 4\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer session.Close()\n\n\tctx := context.Background()\n\n\tb := session.Batch(gocql.UnloggedBatch).WithContext(ctx)\n\tb.Entries = append(b.Entries, gocql.BatchEntry{\n\t\tStmt:       \"INSERT INTO example.batches (pk, ck, 
description) VALUES (?, ?, ?)\",\n\t\tArgs:       []any{1, 2, \"1.2\"},\n\t\tIdempotent: true,\n\t})\n\tb.Entries = append(b.Entries, gocql.BatchEntry{\n\t\tStmt:       \"INSERT INTO example.batches (pk, ck, description) VALUES (?, ?, ?)\",\n\t\tArgs:       []any{1, 3, \"1.3\"},\n\t\tIdempotent: true,\n\t})\n\n\terr = session.ExecuteBatch(b)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr = b.Query(\"INSERT INTO example.batches (pk, ck, description) VALUES (?, ?, ?)\", 1, 4, \"1.4\").\n\t\tQuery(\"INSERT INTO example.batches (pk, ck, description) VALUES (?, ?, ?)\", 1, 5, \"1.5\").\n\t\tExec()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tscanner := session.Query(\"SELECT pk, ck, description FROM example.batches\").Iter().Scanner()\n\tfor scanner.Next() {\n\t\tvar pk, ck int32\n\t\tvar description string\n\t\terr = scanner.Scan(&pk, &ck, &description)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfmt.Println(pk, ck, description)\n\t}\n\t// 1 2 1.2\n\t// 1 3 1.3\n\t// 1 4 1.4\n\t// 1 5 1.5\n}\n"
  },
  {
    "path": "example_dynamic_columns_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"text/tabwriter\"\n\n\tgocql \"github.com/gocql/gocql\"\n)\n\n// Example_dynamicColumns demonstrates how to handle dynamic column list.\nfunc Example_dynamicColumns() {\n\t/* The example assumes the following CQL was used to setup the keyspace:\n\tcreate keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\tcreate table example.table1(pk text, ck int, value1 text, value2 int, PRIMARY KEY(pk, ck));\n\tinsert into example.table1 (pk, ck, value1, value2) values ('a', 1, 'b', 2);\n\tinsert into example.table1 (pk, ck, value1, value2) values ('c', 3, 'd', 4);\n\tinsert into example.table1 (pk, ck, value1, value2) values ('c', 5, null, null);\n\tcreate table example.table2(pk int, value1 timestamp, PRIMARY KEY(pk));\n\tinsert into example.table2 (pk, value1) values 
(1, '2020-01-02 03:04:05');\n\t*/\n\tcluster := gocql.NewCluster(\"localhost:9042\")\n\tcluster.Keyspace = \"example\"\n\tcluster.ProtoVersion = 4\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer session.Close()\n\n\tprintQuery := func(ctx context.Context, session *gocql.Session, stmt string, values ...any) error {\n\t\titer := session.Query(stmt, values...).WithContext(ctx).Iter()\n\t\tfmt.Println(stmt)\n\t\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ',\n\t\t\t0)\n\t\tfor i, columnInfo := range iter.Columns() {\n\t\t\tif i > 0 {\n\t\t\t\tfmt.Fprint(w, \"\\t| \")\n\t\t\t}\n\t\t\tfmt.Fprintf(w, \"%s (%s)\", columnInfo.Name, columnInfo.TypeInfo)\n\t\t}\n\n\t\tfor {\n\t\t\trd, err := iter.RowData()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !iter.Scan(rd.Values...) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfmt.Fprint(w, \"\\n\")\n\t\t\tfor i, val := range rd.Values {\n\t\t\t\tif i > 0 {\n\t\t\t\t\tfmt.Fprint(w, \"\\t| \")\n\t\t\t\t}\n\n\t\t\t\tfmt.Fprint(w, reflect.Indirect(reflect.ValueOf(val)).Interface())\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprint(w, \"\\n\")\n\t\tw.Flush()\n\t\tfmt.Println()\n\n\t\treturn iter.Close()\n\t}\n\n\tctx := context.Background()\n\n\terr = printQuery(ctx, session, \"SELECT * FROM table1\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr = printQuery(ctx, session, \"SELECT value2, pk, ck FROM table1\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr = printQuery(ctx, session, \"SELECT * FROM table2\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// SELECT * FROM table1\n\t// pk (varchar) | ck (int) | value1 (varchar) | value2 (int)\n\t// a            | 1        | b                | 2\n\t// c            | 3        | d                | 4\n\t// c            | 5        |                  | 0\n\t//\n\t// SELECT value2, pk, ck FROM table1\n\t// value2 (int) | pk (varchar) | ck (int)\n\t// 2            | a            | 1\n\t// 4            | c            | 3\n\t// 0            | c   
         | 5\n\t//\n\t// SELECT * FROM table2\n\t// pk (int) | value1 (timestamp)\n\t// 1        | 2020-01-02 03:04:05 +0000 UTC\n}\n"
  },
  {
    "path": "example_lwt_batch_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/gocql/gocql\"\n)\n\n// ExampleSession_MapExecuteBatchCAS demonstrates how to execute a batch lightweight transaction.\nfunc ExampleSession_MapExecuteBatchCAS() {\n\t/* The example assumes the following CQL was used to setup the keyspace:\n\tcreate keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\tcreate table example.my_lwt_batch_table(pk text, ck text, version int, value text, PRIMARY KEY(pk, ck));\n\t*/\n\tcluster := gocql.NewCluster(\"localhost:9042\")\n\tcluster.Keyspace = \"example\"\n\tcluster.ProtoVersion = 4\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer session.Close()\n\n\tctx := context.Background()\n\n\terr = session.Query(\"INSERT INTO example.my_lwt_batch_table (pk, ck, version, value) VALUES (?, ?, ?, 
?)\",\n\t\t\"pk1\", \"ck1\", 1, \"a\").WithContext(ctx).Exec()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr = session.Query(\"INSERT INTO example.my_lwt_batch_table (pk, ck, version, value) VALUES (?, ?, ?, ?)\",\n\t\t\"pk1\", \"ck2\", 1, \"A\").WithContext(ctx).Exec()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\texecuteBatch := func(ck2Version int) {\n\t\tb := session.Batch(gocql.LoggedBatch)\n\t\tb.Entries = append(b.Entries, gocql.BatchEntry{\n\t\t\tStmt: \"UPDATE my_lwt_batch_table SET value=? WHERE pk=? AND ck=? IF version=?\",\n\t\t\tArgs: []any{\"b\", \"pk1\", \"ck1\", 1},\n\t\t})\n\t\tb.Entries = append(b.Entries, gocql.BatchEntry{\n\t\t\tStmt: \"UPDATE my_lwt_batch_table SET value=? WHERE pk=? AND ck=? IF version=?\",\n\t\t\tArgs: []any{\"B\", \"pk1\", \"ck2\", ck2Version},\n\t\t})\n\t\tm := make(map[string]any)\n\t\tapplied, iter, err := session.MapExecuteBatchCAS(b.WithContext(ctx), m)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfmt.Println(applied, m)\n\n\t\tm = make(map[string]any)\n\t\tfor iter.MapScan(m) {\n\t\t\tfmt.Println(m)\n\t\t\tm = make(map[string]any)\n\t\t}\n\n\t\tif err := iter.Close(); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tprintState := func() {\n\t\tscanner := session.Query(\"SELECT ck, value FROM example.my_lwt_batch_table WHERE pk = ?\", \"pk1\").\n\t\t\tWithContext(ctx).Iter().Scanner()\n\t\tfor scanner.Next() {\n\t\t\tvar ck, value string\n\t\t\terr = scanner.Scan(&ck, &value)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tfmt.Println(ck, value)\n\t\t}\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\texecuteBatch(0)\n\tprintState()\n\texecuteBatch(1)\n\tprintState()\n\n\t// false map[ck:ck1 pk:pk1 version:1]\n\t// map[[applied]:false ck:ck2 pk:pk1 version:1]\n\t// ck1 a\n\t// ck2 A\n\t// true map[]\n\t// ck1 b\n\t// ck2 B\n}\n"
  },
  {
    "path": "example_lwt_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\tgocql \"github.com/gocql/gocql\"\n)\n\n// ExampleQuery_MapScanCAS demonstrates how to execute a single-statement lightweight transaction.\nfunc ExampleQuery_MapScanCAS() {\n\t/* The example assumes the following CQL was used to setup the keyspace:\n\tcreate keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\tcreate table example.my_lwt_table(pk int, version int, value text, PRIMARY KEY(pk));\n\t*/\n\tcluster := gocql.NewCluster(\"localhost:9042\")\n\tcluster.Keyspace = \"example\"\n\tcluster.ProtoVersion = 4\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer session.Close()\n\n\tctx := context.Background()\n\n\terr = session.Query(\"INSERT INTO example.my_lwt_table (pk, version, value) VALUES (?, ?, ?)\",\n\t\t1, 1, 
\"a\").WithContext(ctx).Exec()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tm := make(map[string]any)\n\tapplied, err := session.Query(\"UPDATE example.my_lwt_table SET value = ? WHERE pk = ? IF version = ?\",\n\t\t\"b\", 1, 0).WithContext(ctx).MapScanCAS(m)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(applied, m)\n\n\tvar value string\n\terr = session.Query(\"SELECT value FROM example.my_lwt_table WHERE pk = ?\", 1).WithContext(ctx).\n\t\tScan(&value)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(value)\n\n\tm = make(map[string]any)\n\tapplied, err = session.Query(\"UPDATE example.my_lwt_table SET value = ? WHERE pk = ? IF version = ?\",\n\t\t\"b\", 1, 1).WithContext(ctx).MapScanCAS(m)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(applied, m)\n\n\tvar value2 string\n\terr = session.Query(\"SELECT value FROM example.my_lwt_table WHERE pk = ?\", 1).WithContext(ctx).\n\t\tScan(&value2)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(value2)\n\t// false map[version:1]\n\t// a\n\t// true map[]\n\t// b\n}\n"
  },
  {
    "path": "example_marshaler_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\n\tgocql \"github.com/gocql/gocql\"\n)\n\n// MyMarshaler implements Marshaler and Unmarshaler.\n// It represents a version number stored as string.\ntype MyMarshaler struct {\n\tmajor, minor, patch int\n}\n\nfunc (m MyMarshaler) MarshalCQL(info gocql.TypeInfo) ([]byte, error) {\n\treturn gocql.Marshal(info, fmt.Sprintf(\"%d.%d.%d\", m.major, m.minor, m.patch))\n}\n\nfunc (m *MyMarshaler) UnmarshalCQL(info gocql.TypeInfo, data []byte) error {\n\tvar s string\n\terr := gocql.Unmarshal(info, data, &s)\n\tif err != nil {\n\t\treturn err\n\t}\n\tparts := strings.SplitN(s, \".\", 3)\n\tif len(parts) != 3 {\n\t\treturn fmt.Errorf(\"parse version %q: %d parts instead of 3\", s, len(parts))\n\t}\n\tmajor, err := strconv.Atoi(parts[0])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse version %q major number: 
%v\", s, err)\n\t}\n\tminor, err := strconv.Atoi(parts[1])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse version %q minor number: %v\", s, err)\n\t}\n\tpatch, err := strconv.Atoi(parts[2])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse version %q patch number: %v\", s, err)\n\t}\n\tm.major = major\n\tm.minor = minor\n\tm.patch = patch\n\treturn nil\n}\n\n// Example_marshalerUnmarshaler demonstrates how to implement a Marshaler and Unmarshaler.\nfunc Example_marshalerUnmarshaler() {\n\t/* The example assumes the following CQL was used to setup the keyspace:\n\tcreate keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\tcreate table example.my_marshaler_table(pk int, value text, PRIMARY KEY(pk));\n\t*/\n\tcluster := gocql.NewCluster(\"localhost:9042\")\n\tcluster.Keyspace = \"example\"\n\tcluster.ProtoVersion = 4\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer session.Close()\n\n\tctx := context.Background()\n\n\tvalue := MyMarshaler{\n\t\tmajor: 1,\n\t\tminor: 2,\n\t\tpatch: 3,\n\t}\n\terr = session.Query(\"INSERT INTO example.my_marshaler_table (pk, value) VALUES (?, ?)\",\n\t\t1, value).WithContext(ctx).Exec()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar stringValue string\n\terr = session.Query(\"SELECT value FROM example.my_marshaler_table WHERE pk = 1\").WithContext(ctx).\n\t\tScan(&stringValue)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(stringValue)\n\tvar unmarshaledValue MyMarshaler\n\terr = session.Query(\"SELECT value FROM example.my_marshaler_table WHERE pk = 1\").WithContext(ctx).\n\t\tScan(&unmarshaledValue)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(unmarshaledValue)\n\t// 1.2.3\n\t// {1 2 3}\n}\n"
  },
  {
    "path": "example_nulls_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql_test\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\tgocql \"github.com/gocql/gocql\"\n)\n\n// Example_nulls demonstrates how to distinguish between null and zero value when needed.\n//\n// Null values are unmarshalled as zero value of the type. 
If you need to distinguish for example between text\n// column being null and empty string, you can unmarshal into *string field.\nfunc Example_nulls() {\n\t/* The example assumes the following CQL was used to setup the keyspace:\n\tcreate keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\tcreate table example.stringvals(id int, value text, PRIMARY KEY(id));\n\tinsert into example.stringvals (id, value) values (1, null);\n\tinsert into example.stringvals (id, value) values (2, '');\n\tinsert into example.stringvals (id, value) values (3, 'hello');\n\t*/\n\tcluster := gocql.NewCluster(\"localhost:9042\")\n\tcluster.Keyspace = \"example\"\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer session.Close()\n\tscanner := session.Query(`SELECT id, value FROM stringvals`).Iter().Scanner()\n\tfor scanner.Next() {\n\t\tvar (\n\t\t\tid  int32\n\t\t\tval *string\n\t\t)\n\t\terr := scanner.Scan(&id, &val)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif val != nil {\n\t\t\tfmt.Printf(\"Row %d is %q\\n\", id, *val)\n\t\t} else {\n\t\t\tfmt.Printf(\"Row %d is null\\n\", id)\n\t\t}\n\n\t}\n\terr = scanner.Err()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// Row 1 is null\n\t// Row 2 is \"\"\n\t// Row 3 is \"hello\"\n}\n"
  },
  {
    "path": "example_paging_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql_test\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\tgocql \"github.com/gocql/gocql\"\n)\n\n// Example_paging demonstrates how to manually fetch pages and use page state.\n//\n// See also package documentation about paging.\nfunc Example_paging() {\n\t/* The example assumes the following CQL was used to setup the keyspace:\n\tcreate keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\tcreate table example.itoa(id int, description text, PRIMARY KEY(id));\n\tinsert into example.itoa (id, description) values (1, 'one');\n\tinsert into example.itoa (id, description) values (2, 'two');\n\tinsert into example.itoa (id, description) values (3, 'three');\n\tinsert into example.itoa (id, description) values (4, 'four');\n\tinsert into example.itoa (id, description) values (5, 'five');\n\tinsert into example.itoa (id, description) values (6, 
'six');\n\t*/\n\tcluster := gocql.NewCluster(\"localhost:9042\")\n\tcluster.Keyspace = \"example\"\n\tcluster.ProtoVersion = 4\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer session.Close()\n\n\tvar pageState []byte\n\tfor {\n\t\t// We use PageSize(2) for the sake of example, use larger values in production (default is 5000) for performance\n\t\t// reasons.\n\t\titer := session.Query(`SELECT id, description FROM itoa`).PageSize(2).PageState(pageState).Iter()\n\t\tnextPageState := iter.PageState()\n\t\tscanner := iter.Scanner()\n\t\tfor scanner.Next() {\n\t\t\tvar (\n\t\t\t\tid          int\n\t\t\t\tdescription string\n\t\t\t)\n\t\t\terr = scanner.Scan(&id, &description)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tfmt.Println(id, description)\n\t\t}\n\t\terr = scanner.Err()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfmt.Printf(\"next page state: %+v\\n\", nextPageState)\n\t\tif len(nextPageState) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tpageState = nextPageState\n\t}\n\t// 5 five\n\t// 1 one\n\t// next page state: [4 0 0 0 1 0 240 127 255 255 253 0]\n\t// 2 two\n\t// 4 four\n\t// next page state: [4 0 0 0 4 0 240 127 255 255 251 0]\n\t// 6 six\n\t// 3 three\n\t// next page state: [4 0 0 0 3 0 240 127 255 255 249 0]\n\t// next page state: []\n}\n"
  },
  {
    "path": "example_set_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql_test\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"sort\"\n\n\tgocql \"github.com/gocql/gocql\"\n)\n\n// Example_set demonstrates how to use sets.\nfunc Example_set() {\n\t/* The example assumes the following CQL was used to setup the keyspace:\n\tcreate keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\tcreate table example.sets(id int, value set<text>, PRIMARY KEY(id));\n\t*/\n\tcluster := gocql.NewCluster(\"localhost:9042\")\n\tcluster.Keyspace = \"example\"\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer session.Close()\n\terr = session.Query(`UPDATE sets SET value=? WHERE id=1`, []string{\"alpha\", \"beta\", \"gamma\"}).Exec()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = session.Query(`UPDATE sets SET value=value+? 
WHERE id=1`, \"epsilon\").Exec()\n\tif err != nil {\n\t\t// This does not work because the ? expects a set, not a single item.\n\t\tfmt.Printf(\"expected error: %v\\n\", err)\n\t}\n\terr = session.Query(`UPDATE sets SET value=value+? WHERE id=1`, []string{\"delta\"}).Exec()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// map[x]struct{} is supported too.\n\ttoRemove := map[string]struct{}{\n\t\t\"alpha\": {},\n\t\t\"gamma\": {},\n\t}\n\terr = session.Query(`UPDATE sets SET value=value-? WHERE id=1`, toRemove).Exec()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tscanner := session.Query(`SELECT id, value FROM sets`).Iter().Scanner()\n\tfor scanner.Next() {\n\t\tvar (\n\t\t\tid  int32\n\t\t\tval []string\n\t\t)\n\t\terr := scanner.Scan(&id, &val)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tsort.Strings(val)\n\t\tfmt.Printf(\"Row %d is %v\\n\", id, val)\n\t}\n\terr = scanner.Err()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// expected error: can not marshal string into set(varchar)\n\t// Row 1 is [beta delta]\n}\n"
  },
  {
    "path": "example_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\tgocql \"github.com/gocql/gocql\"\n)\n\nfunc Example() {\n\t/* The example assumes the following CQL was used to setup the keyspace:\n\tcreate keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\tcreate table example.tweet(timeline text, id UUID, text text, PRIMARY KEY(id));\n\tcreate index on example.tweet(timeline);\n\t*/\n\tcluster := gocql.NewCluster(\"localhost:9042\")\n\tcluster.Keyspace = \"example\"\n\tcluster.Consistency = gocql.Quorum\n\t// connect to the cluster\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer session.Close()\n\n\tctx := context.Background()\n\n\t// insert a tweet\n\tif err := session.Query(`INSERT INTO tweet (timeline, id, text) VALUES (?, ?, ?)`,\n\t\t\"me\", gocql.TimeUUID(), \"hello 
 world\").WithContext(ctx).Exec(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar id gocql.UUID\n\tvar text string\n\n\t/* Search for a specific set of records whose 'timeline' column matches\n\t * the value 'me'. The secondary index that we created earlier will be\n\t * used for optimizing the search */\n\tif err := session.Query(`SELECT id, text FROM tweet WHERE timeline = ? LIMIT 1`,\n\t\t\"me\").WithContext(ctx).Consistency(gocql.One).Scan(&id, &text); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(\"Tweet:\", id, text)\n\tfmt.Println()\n\n\t// list all tweets\n\tscanner := session.Query(`SELECT id, text FROM tweet WHERE timeline = ?`,\n\t\t\"me\").WithContext(ctx).Iter().Scanner()\n\tfor scanner.Next() {\n\t\terr = scanner.Scan(&id, &text)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfmt.Println(\"Tweet:\", id, text)\n\t}\n\t// scanner.Err() closes the iterator, so neither scanner nor iter should be used afterwards.\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// Tweet: cad53821-3731-11eb-971c-708bcdaada84 hello world\n\t//\n\t// Tweet: cad53821-3731-11eb-971c-708bcdaada84 hello world\n\t// Tweet: d577ab85-3731-11eb-81eb-708bcdaada84 hello world\n}\n"
  },
  {
    "path": "example_udt_map_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\tgocql \"github.com/gocql/gocql\"\n)\n\n// Example_userDefinedTypesMap demonstrates how to work with user-defined types as maps.\n// See also Example_userDefinedTypesStruct and examples for UDTMarshaler and UDTUnmarshaler if you want to map to structs.\nfunc Example_userDefinedTypesMap() {\n\t/* The example assumes the following CQL was used to setup the keyspace:\n\tcreate keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\tcreate type example.my_udt (field_a text, field_b int);\n\tcreate table example.my_udt_table(pk int, value frozen<my_udt>, PRIMARY KEY(pk));\n\t*/\n\tcluster := gocql.NewCluster(\"localhost:9042\")\n\tcluster.Keyspace = \"example\"\n\tcluster.ProtoVersion = 4\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer 
session.Close()\n\n\tctx := context.Background()\n\n\tvalue := map[string]any{\n\t\t\"field_a\": \"a value\",\n\t\t\"field_b\": 42,\n\t}\n\terr = session.Query(\"INSERT INTO example.my_udt_table (pk, value) VALUES (?, ?)\",\n\t\t1, value).WithContext(ctx).Exec()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar readValue map[string]any\n\n\terr = session.Query(\"SELECT value FROM example.my_udt_table WHERE pk = 1\").WithContext(ctx).Scan(&readValue)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(readValue[\"field_a\"])\n\tfmt.Println(readValue[\"field_b\"])\n\t// a value\n\t// 42\n}\n"
  },
  {
    "path": "example_udt_marshaler_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql_test\n\nimport (\n\t\"context\"\n\t\"log\"\n\n\tgocql \"github.com/gocql/gocql\"\n)\n\n// MyUDTMarshaler implements UDTMarshaler.\ntype MyUDTMarshaler struct {\n\tfieldA string\n\tfieldB int32\n}\n\n// MarshalUDT marshals the selected field to bytes.\nfunc (m MyUDTMarshaler) MarshalUDT(name string, info gocql.TypeInfo) ([]byte, error) {\n\tswitch name {\n\tcase \"field_a\":\n\t\treturn gocql.Marshal(info, m.fieldA)\n\tcase \"field_b\":\n\t\treturn gocql.Marshal(info, m.fieldB)\n\tdefault:\n\t\t// If you want to be strict and return error on unknown field, you can do so here instead.\n\t\t// Returning nil, nil will set the value of unknown fields to null, which might be handy if you want\n\t\t// to be forward-compatible when a new field is added to the UDT.\n\t\treturn nil, nil\n\t}\n}\n\n// ExampleUDTMarshaler demonstrates how to implement a UDTMarshaler.\nfunc ExampleUDTMarshaler() 
{\n\t/* The example assumes the following CQL was used to setup the keyspace:\n\tcreate keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\tcreate type example.my_udt (field_a text, field_b int);\n\tcreate table example.my_udt_table(pk int, value frozen<my_udt>, PRIMARY KEY(pk));\n\t*/\n\tcluster := gocql.NewCluster(\"localhost:9042\")\n\tcluster.Keyspace = \"example\"\n\tcluster.ProtoVersion = 4\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer session.Close()\n\n\tctx := context.Background()\n\n\tvalue := MyUDTMarshaler{\n\t\tfieldA: \"a value\",\n\t\tfieldB: 42,\n\t}\n\terr = session.Query(\"INSERT INTO example.my_udt_table (pk, value) VALUES (?, ?)\",\n\t\t1, value).WithContext(ctx).Exec()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n"
  },
  {
    "path": "example_udt_struct_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\tgocql \"github.com/gocql/gocql\"\n)\n\ntype MyUDT struct {\n\tFieldA string `cql:\"field_a\"`\n\tFieldB int32  `cql:\"field_b\"`\n}\n\n// Example_userDefinedTypesStruct demonstrates how to work with user-defined types as structs.\n// See also examples for UDTMarshaler and UDTUnmarshaler if you need more control/better performance.\nfunc Example_userDefinedTypesStruct() {\n\t/* The example assumes the following CQL was used to setup the keyspace:\n\tcreate keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\tcreate type example.my_udt (field_a text, field_b int);\n\tcreate table example.my_udt_table(pk int, value frozen<my_udt>, PRIMARY KEY(pk));\n\t*/\n\tcluster := gocql.NewCluster(\"localhost:9042\")\n\tcluster.Keyspace = \"example\"\n\tcluster.ProtoVersion = 4\n\tsession, err := 
cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer session.Close()\n\n\tctx := context.Background()\n\n\tvalue := MyUDT{\n\t\tFieldA: \"a value\",\n\t\tFieldB: 42,\n\t}\n\terr = session.Query(\"INSERT INTO example.my_udt_table (pk, value) VALUES (?, ?)\",\n\t\t1, value).WithContext(ctx).Exec()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar readValue MyUDT\n\n\terr = session.Query(\"SELECT value FROM example.my_udt_table WHERE pk = 1\").WithContext(ctx).Scan(&readValue)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(readValue.FieldA)\n\tfmt.Println(readValue.FieldB)\n\t// a value\n\t// 42\n}\n"
  },
  {
    "path": "example_udt_unmarshaler_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\tgocql \"github.com/gocql/gocql\"\n)\n\n// MyUDTUnmarshaler implements UDTUnmarshaler.\ntype MyUDTUnmarshaler struct {\n\tfieldA string\n\tfieldB int32\n}\n\n// UnmarshalUDT unmarshals the field identified by name into MyUDTUnmarshaler.\nfunc (m *MyUDTUnmarshaler) UnmarshalUDT(name string, info gocql.TypeInfo, data []byte) error {\n\tswitch name {\n\tcase \"field_a\":\n\t\treturn gocql.Unmarshal(info, data, &m.fieldA)\n\tcase \"field_b\":\n\t\treturn gocql.Unmarshal(info, data, &m.fieldB)\n\tdefault:\n\t\t// If you want to be strict and return error on unknown field, you can do so here instead.\n\t\t// Returning nil will ignore unknown fields, which might be handy if you want\n\t\t// to be forward-compatible when a new field is added to the UDT.\n\t\treturn nil\n\t}\n}\n\n// ExampleUDTUnmarshaler demonstrates how to implement 
a UDTUnmarshaler.\nfunc ExampleUDTUnmarshaler() {\n\t/* The example assumes the following CQL was used to setup the keyspace:\n\tcreate keyspace example with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\tcreate type example.my_udt (field_a text, field_b int);\n\tcreate table example.my_udt_table(pk int, value frozen<my_udt>, PRIMARY KEY(pk));\n\tinsert into example.my_udt_table (pk, value) values (1, {field_a: 'a value', field_b: 42});\n\t*/\n\tcluster := gocql.NewCluster(\"localhost:9042\")\n\tcluster.Keyspace = \"example\"\n\tcluster.ProtoVersion = 4\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer session.Close()\n\n\tctx := context.Background()\n\n\tvar value MyUDTUnmarshaler\n\terr = session.Query(\"SELECT value FROM example.my_udt_table WHERE pk = 1\").WithContext(ctx).Scan(&value)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(value.fieldA)\n\tfmt.Println(value.fieldB)\n\t// a value\n\t// 42\n}\n"
  },
  {
    "path": "exec.go",
    "content": "package gocql\n\nimport (\n\t\"fmt\"\n)\n\n// SingleHostQueryExecutor allows to quickly execute diagnostic queries while\n// connected to only a single node.\n// The executor opens only a single connection to a node and does not use\n// connection pools.\n// Consistency level used is ONE.\n// Retry policy is applied, attempts are visible in query metrics but query\n// observer is not notified.\ntype SingleHostQueryExecutor struct {\n\tsession *Session\n\tcontrol *controlConn\n}\n\n// Exec executes the query without returning any rows.\nfunc (e SingleHostQueryExecutor) Exec(stmt string, values ...any) error {\n\treturn e.control.query(stmt, values...).Close()\n}\n\n// Iter executes the query and returns an iterator capable of iterating\n// over all results.\nfunc (e SingleHostQueryExecutor) Iter(stmt string, values ...any) *Iter {\n\treturn e.control.query(stmt, values...)\n}\n\nfunc (e SingleHostQueryExecutor) Close() {\n\tif e.control != nil {\n\t\te.control.close()\n\t}\n\tif e.session != nil {\n\t\te.session.Close()\n\t}\n}\n\n// NewSingleHostQueryExecutor creates a SingleHostQueryExecutor by connecting\n// to one of the hosts specified in the ClusterConfig.\n// If ProtoVersion is not specified version 4 is used.\n// Caller is responsible for closing the executor after use.\nfunc NewSingleHostQueryExecutor(cfg *ClusterConfig) (e SingleHostQueryExecutor, err error) {\n\t// Check that hosts in the ClusterConfig is not empty\n\tif len(cfg.Hosts) < 1 {\n\t\terr = ErrNoHosts\n\t\treturn\n\t}\n\n\tc := *cfg\n\n\t// If protocol version not set assume 4 and skip discovery\n\tif c.ProtoVersion == 0 {\n\t\tc.ProtoVersion = protoVersion4\n\t}\n\n\t// Close in case of error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\te.Close()\n\t\t}\n\t}()\n\n\t// Create uninitialised session\n\tc.disableInit = true\n\tif e.session, err = NewSession(c); err != nil {\n\t\terr = fmt.Errorf(\"new session: %w\", err)\n\t\treturn\n\t}\n\n\tvar hosts []*HostInfo\n\tif hosts, 
err = resolveInitialEndpoints(c.DNSResolver, c.Hosts, c.Port, c.Logger); err != nil {\n\t\terr = fmt.Errorf(\"addrs to hosts: %w\", err)\n\t\treturn\n\t}\n\n\t// Create control connection to one of the hosts\n\te.control = createControlConn(e.session)\n\n\t// shuffle endpoints so not all drivers will connect to the same initial\n\t// node.\n\thosts = shuffleHosts(hosts)\n\n\tconncfg := *e.control.session.connCfg\n\tconncfg.disableCoalesce = true\n\n\tvar conn *Conn\n\n\tfor _, host := range hosts {\n\t\tconn, err = e.control.session.dial(e.control.session.ctx, host, &conncfg, e.control)\n\t\tif err != nil {\n\t\t\te.control.session.logger.Printf(\"gocql: unable to dial control conn %v:%v: %v\\n\", host.ConnectAddress(), host.Port(), err)\n\t\t\tcontinue\n\t\t}\n\t\terr = e.control.setupConn(conn)\n\t\tif err == nil {\n\t\t\tconn.finalizeConnection()\n\t\t\tbreak\n\t\t}\n\t\te.control.session.logger.Printf(\"gocql: unable setup control conn %v:%v: %v\\n\", host.ConnectAddress(), host.Port(), err)\n\t\tconn.Close()\n\t\tconn = nil\n\t}\n\n\tif conn == nil {\n\t\terr = fmt.Errorf(\"setup: %w\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n"
  },
  {
    "path": "exec_test.go",
    "content": "//go:build integration\n// +build integration\n\npackage gocql\n\nimport (\n\t\"testing\"\n)\n\nfunc TestSingleHostQueryExecutor(t *testing.T) {\n\tt.Parallel()\n\n\tcluster := createCluster()\n\n\te, err := NewSingleHostQueryExecutor(cluster)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer e.Close()\n\n\titer := e.Iter(\"SELECT now() FROM system.local\")\n\n\tvar date []byte\n\titer.Scan(&date)\n\tif err := iter.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(date) == 0 {\n\t\tt.Fatal(\"expected date\")\n\t}\n}\n"
  },
  {
    "path": "export_test.go",
    "content": "//go:build integration\n// +build integration\n\npackage gocql\n\nvar FlagRunSslTest = flagRunSslTest\nvar FlagDistribution = flagDistribution\nvar CreateCluster = createCluster\nvar TestLogger = &testLogger{}\nvar WaitUntilPoolsStopFilling = waitUntilPoolsStopFilling\n\nfunc GetRingAllHosts(sess *Session) []*HostInfo {\n\treturn sess.hostSource.getHostsList()\n}\n"
  },
  {
    "path": "filters.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport \"fmt\"\n\n// HostFilter interface is used when a host is discovered via server sent events.\ntype HostFilter interface {\n\t// Called when a new host is discovered, returning true will cause the host\n\t// to be added to the pools.\n\tAccept(host *HostInfo) bool\n}\n\n// HostFilterFunc converts a func(host HostInfo) bool into a HostFilter\ntype HostFilterFunc func(host *HostInfo) bool\n\nfunc (fn HostFilterFunc) Accept(host *HostInfo) bool {\n\treturn fn(host)\n}\n\n// AcceptAllFilter will accept all hosts\nfunc AcceptAllFilter() HostFilter {\n\treturn HostFilterFunc(func(host *HostInfo) bool {\n\t\treturn true\n\t})\n}\n\nfunc DenyAllFilter() HostFilter {\n\treturn HostFilterFunc(func(host *HostInfo) bool {\n\t\treturn false\n\t})\n}\n\n// DataCenterHostFilter filters all hosts such that they are in the same data center\n// as the supplied data center.\nfunc 
DataCenterHostFilter(dataCenter string) HostFilter {\n\treturn HostFilterFunc(func(host *HostInfo) bool {\n\t\treturn host.DataCenter() == dataCenter\n\t})\n}\n\n// Deprecated: Use DataCenterHostFilter instead.\n// DataCentreHostFilter is an alias that doesn't use the preferred spelling.\nfunc DataCentreHostFilter(dataCenter string) HostFilter {\n\treturn DataCenterHostFilter(dataCenter)\n}\n\n// WhiteListHostFilter filters incoming hosts by checking that their address is\n// in the initial hosts whitelist. It probes all known addresses of a host\n// (connect, rpc, broadcast, listen, peer, preferred, translated CQL) for a match.\nfunc WhiteListHostFilter(hosts ...string) HostFilter {\n\thostInfos, err := resolveInitialEndpoints(defaultDnsResolver, hosts, 9042, nopLogger{})\n\tif err != nil {\n\t\t// don't want to panic here, but rather not break the API\n\t\tpanic(fmt.Errorf(\"unable to lookup host info from address: %v\", err))\n\t}\n\n\tm := make(map[string]bool, len(hostInfos))\n\tfor _, host := range hostInfos {\n\t\tm[host.ConnectAddress().String()] = true\n\t}\n\n\treturn HostFilterFunc(func(host *HostInfo) bool {\n\t\thost.mu.RLock()\n\t\tdefer host.mu.RUnlock()\n\n\t\tif validIpAddr(host.rpcAddress) && m[host.rpcAddress.String()] {\n\t\t\treturn true\n\t\t}\n\t\tif validIpAddr(host.broadcastAddress) && m[host.broadcastAddress.String()] {\n\t\t\treturn true\n\t\t}\n\t\tif validIpAddr(host.listenAddress) && m[host.listenAddress.String()] {\n\t\t\treturn true\n\t\t}\n\t\tif validIpAddr(host.peer) && m[host.peer.String()] {\n\t\t\treturn true\n\t\t}\n\t\tif validIpAddr(host.preferredIP) && m[host.preferredIP.String()] {\n\t\t\treturn true\n\t\t}\n\t\tif host.translatedAddresses != nil && host.translatedAddresses.CQL.IsValid() && m[host.translatedAddresses.CQL.Address.String()] {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n}\n"
  },
  {
    "path": "filters_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"net\"\n\t\"testing\"\n)\n\nfunc TestFilter_WhiteList(t *testing.T) {\n\tt.Parallel()\n\n\tf := WhiteListHostFilter(\"127.0.0.1\", \"127.0.0.2\")\n\ttests := [...]struct {\n\t\taddr   net.IP\n\t\taccept bool\n\t}{\n\t\t{net.ParseIP(\"127.0.0.1\"), true},\n\t\t{net.ParseIP(\"127.0.0.2\"), true},\n\t\t{net.ParseIP(\"127.0.0.3\"), false},\n\t}\n\n\tfor i, test := range tests {\n\t\tif f.Accept(&HostInfo{rpcAddress: test.addr}) {\n\t\t\tif !test.accept {\n\t\t\t\tt.Errorf(\"%d: should not have been accepted but was\", i)\n\t\t\t}\n\t\t} else if test.accept {\n\t\t\tt.Errorf(\"%d: should have been accepted but wasn't\", i)\n\t\t}\n\t}\n}\n\nfunc TestFilter_AllowAll(t *testing.T) {\n\tt.Parallel()\n\n\tf := AcceptAllFilter()\n\ttests := [...]struct {\n\t\taddr   net.IP\n\t\taccept bool\n\t}{\n\t\t{net.ParseIP(\"127.0.0.1\"), 
true},\n\t\t{net.ParseIP(\"127.0.0.2\"), true},\n\t\t{net.ParseIP(\"127.0.0.3\"), true},\n\t}\n\n\tfor i, test := range tests {\n\t\tif f.Accept(&HostInfo{connectAddress: test.addr}) {\n\t\t\tif !test.accept {\n\t\t\t\tt.Errorf(\"%d: should not have been accepted but was\", i)\n\t\t\t}\n\t\t} else if test.accept {\n\t\t\tt.Errorf(\"%d: should have been accepted but wasn't\", i)\n\t\t}\n\t}\n}\n\nfunc TestFilter_DenyAll(t *testing.T) {\n\tt.Parallel()\n\n\tf := DenyAllFilter()\n\ttests := [...]struct {\n\t\taddr   net.IP\n\t\taccept bool\n\t}{\n\t\t{net.ParseIP(\"127.0.0.1\"), false},\n\t\t{net.ParseIP(\"127.0.0.2\"), false},\n\t\t{net.ParseIP(\"127.0.0.3\"), false},\n\t}\n\n\tfor i, test := range tests {\n\t\tif f.Accept(&HostInfo{connectAddress: test.addr}) {\n\t\t\tif !test.accept {\n\t\t\t\tt.Errorf(\"%d: should not have been accepted but was\", i)\n\t\t\t}\n\t\t} else if test.accept {\n\t\t\tt.Errorf(\"%d: should have been accepted but wasn't\", i)\n\t\t}\n\t}\n}\n\nfunc TestFilter_WhiteList_MatchesRPCAddress(t *testing.T) {\n\tt.Parallel()\n\n\tf := WhiteListHostFilter(\"127.0.0.1\")\n\n\thost := &HostInfo{\n\t\tconnectAddress: net.ParseIP(\"10.0.0.1\"),\n\t\trpcAddress:     net.ParseIP(\"127.0.0.1\"),\n\t}\n\tif !f.Accept(host) {\n\t\tt.Error(\"should have been accepted via rpcAddress but wasn't\")\n\t}\n}\n\nfunc TestFilter_WhiteList_MatchesBroadcastAddress(t *testing.T) {\n\tt.Parallel()\n\n\tf := WhiteListHostFilter(\"127.0.0.1\")\n\n\thost := &HostInfo{\n\t\tconnectAddress:   net.ParseIP(\"10.0.0.1\"),\n\t\tbroadcastAddress: net.ParseIP(\"127.0.0.1\"),\n\t}\n\tif !f.Accept(host) {\n\t\tt.Error(\"should have been accepted via broadcastAddress but wasn't\")\n\t}\n}\n\nfunc TestFilter_WhiteList_MatchesListenAddress(t *testing.T) {\n\tt.Parallel()\n\n\tf := WhiteListHostFilter(\"127.0.0.1\")\n\n\thost := &HostInfo{\n\t\tconnectAddress: net.ParseIP(\"10.0.0.1\"),\n\t\tlistenAddress:  net.ParseIP(\"127.0.0.1\"),\n\t}\n\tif !f.Accept(host) 
{\n\t\tt.Error(\"should have been accepted via listenAddress but wasn't\")\n\t}\n}\n\nfunc TestFilter_WhiteList_MatchesPeer(t *testing.T) {\n\tt.Parallel()\n\n\tf := WhiteListHostFilter(\"127.0.0.1\")\n\n\thost := &HostInfo{\n\t\tconnectAddress: net.ParseIP(\"10.0.0.1\"),\n\t\tpeer:           net.ParseIP(\"127.0.0.1\"),\n\t}\n\tif !f.Accept(host) {\n\t\tt.Error(\"should have been accepted via peer but wasn't\")\n\t}\n}\n\nfunc TestFilter_WhiteList_MatchesPreferredIP(t *testing.T) {\n\tt.Parallel()\n\n\tf := WhiteListHostFilter(\"127.0.0.1\")\n\n\thost := &HostInfo{\n\t\tconnectAddress: net.ParseIP(\"10.0.0.1\"),\n\t\tpreferredIP:    net.ParseIP(\"127.0.0.1\"),\n\t}\n\tif !f.Accept(host) {\n\t\tt.Error(\"should have been accepted via preferredIP but wasn't\")\n\t}\n}\n\nfunc TestFilter_WhiteList_MatchesTranslatedAddress(t *testing.T) {\n\tt.Parallel()\n\n\tf := WhiteListHostFilter(\"127.0.0.1\")\n\n\thost := &HostInfo{\n\t\tconnectAddress: net.ParseIP(\"10.0.0.1\"),\n\t\ttranslatedAddresses: &translatedAddresses{\n\t\t\tCQL: AddressPort{Address: net.ParseIP(\"127.0.0.1\"), Port: 9042},\n\t\t},\n\t}\n\tif !f.Accept(host) {\n\t\tt.Error(\"should have been accepted via translatedAddresses but wasn't\")\n\t}\n}\n\nfunc TestFilter_WhiteList_NoMatchWhenNoAddressMatches(t *testing.T) {\n\tt.Parallel()\n\n\tf := WhiteListHostFilter(\"127.0.0.1\")\n\n\thost := &HostInfo{\n\t\tconnectAddress:   net.ParseIP(\"10.0.0.1\"),\n\t\trpcAddress:       net.ParseIP(\"10.0.0.2\"),\n\t\tbroadcastAddress: net.ParseIP(\"10.0.0.3\"),\n\t\tlistenAddress:    net.ParseIP(\"10.0.0.4\"),\n\t\tpeer:             net.ParseIP(\"10.0.0.5\"),\n\t\tpreferredIP:      net.ParseIP(\"10.0.0.6\"),\n\t}\n\tif f.Accept(host) {\n\t\tt.Error(\"should not have been accepted but was\")\n\t}\n}\n\nfunc TestFilter_WhiteList_EmptyHost(t *testing.T) {\n\tt.Parallel()\n\n\tf := WhiteListHostFilter(\"127.0.0.1\")\n\n\thost := &HostInfo{}\n\tif f.Accept(host) {\n\t\tt.Error(\"empty host should not have been 
accepted\")\n\t}\n}\n\nfunc TestFilter_DataCenter(t *testing.T) {\n\tt.Parallel()\n\n\tf := DataCenterHostFilter(\"dc1\")\n\tfDeprecated := DataCentreHostFilter(\"dc1\")\n\n\ttests := [...]struct {\n\t\tdc     string\n\t\taccept bool\n\t}{\n\t\t{\"dc1\", true},\n\t\t{\"dc2\", false},\n\t}\n\n\tfor i, test := range tests {\n\t\tif f.Accept(&HostInfo{dataCenter: test.dc}) {\n\t\t\tif !test.accept {\n\t\t\t\tt.Errorf(\"%d: should not have been accepted but was\", i)\n\t\t\t}\n\t\t} else if test.accept {\n\t\t\tt.Errorf(\"%d: should have been accepted but wasn't\", i)\n\t\t}\n\n\t\tif f.Accept(&HostInfo{dataCenter: test.dc}) != fDeprecated.Accept(&HostInfo{dataCenter: test.dc}) {\n\t\t\tt.Errorf(\"%d: DataCenterHostFilter and DataCentreHostFilter should be the same\", i)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "frame.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2012, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n)\n\ntype unsetColumn struct{}\n\n// UnsetValue represents a value used in a query binding that will be ignored by Cassandra.\n//\n// By setting a field to the unset value Cassandra will ignore the write completely.\n// The main advantage is the ability to keep the same prepared statement even when you don't\n// want to update some fields, where before you needed to make another prepared statement.\n//\n// UnsetValue is only available when using the version 4 of the protocol.\nvar UnsetValue = unsetColumn{}\n\ntype namedValue struct {\n\tvalue any\n\tname  string\n}\n\n// NamedValue produce a value which will bind to the named parameter in a query\nfunc 
NamedValue(name string, value any) any {\n\treturn &namedValue{\n\t\tname:  name,\n\t\tvalue: value,\n\t}\n}\n\nconst (\n\tprotoDirectionMask = 0x80\n\tprotoVersionMask   = 0x7F\n\tprotoVersion1      = 0x01\n\tprotoVersion2      = 0x02\n\tprotoVersion3      = 0x03\n\tprotoVersion4      = 0x04\n\tprotoVersion5      = 0x05\n\n\tmaxFrameSize = 256 * 1024 * 1024\n)\n\n// DEPRECATED use Consistency type, SerialConsistency is now an alias for backwards compatibility.\ntype SerialConsistency = Consistency\n\ntype Consistency uint16\n\nconst (\n\tAny         Consistency = 0x00\n\tOne         Consistency = 0x01\n\tTwo         Consistency = 0x02\n\tThree       Consistency = 0x03\n\tQuorum      Consistency = 0x04\n\tAll         Consistency = 0x05\n\tLocalQuorum Consistency = 0x06\n\tEachQuorum  Consistency = 0x07\n\tSerial      Consistency = 0x08\n\tLocalSerial Consistency = 0x09\n\tLocalOne    Consistency = 0x0A\n)\n\nfunc (c Consistency) String() string {\n\tswitch c {\n\tcase Any:\n\t\treturn \"ANY\"\n\tcase One:\n\t\treturn \"ONE\"\n\tcase Two:\n\t\treturn \"TWO\"\n\tcase Three:\n\t\treturn \"THREE\"\n\tcase Quorum:\n\t\treturn \"QUORUM\"\n\tcase All:\n\t\treturn \"ALL\"\n\tcase LocalQuorum:\n\t\treturn \"LOCAL_QUORUM\"\n\tcase EachQuorum:\n\t\treturn \"EACH_QUORUM\"\n\tcase Serial:\n\t\treturn \"SERIAL\"\n\tcase LocalSerial:\n\t\treturn \"LOCAL_SERIAL\"\n\tcase LocalOne:\n\t\treturn \"LOCAL_ONE\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"UNKNOWN_CONS_0x%x\", uint16(c))\n\t}\n}\n\nfunc (c Consistency) IsSerial() bool {\n\treturn c == Serial || c == LocalSerial\n}\n\nfunc (c Consistency) MarshalText() (text []byte, err error) {\n\treturn []byte(c.String()), nil\n}\n\nfunc (c *Consistency) UnmarshalText(text []byte) error {\n\tswitch string(text) {\n\tcase \"ANY\":\n\t\t*c = Any\n\tcase \"ONE\":\n\t\t*c = One\n\tcase \"TWO\":\n\t\t*c = Two\n\tcase \"THREE\":\n\t\t*c = Three\n\tcase \"QUORUM\":\n\t\t*c = Quorum\n\tcase \"ALL\":\n\t\t*c = All\n\tcase \"LOCAL_QUORUM\":\n\t\t*c = 
LocalQuorum\n\tcase \"EACH_QUORUM\":\n\t\t*c = EachQuorum\n\tcase \"SERIAL\":\n\t\t*c = Serial\n\tcase \"LOCAL_SERIAL\":\n\t\t*c = LocalSerial\n\tcase \"LOCAL_ONE\":\n\t\t*c = LocalOne\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid consistency %q\", string(text))\n\t}\n\treturn nil\n}\n\nfunc ParseConsistency(s string) Consistency {\n\tvar c Consistency\n\tif err := c.UnmarshalText([]byte(strings.ToUpper(s))); err != nil {\n\t\tpanic(err)\n\t}\n\treturn c\n}\n\n// ParseConsistencyWrapper wraps gocql.ParseConsistency to provide an err\n// return instead of a panic\nfunc ParseConsistencyWrapper(s string) (consistency Consistency, err error) {\n\terr = consistency.UnmarshalText([]byte(strings.ToUpper(s)))\n\treturn\n}\n\nconst (\n\tapacheCassandraTypePrefix = \"org.apache.cassandra.db.marshal.\"\n)\n\nvar (\n\tErrFrameTooBig = errors.New(\"frame length is bigger than the maximum allowed\")\n)\n\nfunc readInt(p []byte) int32 {\n\treturn int32(binary.BigEndian.Uint32(p[:4]))\n}\n\nconst defaultBufSize = 128\n\ntype ObservedFrameHeader struct {\n\t// StartHeader is the time we started reading the frame header off the network connection.\n\tStart time.Time\n\t// EndHeader is the time we finished reading the frame header off the network connection.\n\tEnd time.Time\n\t// Host is Host of the connection the frame header was read from.\n\tHost    *HostInfo\n\tLength  int32\n\tStream  int16\n\tVersion frm.ProtoVersion\n\tFlags   byte\n\tOpcode  frm.Op\n}\n\nfunc (f ObservedFrameHeader) String() string {\n\treturn fmt.Sprintf(\"[observed header version=%s flags=0x%x stream=%d op=%s length=%d]\", f.Version, f.Flags, f.Stream, f.Opcode, f.Length)\n}\n\n// FrameHeaderObserver is the interface implemented by frame observers / stat collectors.\n//\n// Experimental, this interface and use may change\ntype FrameHeaderObserver interface {\n\t// ObserveFrameHeader gets called on every received frame header.\n\tObserveFrameHeader(context.Context, ObservedFrameHeader)\n}\n\n// 
framerInterface represents a frame reader/writer for the CQL protocol.\n//\n// Framers are pooled and reused. Any byte slices returned from frame parsing\n// methods may be backed by pooled buffers that are reused after Release() is\n// called. If data must outlive the framer, use readBytesCopy() instead of\n// readBytes() when implementing parseFrame(), or copy returned byte slices\n// before calling Release().\n//\n// After Release() is called, the framer and any slices derived from its\n// buffers must not be accessed.\ntype framerInterface interface {\n\tReadBytesInternal() ([]byte, error)\n\tGetCustomPayload() map[string][]byte\n\tGetHeaderWarnings() []string\n\t// Release returns the framer to its pool (if pooled).\n\t// Must be called when the framer is no longer needed.\n\t// Safe to call multiple times; subsequent calls are no-ops.\n\tRelease()\n}\n\nconst headSize = 9\n\n// a framer is responsible for reading, writing and parsing frames on a single stream\ntype framer struct {\n\tcompressor            Compressor\n\theader                *frm.FrameHeader\n\tcustomPayload         map[string][]byte\n\trelease               func()\n\ttraceID               []byte\n\treadBuffer            []byte\n\tbuf                   []byte\n\tflagLWT               int\n\trateLimitingErrorCode int\n\tflags                 byte\n\tproto                 byte\n\ttabletsRoutingV1      bool\n\treleased              atomic.Bool\n}\n\nfunc newFramer(compressor Compressor, version byte) *framer {\n\tbuf := make([]byte, defaultBufSize)\n\tf := &framer{\n\t\tbuf:        buf[:0],\n\t\treadBuffer: buf,\n\t}\n\tvar flags byte\n\tif compressor != nil {\n\t\tflags |= frm.FlagCompress\n\t}\n\tif version == protoVersion5 {\n\t\tflags |= frm.FlagBetaProtocol\n\t}\n\n\tversion &= protoVersionMask\n\tf.compressor = compressor\n\tf.proto = version\n\tf.flags = flags\n\tf.header = nil\n\tf.traceID = nil\n\n\tf.tabletsRoutingV1 = false\n\n\treturn f\n}\n\n// Release returns the framer to its pool. 
If the framer was not obtained\n// from a pool (release is nil), this is a no-op.\n//\n// Conn.releaseFramer owns the released-state guard, so this method delegates\n// directly to the release closure.\nfunc (f *framer) Release() {\n\tif f.release != nil {\n\t\tf.release()\n\t}\n}\n\nfunc newFramerWithExts(compressor Compressor, version byte, cqlProtoExts []cqlProtocolExtension, logger StdLogger) *framer {\n\n\tf := newFramer(compressor, version)\n\n\tif lwtExt := findCQLProtoExtByName(cqlProtoExts, lwtAddMetadataMarkKey); lwtExt != nil {\n\t\tcastedExt, ok := lwtExt.(*lwtAddMetadataMarkExt)\n\t\tif !ok {\n\t\t\tlogger.Println(\n\t\t\t\tfmt.Errorf(\"failed to cast CQL protocol extension identified by name %s to type %T\",\n\t\t\t\t\tlwtAddMetadataMarkKey, lwtAddMetadataMarkExt{}))\n\t\t\treturn f\n\t\t}\n\t\tf.flagLWT = castedExt.lwtOptMetaBitMask\n\t}\n\n\tif rateLimitErrorExt := findCQLProtoExtByName(cqlProtoExts, rateLimitError); rateLimitErrorExt != nil {\n\t\tcastedExt, ok := rateLimitErrorExt.(*rateLimitExt)\n\t\tif !ok {\n\t\t\tlogger.Println(\n\t\t\t\tfmt.Errorf(\"failed to cast CQL protocol extension identified by name %s to type %T\",\n\t\t\t\t\trateLimitError, rateLimitExt{}))\n\t\t\treturn f\n\t\t}\n\t\tf.rateLimitingErrorCode = castedExt.rateLimitErrorCode\n\t}\n\n\tif tabletsExt := findCQLProtoExtByName(cqlProtoExts, tabletsRoutingV1); tabletsExt != nil {\n\t\t_, ok := tabletsExt.(*tabletsRoutingV1Ext)\n\t\tif !ok {\n\t\t\tlogger.Println(\n\t\t\t\tfmt.Errorf(\"failed to cast CQL protocol extension identified by name %s to type %T\",\n\t\t\t\t\ttabletsRoutingV1, tabletsRoutingV1Ext{}))\n\t\t\treturn f\n\t\t}\n\t\tf.tabletsRoutingV1 = true\n\t}\n\n\treturn f\n}\n\ntype frame interface {\n\tHeader() frm.FrameHeader\n}\n\nfunc readHeader(r io.Reader, p []byte) (head frm.FrameHeader, err error) {\n\t_, err = io.ReadFull(r, p[:headSize])\n\tif err != nil {\n\t\treturn frm.FrameHeader{}, err\n\t}\n\n\thead.Version = frm.ProtoVersion(p[0])\n\tversion := 
head.Version.Version()\n\n\tif version < protoVersion3 || version > protoVersion5 {\n\t\treturn frm.FrameHeader{}, fmt.Errorf(\"gocql: unsupported protocol response version: %d\", version)\n\t}\n\n\thead.Flags = p[1]\n\n\thead.Stream = int(int16(binary.BigEndian.Uint16(p[2:4])))\n\thead.Op = frm.Op(p[4])\n\thead.Length = int(readInt(p[5:]))\n\n\treturn head, nil\n}\n\n// explicitly enables tracing for the framers outgoing requests\nfunc (f *framer) trace() {\n\tf.flags |= frm.FlagTracing\n}\n\n// explicitly enables the custom payload flag\nfunc (f *framer) payload() {\n\tf.flags |= frm.FlagCustomPayload\n}\n\n// reads a frame form the wire into the framers buffer\nfunc (f *framer) readFrame(r io.Reader, head *frm.FrameHeader) error {\n\tif head.Length < 0 {\n\t\treturn fmt.Errorf(\"frame body length can not be less than 0: %d\", head.Length)\n\t} else if head.Length > maxFrameSize {\n\t\t// need to free up the connection to be used again\n\t\t_, err := io.CopyN(io.Discard, r, int64(head.Length))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error whilst trying to discard frame with invalid length: %v\", err)\n\t\t}\n\t\treturn ErrFrameTooBig\n\t}\n\n\tif cap(f.readBuffer) >= head.Length {\n\t\tf.buf = f.readBuffer[:head.Length]\n\t} else {\n\t\tf.readBuffer = make([]byte, head.Length)\n\t\tf.buf = f.readBuffer\n\t}\n\n\t// assume the underlying reader takes care of timeouts and retries\n\tn, err := io.ReadFull(r, f.buf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to read frame body: read %d/%d bytes: %v\", n, head.Length, err)\n\t}\n\n\tif head.Flags&frm.FlagCompress == frm.FlagCompress {\n\t\tif f.compressor == nil {\n\t\t\treturn NewErrProtocol(\"no compressor available with compressed frame body\")\n\t\t}\n\n\t\tf.buf, err = f.compressor.Decode(f.buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tf.header = head\n\treturn nil\n}\n\nfunc (f *framer) parseFrame() (frame frame, err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil 
{\n\t\t\tif _, ok := r.(runtime.Error); ok {\n\t\t\t\tpanic(r)\n\t\t\t}\n\t\t\terr = r.(error)\n\t\t}\n\t}()\n\n\tif f.header.Version.Request() {\n\t\treturn nil, NewErrProtocol(\"got a request frame from server: %v\", f.header.Version)\n\t}\n\n\tif f.header.Flags&frm.FlagTracing == frm.FlagTracing {\n\t\tf.readTrace()\n\t}\n\n\tif f.header.Flags&frm.FlagWarning == frm.FlagWarning {\n\t\tf.header.Warnings = f.readStringList()\n\t}\n\n\tif f.header.Flags&frm.FlagCustomPayload == frm.FlagCustomPayload {\n\t\tf.customPayload = f.readBytesMap()\n\t}\n\n\t// assumes that the frame body has been read into rbuf\n\tswitch f.header.Op {\n\tcase frm.OpError:\n\t\tframe = f.parseErrorFrame()\n\tcase frm.OpReady:\n\t\tframe = f.parseReadyFrame()\n\tcase frm.OpResult:\n\t\tframe, err = f.parseResultFrame()\n\tcase frm.OpSupported:\n\t\tframe = f.parseSupportedFrame()\n\tcase frm.OpAuthenticate:\n\t\tframe = f.parseAuthenticateFrame()\n\tcase frm.OpAuthChallenge:\n\t\tframe = f.parseAuthChallengeFrame()\n\tcase frm.OpAuthSuccess:\n\t\tframe = f.parseAuthSuccessFrame()\n\tcase frm.OpEvent:\n\t\tframe = f.parseEventFrame()\n\tdefault:\n\t\treturn nil, NewErrProtocol(\"unknown op in frame header: %s\", f.header.Op)\n\t}\n\n\treturn\n}\n\nfunc (f *framer) parseErrorFrame() frame {\n\tcode := f.readInt()\n\tmsg := f.readString()\n\n\terrD := frm.ErrorFrame{\n\t\tFrameHeader: *f.header,\n\t\tCode:        code,\n\t\tMessage:     msg,\n\t}\n\n\tswitch code {\n\tcase ErrCodeUnavailable:\n\t\tcl := f.readConsistency()\n\t\trequired := f.readInt()\n\t\talive := f.readInt()\n\t\treturn &RequestErrUnavailable{\n\t\t\tErrorFrame:  errD,\n\t\t\tConsistency: cl,\n\t\t\tRequired:    required,\n\t\t\tAlive:       alive,\n\t\t}\n\tcase ErrCodeWriteTimeout:\n\t\tcl := f.readConsistency()\n\t\treceived := f.readInt()\n\t\tblockfor := f.readInt()\n\t\twriteType := f.readString()\n\t\treturn &RequestErrWriteTimeout{\n\t\t\tErrorFrame:  errD,\n\t\t\tConsistency: cl,\n\t\t\tReceived:    
received,\n\t\t\tBlockFor:    blockfor,\n\t\t\tWriteType:   writeType,\n\t\t}\n\tcase ErrCodeReadTimeout:\n\t\tcl := f.readConsistency()\n\t\treceived := f.readInt()\n\t\tblockfor := f.readInt()\n\t\tdataPresent := f.readByte()\n\t\treturn &RequestErrReadTimeout{\n\t\t\tErrorFrame:  errD,\n\t\t\tConsistency: cl,\n\t\t\tReceived:    received,\n\t\t\tBlockFor:    blockfor,\n\t\t\tDataPresent: dataPresent,\n\t\t}\n\tcase ErrCodeAlreadyExists:\n\t\tks := f.readString()\n\t\ttable := f.readString()\n\t\treturn &RequestErrAlreadyExists{\n\t\t\tErrorFrame: errD,\n\t\t\tKeyspace:   ks,\n\t\t\tTable:      table,\n\t\t}\n\tcase ErrCodeUnprepared:\n\t\treturn &RequestErrUnprepared{\n\t\t\tErrorFrame:  errD,\n\t\t\tStatementId: f.readShortBytesCopy(),\n\t\t}\n\tcase ErrCodeReadFailure:\n\t\tres := &RequestErrReadFailure{\n\t\t\tErrorFrame: errD,\n\t\t}\n\t\tres.Consistency = f.readConsistency()\n\t\tres.Received = f.readInt()\n\t\tres.BlockFor = f.readInt()\n\t\tif f.proto > protoVersion4 {\n\t\t\tres.ErrorMap = f.readErrorMap()\n\t\t\tres.NumFailures = len(res.ErrorMap)\n\t\t} else {\n\t\t\tres.NumFailures = f.readInt()\n\t\t}\n\t\tres.DataPresent = f.readByte() != 0\n\n\t\treturn res\n\tcase ErrCodeWriteFailure:\n\t\tres := &RequestErrWriteFailure{\n\t\t\tErrorFrame: errD,\n\t\t}\n\t\tres.Consistency = f.readConsistency()\n\t\tres.Received = f.readInt()\n\t\tres.BlockFor = f.readInt()\n\t\tif f.proto > protoVersion4 {\n\t\t\tres.ErrorMap = f.readErrorMap()\n\t\t\tres.NumFailures = len(res.ErrorMap)\n\t\t} else {\n\t\t\tres.NumFailures = f.readInt()\n\t\t}\n\t\tres.WriteType = f.readString()\n\t\treturn res\n\tcase ErrCodeFunctionFailure:\n\t\tres := &RequestErrFunctionFailure{\n\t\t\tErrorFrame: errD,\n\t\t}\n\t\tres.Keyspace = f.readString()\n\t\tres.Function = f.readString()\n\t\tres.ArgTypes = f.readStringList()\n\t\treturn res\n\n\tcase ErrCodeCDCWriteFailure:\n\t\tres := &RequestErrCDCWriteFailure{\n\t\t\tErrorFrame: errD,\n\t\t}\n\t\treturn res\n\tcase 
ErrCodeCASWriteUnknown:\n\t\tres := &RequestErrCASWriteUnknown{\n\t\t\tErrorFrame: errD,\n\t\t}\n\t\tres.Consistency = f.readConsistency()\n\t\tres.Received = f.readInt()\n\t\tres.BlockFor = f.readInt()\n\t\treturn res\n\tcase ErrCodeInvalid, ErrCodeBootstrapping, ErrCodeConfig, ErrCodeCredentials, ErrCodeOverloaded,\n\t\tErrCodeProtocol, ErrCodeServer, ErrCodeSyntax, ErrCodeTruncate, ErrCodeUnauthorized:\n\t\t// TODO(zariel): we should have some distinct types for these errors\n\t\treturn errD\n\tdefault:\n\t\tif f.rateLimitingErrorCode != 0 && code == f.rateLimitingErrorCode {\n\t\t\tres := &RequestErrRateLimitReached{\n\t\t\t\tErrorFrame: errD,\n\t\t\t}\n\t\t\tres.OpType = OpType(f.readByte())\n\t\t\tres.RejectedByCoordinator = f.readByte() != 0\n\t\t\treturn res\n\t\t} else {\n\t\t\treturn &UnknownServerError{\n\t\t\t\tErrorFrame: errD,\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (f *framer) readErrorMap() (errMap ErrorMap) {\n\terrMap = make(ErrorMap)\n\tnumErrs := f.readInt()\n\tfor i := 0; i < numErrs; i++ {\n\t\tip := f.readInetAdressOnly().String()\n\t\terrMap[ip] = f.readShort()\n\t}\n\treturn\n}\n\nfunc (f *framer) writeHeader(flags byte, op frm.Op, stream int) {\n\tf.buf = append(f.buf[:0],\n\t\tf.proto, flags, byte(stream>>8), byte(stream),\n\t\t// pad out length\n\t\tbyte(op), 0, 0, 0, 0,\n\t)\n}\n\nfunc (f *framer) setLength(length int) {\n\tf.buf[5] = byte(length >> 24)\n\tf.buf[6] = byte(length >> 16)\n\tf.buf[7] = byte(length >> 8)\n\tf.buf[8] = byte(length)\n}\n\nfunc (f *framer) finish() error {\n\tbufLen := len(f.buf)\n\tif bufLen > maxFrameSize {\n\t\t// huge app frame, lets remove it so it doesn't bloat the heap\n\t\tf.buf = make([]byte, defaultBufSize)\n\t\treturn ErrFrameTooBig\n\t}\n\n\tif f.buf[1]&frm.FlagCompress == frm.FlagCompress {\n\t\tif f.compressor == nil {\n\t\t\tpanic(\"compress flag set with no compressor\")\n\t\t}\n\n\t\t// TODO: only compress frames which are big enough\n\t\tcompressed, err := 
f.compressor.Encode(f.buf[headSize:])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf.buf = append(f.buf[:headSize], compressed...)\n\t\tbufLen = len(f.buf)\n\t}\n\tlength := bufLen - headSize\n\tf.setLength(length)\n\n\treturn nil\n}\n\nfunc (f *framer) writeTo(w io.Writer) error {\n\t_, err := w.Write(f.buf)\n\treturn err\n}\n\nfunc (f *framer) readTrace() {\n\tif len(f.buf) < 16 {\n\t\tpanic(fmt.Errorf(\"not enough bytes in buffer to read trace uuid require 16 got: %d\", len(f.buf)))\n\t}\n\tif len(f.traceID) != 16 {\n\t\tf.traceID = make([]byte, 16)\n\t}\n\tcopy(f.traceID, f.buf[:16])\n\tf.buf = f.buf[16:]\n}\n\nfunc (f *framer) parseReadyFrame() frame {\n\treturn &frm.ReadyFrame{\n\t\tFrameHeader: *f.header,\n\t}\n}\n\n// TODO: if we move the body buffer onto the frameHeader then we only need a single\n// framer, and can move the methods onto the header.\nfunc (f *framer) parseSupportedFrame() frame {\n\treturn &frm.SupportedFrame{\n\t\tFrameHeader: *f.header,\n\n\t\tSupported: f.readStringMultiMap(),\n\t}\n}\n\ntype writeStartupFrame struct {\n\topts map[string]string\n}\n\nfunc (w writeStartupFrame) String() string {\n\treturn fmt.Sprintf(\"[startup opts=%+v]\", w.opts)\n}\n\nfunc (w *writeStartupFrame) buildFrame(f *framer, streamID int) error {\n\tf.writeHeader(f.flags&^frm.FlagCompress, frm.OpStartup, streamID)\n\tf.writeStringMap(w.opts)\n\n\treturn f.finish()\n}\n\ntype writePrepareFrame struct {\n\tcustomPayload map[string][]byte\n\tstatement     string\n\tkeyspace      string\n}\n\nfunc (w *writePrepareFrame) buildFrame(f *framer, streamID int) error {\n\tif len(w.customPayload) > 0 {\n\t\tf.payload()\n\t}\n\tf.writeHeader(f.flags, frm.OpPrepare, streamID)\n\tf.writeCustomPayload(&w.customPayload)\n\tf.writeLongString(w.statement)\n\n\tvar flags uint32 = 0\n\tif w.keyspace != \"\" {\n\t\tif f.proto > protoVersion4 {\n\t\t\tflags |= frm.FlagWithPreparedKeyspace\n\t\t} else {\n\t\t\tpanic(fmt.Errorf(\"the keyspace can only be set with protocol 5 
or higher\"))\n\t\t}\n\t}\n\tif f.proto > protoVersion4 {\n\t\tf.writeUint(flags)\n\t}\n\tif w.keyspace != \"\" {\n\t\tf.writeString(w.keyspace)\n\t}\n\n\treturn f.finish()\n}\n\nfunc (f *framer) readTypeInfo() TypeInfo {\n\t// TODO: factor this out so the same code paths can be used to parse custom\n\t// types and other types, as much of the logic will be duplicated.\n\tid := f.readShort()\n\n\tsimple := NativeType{\n\t\tproto: f.proto,\n\t\ttyp:   Type(id),\n\t}\n\n\t// Fast path for simple native types (through TypeDuration).\n\tif id > 0 && id <= uint16(TypeDuration) {\n\t\treturn simple\n\t}\n\n\tif simple.typ == TypeCustom {\n\t\tsimple.custom = f.readString()\n\t\tif cassType := getApacheCassandraType(simple.custom); cassType != TypeCustom {\n\t\t\tsimple.typ = cassType\n\t\t}\n\t}\n\n\tswitch simple.typ {\n\tcase TypeTuple:\n\t\tn := f.readShort()\n\t\ttuple := TupleTypeInfo{\n\t\t\tNativeType: simple,\n\t\t\tElems:      make([]TypeInfo, n),\n\t\t}\n\n\t\tfor i := 0; i < int(n); i++ {\n\t\t\ttuple.Elems[i] = f.readTypeInfo()\n\t\t}\n\n\t\treturn tuple\n\n\tcase TypeUDT:\n\t\tudt := UDTTypeInfo{\n\t\t\tNativeType: simple,\n\t\t}\n\t\tudt.KeySpace = f.readString()\n\t\tudt.Name = f.readString()\n\n\t\tn := f.readShort()\n\t\tudt.Elements = make([]UDTField, n)\n\t\tfor i := 0; i < int(n); i++ {\n\t\t\tfield := &udt.Elements[i]\n\t\t\tfield.Name = f.readString()\n\t\t\tfield.Type = f.readTypeInfo()\n\t\t}\n\n\t\treturn udt\n\tcase TypeMap, TypeList, TypeSet:\n\t\tcollection := CollectionType{\n\t\t\tNativeType: simple,\n\t\t}\n\n\t\tif simple.typ == TypeMap {\n\t\t\tcollection.Key = f.readTypeInfo()\n\t\t}\n\n\t\tcollection.Elem = f.readTypeInfo()\n\n\t\treturn collection\n\tcase TypeCustom:\n\t\tvectorTypePrefix := apacheCassandraTypePrefix + \"VectorType\"\n\t\tif strings.HasPrefix(simple.custom, vectorTypePrefix) {\n\t\t\tspec := strings.TrimPrefix(simple.custom, vectorTypePrefix)\n\t\t\tspec = spec[1 : len(spec)-1] // remove parenthesis\n\t\t\tidx := 
strings.LastIndex(spec, \",\")\n\t\t\ttypeStr := spec[:idx]\n\t\t\tdimStr := spec[idx+1:]\n\t\t\tsubType := getCassandraLongType(strings.TrimSpace(typeStr), f.proto, nopLogger{})\n\t\t\tdim, _ := strconv.Atoi(strings.TrimSpace(dimStr))\n\t\t\tvector := VectorType{\n\t\t\t\tNativeType: simple,\n\t\t\t\tSubType:    subType,\n\t\t\t\tDimensions: dim,\n\t\t\t}\n\t\t\treturn vector\n\t\t}\n\t}\n\n\treturn simple\n}\n\ntype preparedMetadata struct {\n\tkeyspace string\n\ttable    string\n\t// proto v4+\n\tpkeyColumns []int\n\tresultMetadata\n\t// LWT query detected\n\tlwt bool\n}\n\nfunc (r preparedMetadata) String() string {\n\treturn fmt.Sprintf(\"[prepared flags=0x%x pkey=%v paging_state=% X columns=%v col_count=%d actual_col_count=%d lwt=%t]\",\n\t\tr.flags, r.pkeyColumns, r.pagingState, r.columns, r.colCount, r.actualColCount, r.lwt)\n}\n\nfunc (f *framer) parsePreparedMetadata() preparedMetadata {\n\t// TODO: deduplicate this from parseMetadata\n\tmeta := preparedMetadata{}\n\n\tmeta.flags = f.readInt()\n\tmeta.colCount = f.readInt()\n\tif meta.colCount < 0 {\n\t\tpanic(fmt.Errorf(\"received negative column count: %d\", meta.colCount))\n\t}\n\tmeta.actualColCount = meta.colCount\n\n\tif f.proto >= protoVersion4 {\n\t\tpkeyCount := f.readInt()\n\t\tpkeys := make([]int, pkeyCount)\n\t\tfor i := 0; i < pkeyCount; i++ {\n\t\t\tpkeys[i] = int(f.readShort())\n\t\t}\n\t\tmeta.pkeyColumns = pkeys\n\t}\n\n\tmeta.lwt = meta.flags&f.flagLWT == f.flagLWT\n\n\tif meta.flags&frm.FlagHasMorePages == frm.FlagHasMorePages {\n\t\tmeta.pagingState = f.readBytesCopy()\n\t}\n\n\tif meta.flags&frm.FlagNoMetaData == frm.FlagNoMetaData {\n\t\treturn meta\n\t}\n\n\tglobalSpec := meta.flags&frm.FlagGlobalTableSpec == frm.FlagGlobalTableSpec\n\tif globalSpec {\n\t\tmeta.keyspace = f.readString()\n\t\tmeta.table = f.readString()\n\t}\n\n\tvar cols []ColumnInfo\n\treadPerColumnSpec := !globalSpec\n\tvar tracker keyspaceTableTracker\n\tif meta.colCount < 1000 {\n\t\t// preallocate columninfo to 
avoid excess copying\n\t\tcols = make([]ColumnInfo, meta.colCount)\n\t\tfor i := 0; i < meta.colCount; i++ {\n\t\t\tcol := &cols[i]\n\t\t\tkeyspace, table := f.readColWithSpec(col, &meta.resultMetadata, globalSpec, meta.keyspace, meta.table, i, readPerColumnSpec)\n\t\t\tif readPerColumnSpec {\n\t\t\t\ttracker.track(i, keyspace, table)\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// use append, huge number of columns usually indicates a corrupt frame or\n\t\t// just a huge row.\n\t\tfor i := 0; i < meta.colCount; i++ {\n\t\t\tvar col ColumnInfo\n\t\t\tkeyspace, table := f.readColWithSpec(&col, &meta.resultMetadata, globalSpec, meta.keyspace, meta.table, i, readPerColumnSpec)\n\t\t\tif readPerColumnSpec {\n\t\t\t\ttracker.track(i, keyspace, table)\n\t\t\t}\n\t\t\tcols = append(cols, col)\n\t\t}\n\t}\n\n\tif !globalSpec && meta.colCount > 0 && tracker.allSame {\n\t\tmeta.keyspace = tracker.keyspace\n\t\tmeta.table = tracker.table\n\t}\n\n\tmeta.columns = cols\n\n\treturn meta\n}\n\ntype resultMetadata struct {\n\tpagingState []byte\n\t// this is a count of the total number of columns which can be scanned,\n\t// it is at minimum len(columns) but may be larger, for instance when a column\n\t// is a UDT or tuple.\n\tcolumns        []ColumnInfo\n\tflags          int\n\tcolCount       int\n\tactualColCount int\n}\n\nfunc (r *resultMetadata) morePages() bool {\n\treturn r.flags&frm.FlagHasMorePages == frm.FlagHasMorePages\n}\n\nfunc (r resultMetadata) String() string {\n\treturn fmt.Sprintf(\"[metadata flags=0x%x paging_state=% X columns=%v]\", r.flags, r.pagingState, r.columns)\n}\n\n// keyspaceTableTracker tracks whether all columns share the same keyspace/table.\ntype keyspaceTableTracker struct {\n\tkeyspace string\n\ttable    string\n\tallSame  bool\n}\n\nfunc (t *keyspaceTableTracker) track(colIndex int, keyspace, table string) {\n\tif colIndex == 0 {\n\t\tt.keyspace = keyspace\n\t\tt.table = table\n\t\tt.allSame = true\n\t} else if t.allSame && (keyspace != t.keyspace || table 
!= t.table) {\n\t\tt.allSame = false\n\t}\n}\n\nfunc (f *framer) readColWithSpec(col *ColumnInfo, meta *resultMetadata, globalSpec bool, keyspace, table string, colIndex int, readPerColumnSpec bool) (string, string) {\n\tif readPerColumnSpec {\n\t\t// Per-column table spec encoding: read keyspace/table for this column.\n\t\tcol.Keyspace = f.readString()\n\t\tcol.Table = f.readString()\n\t} else {\n\t\tif !globalSpec && colIndex != 0 {\n\t\t\t// Skip per-column keyspace/table already read from column 0.\n\t\t\tf.skipString()\n\t\t\tf.skipString()\n\t\t}\n\t\tcol.Keyspace = keyspace\n\t\tcol.Table = table\n\t}\n\n\tcol.Name = f.readString()\n\tcol.TypeInfo = f.readTypeInfo()\n\tif tuple, ok := col.TypeInfo.(TupleTypeInfo); ok {\n\t\t// -1 because we already included the tuple column\n\t\tmeta.actualColCount += len(tuple.Elems) - 1\n\t}\n\n\treturn col.Keyspace, col.Table\n}\n\nfunc (f *framer) parseResultMetadata() resultMetadata {\n\tvar meta resultMetadata\n\n\tmeta.flags = f.readInt()\n\tmeta.colCount = f.readInt()\n\tif meta.colCount < 0 {\n\t\tpanic(fmt.Errorf(\"received negative column count: %d\", meta.colCount))\n\t}\n\tmeta.actualColCount = meta.colCount\n\n\tif meta.flags&frm.FlagHasMorePages == frm.FlagHasMorePages {\n\t\tmeta.pagingState = f.readBytesCopy()\n\t}\n\n\tif meta.flags&frm.FlagNoMetaData == frm.FlagNoMetaData {\n\t\treturn meta\n\t}\n\n\tglobalSpec := meta.flags&frm.FlagGlobalTableSpec == frm.FlagGlobalTableSpec\n\n\t// Read keyspace/table once and reuse for all columns. 
ROWS results are\n\t// always single-table; when !globalSpec this consumes column 0's wire\n\t// values and readColWithSpec skips the rest via skipString().\n\tvar keyspace, table string\n\tif globalSpec || meta.colCount > 0 {\n\t\tkeyspace = f.readString()\n\t\ttable = f.readString()\n\t}\n\n\tvar cols []ColumnInfo\n\tif meta.colCount < 1000 {\n\t\t// preallocate columninfo to avoid excess copying\n\t\tcols = make([]ColumnInfo, meta.colCount)\n\t\tfor i := 0; i < meta.colCount; i++ {\n\t\t\tf.readColWithSpec(&cols[i], &meta, globalSpec, keyspace, table, i, false)\n\t\t}\n\n\t} else {\n\t\t// use append, huge number of columns usually indicates a corrupt frame or\n\t\t// just a huge row.\n\t\tfor i := 0; i < meta.colCount; i++ {\n\t\t\tvar col ColumnInfo\n\t\t\tf.readColWithSpec(&col, &meta, globalSpec, keyspace, table, i, false)\n\t\t\tcols = append(cols, col)\n\t\t}\n\t}\n\n\tmeta.columns = cols\n\n\treturn meta\n}\n\ntype resultVoidFrame struct {\n\tfrm.FrameHeader\n}\n\nfunc (f *resultVoidFrame) String() string {\n\treturn \"[result_void]\"\n}\n\nfunc (f *framer) parseResultFrame() (frame, error) {\n\tkind := f.readInt()\n\n\tswitch kind {\n\tcase frm.ResultKindVoid:\n\t\treturn &resultVoidFrame{FrameHeader: *f.header}, nil\n\tcase frm.ResultKindRows:\n\t\treturn f.parseResultRows(), nil\n\tcase frm.ResultKindKeyspace:\n\t\treturn f.parseResultSetKeyspace(), nil\n\tcase frm.ResultKindPrepared:\n\t\treturn f.parseResultPrepared(), nil\n\tcase frm.ResultKindSchemaChanged:\n\t\treturn f.parseResultSchemaChange(), nil\n\t}\n\n\treturn nil, NewErrProtocol(\"unknown result kind: %x\", kind)\n}\n\ntype resultRowsFrame struct {\n\tfrm.FrameHeader\n\n\tmeta resultMetadata\n\t// dont parse the rows here as we only need to do it once\n\tnumRows int\n}\n\nfunc (f *resultRowsFrame) String() string {\n\treturn fmt.Sprintf(\"[result_rows meta=%v]\", f.meta)\n}\n\nfunc (f *framer) parseResultRows() frame {\n\tresult := &resultRowsFrame{}\n\tresult.meta = 
f.parseResultMetadata()\n\n\tresult.numRows = f.readInt()\n\tif result.numRows < 0 {\n\t\tpanic(fmt.Errorf(\"invalid row_count in result frame: %d\", result.numRows))\n\t}\n\n\treturn result\n}\n\ntype resultKeyspaceFrame struct {\n\tkeyspace string\n\tfrm.FrameHeader\n}\n\nfunc (r *resultKeyspaceFrame) String() string {\n\treturn fmt.Sprintf(\"[result_keyspace keyspace=%s]\", r.keyspace)\n}\n\nfunc (f *framer) parseResultSetKeyspace() frame {\n\treturn &resultKeyspaceFrame{\n\t\tFrameHeader: *f.header,\n\t\tkeyspace:    f.readString(),\n\t}\n}\n\ntype resultPreparedFrame struct {\n\tpreparedID []byte\n\trespMeta   resultMetadata\n\tfrm.FrameHeader\n\treqMeta preparedMetadata\n}\n\nfunc (f *framer) parseResultPrepared() frame {\n\tframe := &resultPreparedFrame{\n\t\tFrameHeader: *f.header,\n\t\tpreparedID:  f.readShortBytesCopy(),\n\t\treqMeta:     f.parsePreparedMetadata(),\n\t}\n\n\tframe.respMeta = f.parseResultMetadata()\n\n\treturn frame\n}\n\nfunc (f *framer) parseResultSchemaChange() frame {\n\tchange := f.readString()\n\ttarget := f.readString()\n\n\t// TODO: could just use a separate type for each target\n\tswitch target {\n\tcase \"KEYSPACE\":\n\t\treturn &frm.SchemaChangeKeyspace{\n\t\t\tFrameHeader: *f.header,\n\t\t\tChange:      change,\n\t\t\tKeyspace:    f.readString(),\n\t\t}\n\tcase \"TABLE\":\n\t\treturn &frm.SchemaChangeTable{\n\t\t\tFrameHeader: *f.header,\n\t\t\tChange:      change,\n\t\t\tKeyspace:    f.readString(),\n\t\t\tObject:      f.readString(),\n\t\t}\n\tcase \"TYPE\":\n\t\treturn &frm.SchemaChangeType{\n\t\t\tFrameHeader: *f.header,\n\t\t\tChange:      change,\n\t\t\tKeyspace:    f.readString(),\n\t\t\tObject:      f.readString(),\n\t\t}\n\tcase \"FUNCTION\":\n\t\treturn &frm.SchemaChangeFunction{\n\t\t\tFrameHeader: *f.header,\n\t\t\tChange:      change,\n\t\t\tKeyspace:    f.readString(),\n\t\t\tName:        f.readString(),\n\t\t\tArgs:        f.readStringList(),\n\t\t}\n\tcase \"AGGREGATE\":\n\t\treturn 
&frm.SchemaChangeAggregate{\n\t\t\tFrameHeader: *f.header,\n\t\t\tChange:      change,\n\t\t\tKeyspace:    f.readString(),\n\t\t\tName:        f.readString(),\n\t\t\tArgs:        f.readStringList(),\n\t\t}\n\tdefault:\n\t\tpanic(fmt.Errorf(\"gocql: unknown SCHEMA_CHANGE target: %q change: %q\", target, change))\n\t}\n}\n\nfunc (f *framer) parseAuthenticateFrame() frame {\n\treturn &frm.AuthenticateFrame{\n\t\tFrameHeader: *f.header,\n\t\tClass:       f.readString(),\n\t}\n}\n\nfunc (f *framer) parseAuthSuccessFrame() frame {\n\treturn &frm.AuthSuccessFrame{\n\t\tFrameHeader: *f.header,\n\t\tData:        f.readBytesCopy(),\n\t}\n}\n\nfunc (f *framer) parseAuthChallengeFrame() frame {\n\treturn &frm.AuthChallengeFrame{\n\t\tFrameHeader: *f.header,\n\t\tData:        f.readBytesCopy(),\n\t}\n}\n\nfunc (f *framer) parseEventFrame() frame {\n\teventType := f.readString()\n\n\tswitch eventType {\n\tcase \"TOPOLOGY_CHANGE\":\n\t\tframe := &frm.TopologyChangeEventFrame{FrameHeader: *f.header}\n\t\tframe.Change = f.readString()\n\t\tframe.Host, frame.Port = f.readInet()\n\n\t\treturn frame\n\tcase \"STATUS_CHANGE\":\n\t\tframe := &frm.StatusChangeEventFrame{FrameHeader: *f.header}\n\t\tframe.Change = f.readString()\n\t\tframe.Host, frame.Port = f.readInet()\n\n\t\treturn frame\n\tcase \"SCHEMA_CHANGE\":\n\t\t// this should work for all versions\n\t\treturn f.parseResultSchemaChange()\n\tcase \"CLIENT_ROUTES_CHANGE\":\n\t\treturn &frm.ClientRoutesChanged{\n\t\t\tFrameHeader:   *f.header,\n\t\t\tChangeType:    f.readString(),\n\t\t\tConnectionIDs: f.readStringList(),\n\t\t\tHostIDs:       f.readStringList(),\n\t\t}\n\tdefault:\n\t\tpanic(fmt.Errorf(\"gocql: unknown event type: %q\", eventType))\n\t}\n\n}\n\ntype writeAuthResponseFrame struct {\n\tdata []byte\n}\n\nfunc (a *writeAuthResponseFrame) String() string {\n\treturn fmt.Sprintf(\"[auth_response data=%q]\", a.data)\n}\n\nfunc (a *writeAuthResponseFrame) buildFrame(framer *framer, streamID int) error {\n\treturn 
framer.writeAuthResponseFrame(streamID, a.data)\n}\n\nfunc (f *framer) writeAuthResponseFrame(streamID int, data []byte) error {\n\tf.writeHeader(f.flags, frm.OpAuthResponse, streamID)\n\tf.writeBytes(data)\n\treturn f.finish()\n}\n\ntype queryValues struct {\n\tname    string\n\tvalue   []byte\n\tisUnset bool\n}\n\ntype queryParams struct {\n\tkeyspace              string\n\tvalues                []queryValues\n\tpagingState           []byte\n\tpageSize              int\n\tdefaultTimestampValue int64\n\tconsistency           Consistency\n\tserialConsistency     Consistency\n\tskipMeta              bool\n\tdefaultTimestamp      bool\n}\n\nfunc (q queryParams) String() string {\n\treturn fmt.Sprintf(\"[query_params consistency=%v skip_meta=%v page_size=%d paging_state=%q serial_consistency=%v default_timestamp=%v values=%v keyspace=%s]\",\n\t\tq.consistency, q.skipMeta, q.pageSize, q.pagingState, q.serialConsistency, q.defaultTimestamp, q.values, q.keyspace)\n}\n\nfunc (f *framer) writeQueryParams(opts *queryParams) {\n\tf.writeConsistency(opts.consistency)\n\n\tvar flags byte\n\tif len(opts.values) > 0 {\n\t\tflags |= frm.FlagValues\n\t}\n\tif opts.skipMeta {\n\t\tflags |= frm.FlagSkipMetaData\n\t}\n\tif opts.pageSize > 0 {\n\t\tflags |= frm.FlagPageSize\n\t}\n\tif len(opts.pagingState) > 0 {\n\t\tflags |= frm.FlagWithPagingState\n\t}\n\tif opts.serialConsistency > 0 {\n\t\tflags |= frm.FlagWithSerialConsistency\n\t}\n\n\tnames := false\n\n\t// protoV3 specific things\n\tif opts.defaultTimestamp {\n\t\tflags |= frm.FlagDefaultTimestamp\n\t}\n\n\tif len(opts.values) > 0 && opts.values[0].name != \"\" {\n\t\tflags |= frm.FlagWithNameValues\n\t\tnames = true\n\t}\n\n\tif opts.keyspace != \"\" {\n\t\tif f.proto > protoVersion4 {\n\t\t\tflags |= frm.FlagWithKeyspace\n\t\t} else {\n\t\t\tpanic(fmt.Errorf(\"the keyspace can only be set with protocol 5 or higher\"))\n\t\t}\n\t}\n\n\tif f.proto > protoVersion4 {\n\t\tf.writeUint(uint32(flags))\n\t} else 
{\n\t\tf.writeByte(flags)\n\t}\n\n\tif n := len(opts.values); n > 0 {\n\t\tf.writeShort(uint16(n))\n\n\t\tfor i := 0; i < n; i++ {\n\t\t\tif names {\n\t\t\t\tf.writeString(opts.values[i].name)\n\t\t\t}\n\t\t\tif opts.values[i].isUnset {\n\t\t\t\tf.writeUnset()\n\t\t\t} else {\n\t\t\t\tf.writeBytes(opts.values[i].value)\n\t\t\t}\n\t\t}\n\t}\n\n\tif opts.pageSize > 0 {\n\t\tf.writeInt(int32(opts.pageSize))\n\t}\n\n\tif len(opts.pagingState) > 0 {\n\t\tf.writeBytes(opts.pagingState)\n\t}\n\n\tif opts.serialConsistency > 0 {\n\t\tf.writeConsistency(opts.serialConsistency)\n\t}\n\n\tif opts.defaultTimestamp {\n\t\t// timestamp in microseconds\n\t\tvar ts int64\n\t\tif opts.defaultTimestampValue != 0 {\n\t\t\tts = opts.defaultTimestampValue\n\t\t} else {\n\t\t\tts = time.Now().UnixNano() / 1000\n\t\t}\n\t\tf.writeLong(ts)\n\t}\n\n\tif opts.keyspace != \"\" {\n\t\tf.writeString(opts.keyspace)\n\t}\n}\n\ntype writeQueryFrame struct {\n\tcustomPayload map[string][]byte\n\tstatement     string\n\tparams        queryParams\n}\n\nfunc (w *writeQueryFrame) String() string {\n\treturn fmt.Sprintf(\"[query statement=%q params=%v]\", w.statement, w.params)\n}\n\nfunc (w *writeQueryFrame) buildFrame(framer *framer, streamID int) error {\n\treturn framer.writeQueryFrame(streamID, w.statement, &w.params, w.customPayload)\n}\n\nfunc (f *framer) writeQueryFrame(streamID int, statement string, params *queryParams, customPayload map[string][]byte) error {\n\tif len(customPayload) > 0 {\n\t\tf.payload()\n\t}\n\tf.writeHeader(f.flags, frm.OpQuery, streamID)\n\tf.writeCustomPayload(&customPayload)\n\tf.writeLongString(statement)\n\tf.writeQueryParams(params)\n\n\treturn f.finish()\n}\n\ntype frameBuilder interface {\n\tbuildFrame(framer *framer, streamID int) error\n}\n\ntype frameWriterFunc func(framer *framer, streamID int) error\n\nfunc (f frameWriterFunc) buildFrame(framer *framer, streamID int) error {\n\treturn f(framer, streamID)\n}\n\ntype writeExecuteFrame struct {\n\tcustomPayload 
map[string][]byte\n\tpreparedID    []byte\n\tparams        queryParams\n}\n\nfunc (e *writeExecuteFrame) String() string {\n\treturn fmt.Sprintf(\"[execute id=% X params=%v]\", e.preparedID, &e.params)\n}\n\nfunc (e *writeExecuteFrame) buildFrame(fr *framer, streamID int) error {\n\treturn fr.writeExecuteFrame(streamID, e.preparedID, &e.params, &e.customPayload)\n}\n\nfunc (f *framer) writeExecuteFrame(streamID int, preparedID []byte, params *queryParams, customPayload *map[string][]byte) error {\n\tif len(*customPayload) > 0 {\n\t\tf.payload()\n\t}\n\tf.writeHeader(f.flags, frm.OpExecute, streamID)\n\tf.writeCustomPayload(customPayload)\n\tf.writeShortBytes(preparedID)\n\tf.writeQueryParams(params)\n\n\treturn f.finish()\n}\n\n// TODO: can we replace BatchStatemt with batchStatement? As they prety much\n// duplicate each other\ntype batchStatment struct {\n\tpreparedID []byte\n\tstatement  string\n\tvalues     []queryValues\n}\n\ntype writeBatchFrame struct {\n\tcustomPayload         map[string][]byte\n\tstatements            []batchStatment\n\tdefaultTimestampValue int64\n\tconsistency           Consistency\n\tserialConsistency     Consistency\n\ttyp                   BatchType\n\tdefaultTimestamp      bool\n}\n\nfunc (w *writeBatchFrame) buildFrame(framer *framer, streamID int) error {\n\treturn framer.writeBatchFrame(streamID, w, w.customPayload)\n}\n\nfunc (f *framer) writeBatchFrame(streamID int, w *writeBatchFrame, customPayload map[string][]byte) error {\n\tif len(customPayload) > 0 {\n\t\tf.payload()\n\t}\n\tf.writeHeader(f.flags, frm.OpBatch, streamID)\n\tf.writeCustomPayload(&customPayload)\n\tf.writeByte(byte(w.typ))\n\n\tn := len(w.statements)\n\tf.writeShort(uint16(n))\n\n\tvar flags byte\n\n\tfor i := 0; i < n; i++ {\n\t\tb := &w.statements[i]\n\t\tif len(b.preparedID) == 0 {\n\t\t\tf.writeByte(0)\n\t\t\tf.writeLongString(b.statement)\n\t\t} else 
{\n\t\t\tf.writeByte(1)\n\t\t\tf.writeShortBytes(b.preparedID)\n\t\t}\n\n\t\tf.writeShort(uint16(len(b.values)))\n\t\tfor j := range b.values {\n\t\t\tcol := b.values[j]\n\t\t\tif col.name != \"\" {\n\t\t\t\t// TODO: move this check into the caller and set a flag on writeBatchFrame\n\t\t\t\t// to indicate using named values\n\t\t\t\tif f.proto <= protoVersion5 {\n\t\t\t\t\treturn fmt.Errorf(\"gocql: named query values are not supported in batches, please see https://issues.apache.org/jira/browse/CASSANDRA-10246\")\n\t\t\t\t}\n\t\t\t\tflags |= frm.FlagWithNameValues\n\t\t\t\tf.writeString(col.name)\n\t\t\t}\n\t\t\tif col.isUnset {\n\t\t\t\tf.writeUnset()\n\t\t\t} else {\n\t\t\t\tf.writeBytes(col.value)\n\t\t\t}\n\t\t}\n\t}\n\n\tf.writeConsistency(w.consistency)\n\n\tif w.serialConsistency > 0 {\n\t\tflags |= frm.FlagWithSerialConsistency\n\t}\n\tif w.defaultTimestamp {\n\t\tflags |= frm.FlagDefaultTimestamp\n\t}\n\n\tif f.proto > protoVersion4 {\n\t\tf.writeUint(uint32(flags))\n\t} else {\n\t\tf.writeByte(flags)\n\t}\n\n\tif w.serialConsistency > 0 {\n\t\tf.writeConsistency(w.serialConsistency)\n\t}\n\n\tif w.defaultTimestamp {\n\t\tvar ts int64\n\t\tif w.defaultTimestampValue != 0 {\n\t\t\tts = w.defaultTimestampValue\n\t\t} else {\n\t\t\tts = time.Now().UnixNano() / 1000\n\t\t}\n\t\tf.writeLong(ts)\n\t}\n\n\treturn f.finish()\n}\n\ntype writeOptionsFrame struct{}\n\nfunc (w *writeOptionsFrame) buildFrame(framer *framer, streamID int) error {\n\treturn framer.writeOptionsFrame(streamID, w)\n}\n\nfunc (f *framer) writeOptionsFrame(stream int, _ *writeOptionsFrame) error {\n\tf.writeHeader(f.flags&^frm.FlagCompress, frm.OpOptions, stream)\n\treturn f.finish()\n}\n\ntype writeRegisterFrame struct {\n\tevents []string\n}\n\nfunc (w *writeRegisterFrame) buildFrame(framer *framer, streamID int) error {\n\treturn framer.writeRegisterFrame(streamID, w)\n}\n\nfunc (f *framer) writeRegisterFrame(streamID int, w *writeRegisterFrame) error {\n\tf.writeHeader(f.flags, 
frm.OpRegister, streamID)\n\tf.writeStringList(w.events)\n\n\treturn f.finish()\n}\n\nfunc (f *framer) readByte() byte {\n\tif len(f.buf) < 1 {\n\t\tpanic(fmt.Errorf(\"not enough bytes in buffer to read byte require 1 got: %d\", len(f.buf)))\n\t}\n\n\tb := f.buf[0]\n\tf.buf = f.buf[1:]\n\treturn b\n}\n\nfunc (f *framer) readInt() (n int) {\n\tif len(f.buf) < 4 {\n\t\tpanic(fmt.Errorf(\"not enough bytes in buffer to read int require 4 got: %d\", len(f.buf)))\n\t}\n\n\tn = int(int32(binary.BigEndian.Uint32(f.buf[:4])))\n\tf.buf = f.buf[4:]\n\treturn\n}\n\nfunc (f *framer) readShort() (n uint16) {\n\tif len(f.buf) < 2 {\n\t\tpanic(fmt.Errorf(\"not enough bytes in buffer to read short require 2 got: %d\", len(f.buf)))\n\t}\n\tn = binary.BigEndian.Uint16(f.buf[:2])\n\tf.buf = f.buf[2:]\n\treturn\n}\n\nfunc (f *framer) readString() (s string) {\n\tsize := f.readShort()\n\n\tif len(f.buf) < int(size) {\n\t\tpanic(fmt.Errorf(\"not enough bytes in buffer to read string require %d got: %d\", size, len(f.buf)))\n\t}\n\n\ts = string(f.buf[:size])\n\tf.buf = f.buf[size:]\n\treturn\n}\n\n// skipString advances past a string without allocating.\nfunc (f *framer) skipString() {\n\tsize := f.readShort()\n\n\tif len(f.buf) < int(size) {\n\t\tpanic(fmt.Errorf(\"not enough bytes in buffer to skip string, requires %d got %d\", size, len(f.buf)))\n\t}\n\n\tf.buf = f.buf[size:]\n}\n\nfunc (f *framer) readLongString() (s string) {\n\tsize := f.readInt()\n\n\tif len(f.buf) < size {\n\t\tpanic(fmt.Errorf(\"not enough bytes in buffer to read long string require %d got: %d\", size, len(f.buf)))\n\t}\n\n\ts = string(f.buf[:size])\n\tf.buf = f.buf[size:]\n\treturn\n}\n\nfunc (f *framer) readStringList() []string {\n\tsize := f.readShort()\n\n\tl := make([]string, size)\n\tfor i := 0; i < int(size); i++ {\n\t\tl[i] = f.readString()\n\t}\n\n\treturn l\n}\n\nfunc (f *framer) ReadBytesInternal() ([]byte, error) {\n\tsize := f.readInt()\n\tif size < 0 {\n\t\treturn nil, nil\n\t}\n\n\tif len(f.buf) < 
size {\n\t\treturn nil, fmt.Errorf(\"not enough bytes in buffer to read bytes require %d got: %d\", size, len(f.buf))\n\t}\n\n\tl := f.buf[:size]\n\tf.buf = f.buf[size:]\n\n\treturn l, nil\n}\n\nfunc (f *framer) readBytesCopy() []byte {\n\tsize := f.readInt()\n\tif size < 0 {\n\t\treturn nil\n\t}\n\n\tif len(f.buf) < size {\n\t\tpanic(fmt.Errorf(\"not enough bytes in buffer to read bytes require %d got: %d\", size, len(f.buf)))\n\t}\n\n\tout := make([]byte, size)\n\tcopy(out, f.buf[:size])\n\tf.buf = f.buf[size:]\n\treturn out\n}\n\nfunc (f *framer) readShortBytesCopy() []byte {\n\tsize := f.readShort()\n\tif len(f.buf) < int(size) {\n\t\tpanic(fmt.Errorf(\"not enough bytes in buffer to read short bytes: require %d got %d\", size, len(f.buf)))\n\t}\n\n\tout := make([]byte, size)\n\tcopy(out, f.buf[:size])\n\tf.buf = f.buf[size:]\n\n\treturn out\n}\n\nfunc (f *framer) readInetAdressOnly() net.IP {\n\tif len(f.buf) < 1 {\n\t\tpanic(fmt.Errorf(\"not enough bytes in buffer to read inet size require %d got: %d\", 1, len(f.buf)))\n\t}\n\n\tsize := f.buf[0]\n\tf.buf = f.buf[1:]\n\n\tif !(size == 4 || size == 16) {\n\t\tpanic(fmt.Errorf(\"invalid IP size: %d\", size))\n\t}\n\n\tif len(f.buf) < int(size) {\n\t\tpanic(fmt.Errorf(\"not enough bytes in buffer to read inet require %d got: %d\", size, len(f.buf)))\n\t}\n\n\tip := make(net.IP, size)\n\tcopy(ip, f.buf[:size])\n\tf.buf = f.buf[size:]\n\treturn ip\n}\n\nfunc (f *framer) readInet() (net.IP, int) {\n\treturn f.readInetAdressOnly(), f.readInt()\n}\n\nfunc (f *framer) readConsistency() Consistency {\n\treturn Consistency(f.readShort())\n}\n\nfunc (f *framer) readBytesMap() map[string][]byte {\n\tsize := f.readShort()\n\tm := make(map[string][]byte, size)\n\n\tfor i := 0; i < int(size); i++ {\n\t\tm[f.readString()] = f.readBytesCopy()\n\t}\n\n\treturn m\n}\n\nfunc (f *framer) readStringMultiMap() map[string][]string {\n\tsize := f.readShort()\n\tm := make(map[string][]string, size)\n\n\tfor i := 0; i < int(size); i++ 
{\n\t\tk := f.readString()\n\t\tv := f.readStringList()\n\t\tm[k] = v\n\t}\n\n\treturn m\n}\n\nfunc (f *framer) writeByte(b byte) {\n\tf.buf = append(f.buf, b)\n}\n\nfunc appendBytes(p []byte, d []byte) []byte {\n\tif d == nil {\n\t\treturn appendIntNeg1(p)\n\t}\n\tp = appendInt(p, int32(len(d)))\n\tp = append(p, d...)\n\treturn p\n}\n\nfunc appendShort(p []byte, n uint16) []byte {\n\treturn append(p,\n\t\tbyte(n>>8),\n\t\tbyte(n),\n\t)\n}\n\nfunc appendInt(p []byte, n int32) []byte {\n\treturn append(p, byte(n>>24),\n\t\tbyte(n>>16),\n\t\tbyte(n>>8),\n\t\tbyte(n))\n}\n\nfunc appendIntNeg1(p []byte) []byte {\n\treturn append(p, 255, 255, 255, 255)\n}\n\nfunc appendUint(p []byte, n uint32) []byte {\n\treturn append(p, byte(n>>24),\n\t\tbyte(n>>16),\n\t\tbyte(n>>8),\n\t\tbyte(n))\n}\n\nfunc appendLong(p []byte, n int64) []byte {\n\treturn append(p,\n\t\tbyte(n>>56),\n\t\tbyte(n>>48),\n\t\tbyte(n>>40),\n\t\tbyte(n>>32),\n\t\tbyte(n>>24),\n\t\tbyte(n>>16),\n\t\tbyte(n>>8),\n\t\tbyte(n),\n\t)\n}\n\nfunc (f *framer) writeCustomPayload(customPayload *map[string][]byte) {\n\tif len(*customPayload) > 0 {\n\t\tif f.proto < protoVersion4 {\n\t\t\tpanic(\"Custom payload is not supported with version V3 or less\")\n\t\t}\n\t\tf.writeBytesMap(*customPayload)\n\t}\n}\n\nfunc (f *framer) GetCustomPayload() map[string][]byte {\n\treturn f.customPayload\n}\n\nfunc (f *framer) GetHeaderWarnings() []string {\n\treturn f.header.Warnings\n}\n\n// these are protocol level binary types\nfunc (f *framer) writeInt(n int32) {\n\tf.buf = appendInt(f.buf, n)\n}\n\nfunc (f *framer) writeIntNeg1() {\n\tf.buf = appendIntNeg1(f.buf)\n}\n\nfunc (f *framer) writeIntNeg2() {\n\tf.buf = append(f.buf, 255, 255, 255, 254)\n}\n\nfunc (f *framer) writeUint(n uint32) {\n\tf.buf = appendUint(f.buf, n)\n}\n\nfunc (f *framer) writeShort(n uint16) {\n\tf.buf = appendShort(f.buf, n)\n}\n\nfunc (f *framer) writeLong(n int64) {\n\tf.buf = appendLong(f.buf, n)\n}\n\nfunc (f *framer) writeString(s string) 
{\n\tf.writeShort(uint16(len(s)))\n\tf.buf = append(f.buf, s...)\n}\n\nfunc (f *framer) writeLongString(s string) {\n\tf.writeInt(int32(len(s)))\n\tf.buf = append(f.buf, s...)\n}\n\nfunc (f *framer) writeStringList(l []string) {\n\tf.writeShort(uint16(len(l)))\n\tfor _, s := range l {\n\t\tf.writeString(s)\n\t}\n}\n\nfunc (f *framer) writeUnset() {\n\t// Protocol version 4 specifies that bind variables do not require having a\n\t// value when executing a statement.   Bind variables without a value are\n\t// called 'unset'. The 'unset' bind variable is serialized as the int\n\t// value '-2' without following bytes.\n\tf.writeIntNeg2()\n}\n\nfunc (f *framer) writeBytes(p []byte) {\n\t// TODO: handle null case correctly,\n\t//     [bytes]        A [int] n, followed by n bytes if n >= 0. If n < 0,\n\t//\t\t\t\t\t  no byte should follow and the value represented is `null`.\n\tif p == nil {\n\t\tf.writeIntNeg1()\n\t} else {\n\t\tf.writeInt(int32(len(p)))\n\t\tf.buf = append(f.buf, p...)\n\t}\n}\n\nfunc (f *framer) writeShortBytes(p []byte) {\n\tf.writeShort(uint16(len(p)))\n\tf.buf = append(f.buf, p...)\n}\n\nfunc (f *framer) writeConsistency(cons Consistency) {\n\tf.writeShort(uint16(cons))\n}\n\nfunc (f *framer) writeStringMap(m map[string]string) {\n\tf.writeShort(uint16(len(m)))\n\tfor k, v := range m {\n\t\tf.writeString(k)\n\t\tf.writeString(v)\n\t}\n}\n\nfunc (f *framer) writeStringMultiMap(m map[string][]string) {\n\tf.writeShort(uint16(len(m)))\n\tfor k, v := range m {\n\t\tf.writeString(k)\n\t\tf.writeStringList(v)\n\t}\n}\n\nfunc (f *framer) writeBytesMap(m map[string][]byte) {\n\tf.writeShort(uint16(len(m)))\n\tfor k, v := range m {\n\t\tf.writeString(k)\n\t\tf.writeBytes(v)\n\t}\n}\n"
  },
  {
    "path": "frame_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"testing\"\n\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n)\n\nfunc TestFuzzBugs(t *testing.T) {\n\tt.Parallel()\n\n\t// these inputs are found using go-fuzz (https://github.com/dvyukov/go-fuzz)\n\t// and should cause a panic unless fixed.\n\ttests := [][]byte{\n\t\t[]byte(\"00000\\xa0000\"),\n\t\t[]byte(\"\\x8000\\x0e\\x00\\x00\\x00\\x000\"),\n\t\t[]byte(\"\\x8000\\x00\\x00\\x00\\x00\\t0000000000\"),\n\t\t[]byte(\"\\xa0\\xff\\x01\\xae\\xefqE\\xf2\\x1a\"),\n\t\t[]byte(\"\\x8200\\b\\x00\\x00\\x00c\\x00\\x00\\x00\\x02000\\x01\\x00\\x00\\x00\\x03\" +\n\t\t\t\"\\x00\\n0000000000\\x00\\x14000000\" +\n\t\t\t\"00000000000000\\x00\\x020000\" +\n\t\t\t\"\\x00\\a000000000\\x00\\x050000000\" +\n\t\t\t\"\\xff0000000000000000000\" 
+\n\t\t\t\"0000000\"),\n\t\t[]byte(\"\\x82\\xe600\\x00\\x00\\x00\\x000\"),\n\t\t[]byte(\"\\x8200\\b\\x00\\x00\\x00\\b0\\x00\\x00\\x00\\x040000\"),\n\t\t[]byte(\"\\x83000\\b\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x020000000\" +\n\t\t\t\"000000000\"),\n\t\t[]byte(\"\\x83000\\b\\x00\\x00\\x000\\x00\\x00\\x00\\x04\\x00\\x1000000\" +\n\t\t\t\"00000000000000e00000\" +\n\t\t\t\"000\\x800000000000000000\" +\n\t\t\t\"0000000000000\"),\n\t}\n\n\tfor i, test := range tests {\n\t\tt.Logf(\"test %d input: %q\", i, test)\n\n\t\tr := bytes.NewReader(test)\n\t\thead, err := readHeader(r, make([]byte, 9))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tframer := newFramer(nil, byte(head.Version))\n\t\terr = framer.readFrame(r, &head)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tframe, err := framer.parseFrame()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tt.Errorf(\"(%d) expected to fail for input % X\", i, test)\n\t\tt.Errorf(\"(%d) frame=%+#v\", i, frame)\n\t}\n}\n\nfunc TestFrameWriteTooLong(t *testing.T) {\n\tt.Parallel()\n\n\tif os.Getenv(\"TRAVIS\") == \"true\" {\n\t\tt.Skip(\"skipping test in travis due to memory pressure with the race detecor\")\n\t}\n\n\tframer := newFramer(nil, 3)\n\n\tframer.writeHeader(0, frm.OpStartup, 1)\n\tframer.writeBytes(make([]byte, maxFrameSize+1))\n\terr := framer.finish()\n\tif err != ErrFrameTooBig {\n\t\tt.Fatalf(\"expected to get %v got %v\", ErrFrameTooBig, err)\n\t}\n}\n\nfunc TestFrameReadTooLong(t *testing.T) {\n\tt.Parallel()\n\n\tif os.Getenv(\"TRAVIS\") == \"true\" {\n\t\tt.Skip(\"skipping test in travis due to memory pressure with the race detecor\")\n\t}\n\n\tr := &bytes.Buffer{}\n\tr.Write(make([]byte, maxFrameSize+1))\n\t// write a new header right after this frame to verify that we can read it\n\tr.Write([]byte{0x03, 0x00, 0x00, 0x00, byte(frm.OpReady), 0x00, 0x00, 0x00, 0x00})\n\n\tframer := newFramer(nil, 3)\n\n\thead := frm.FrameHeader{\n\t\tVersion: protoVersion3,\n\t\tOp:      frm.OpReady,\n\t\tLength:  r.Len() 
- 9,\n\t}\n\n\terr := framer.readFrame(r, &head)\n\tif err != ErrFrameTooBig {\n\t\tt.Fatalf(\"expected to get %v got %v\", ErrFrameTooBig, err)\n\t}\n\n\thead, err = readHeader(r, make([]byte, 9))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif head.Op != frm.OpReady {\n\t\tt.Fatalf(\"expected to get header %v got %v\", frm.OpReady, head.Op)\n\t}\n}\n\nfunc TestParseResultMetadata_PerColumnSpec(t *testing.T) {\n\tt.Parallel()\n\n\t// Build a synthetic ROWS result metadata frame with FlagGlobalTableSpec unset\n\t// (per-column keyspace/table encoding). This tests the !globalSpec optimization\n\t// in parseResultMetadata() which reads keyspace/table from the first column\n\t// position and reuses them for all columns via skipString().\n\tfr := newFramer(nil, protoVersion4)\n\tfr.header = &frm.FrameHeader{Version: protoVersion4}\n\n\t// flags: no FlagGlobalTableSpec — per-column keyspace/table\n\tfr.writeInt(0)\n\t// colCount\n\tfr.writeInt(3)\n\n\t// Column 0: keyspace/table + name + type\n\tfr.writeString(\"test_ks\")\n\tfr.writeString(\"test_tbl\")\n\tfr.writeString(\"col_a\")\n\tfr.writeShort(uint16(TypeInt))\n\n\t// Column 1: same keyspace/table (will be skipped by optimization)\n\tfr.writeString(\"test_ks\")\n\tfr.writeString(\"test_tbl\")\n\tfr.writeString(\"col_b\")\n\tfr.writeShort(uint16(TypeVarchar))\n\n\t// Column 2: same keyspace/table\n\tfr.writeString(\"test_ks\")\n\tfr.writeString(\"test_tbl\")\n\tfr.writeString(\"col_c\")\n\tfr.writeShort(uint16(TypeBoolean))\n\n\tmeta := fr.parseResultMetadata()\n\n\tif meta.colCount != 3 {\n\t\tt.Fatalf(\"colCount = %d, want 3\", meta.colCount)\n\t}\n\tif len(meta.columns) != 3 {\n\t\tt.Fatalf(\"len(columns) = %d, want 3\", len(meta.columns))\n\t}\n\n\t// Verify all columns got the correct keyspace/table from the optimization\n\tfor i, col := range meta.columns {\n\t\tif col.Keyspace != \"test_ks\" {\n\t\t\tt.Errorf(\"columns[%d].Keyspace = %q, want %q\", i, col.Keyspace, \"test_ks\")\n\t\t}\n\t\tif col.Table != 
\"test_tbl\" {\n\t\t\tt.Errorf(\"columns[%d].Table = %q, want %q\", i, col.Table, \"test_tbl\")\n\t\t}\n\t}\n\n\t// Verify column names\n\texpectedNames := []string{\"col_a\", \"col_b\", \"col_c\"}\n\tfor i, col := range meta.columns {\n\t\tif col.Name != expectedNames[i] {\n\t\t\tt.Errorf(\"columns[%d].Name = %q, want %q\", i, col.Name, expectedNames[i])\n\t\t}\n\t}\n\n\t// Verify column types\n\texpectedTypes := []Type{TypeInt, TypeVarchar, TypeBoolean}\n\tfor i, col := range meta.columns {\n\t\tnt, ok := col.TypeInfo.(NativeType)\n\t\tif !ok {\n\t\t\tt.Fatalf(\"columns[%d].TypeInfo is %T, want NativeType\", i, col.TypeInfo)\n\t\t}\n\t\tif nt.typ != expectedTypes[i] {\n\t\t\tt.Errorf(\"columns[%d].Type = %v, want %v\", i, nt.typ, expectedTypes[i])\n\t\t}\n\t}\n\n\t// Verify the entire buffer was consumed (no misalignment from skipString)\n\tif len(fr.buf) != 0 {\n\t\tt.Errorf(\"buffer has %d unconsumed bytes, want 0 (possible skipString misalignment)\", len(fr.buf))\n\t}\n}\n\nfunc TestParseEventFrame_ClientRoutesChanged(t *testing.T) {\n\tt.Parallel()\n\n\tfr := newFramer(nil, protoVersion4)\n\tfr.header = &frm.FrameHeader{Version: protoVersion4}\n\tfr.writeString(\"CLIENT_ROUTES_CHANGE\")\n\tfr.writeString(\"UPDATED\")\n\tfr.writeStringList([]string{\"c1\", \"\"})\n\tfr.writeStringList([]string{})\n\n\tframe := fr.parseEventFrame()\n\tevt, ok := frame.(*frm.ClientRoutesChanged)\n\tif !ok {\n\t\tt.Fatalf(\"expected ClientRoutesChanged frame, got %T\", frame)\n\t}\n\tif evt.ChangeType != \"UPDATED\" {\n\t\tt.Fatalf(\"ChangeType = %v, want UPDATED\", evt.ChangeType)\n\t}\n\tif len(evt.ConnectionIDs) != 2 || evt.ConnectionIDs[1] != \"\" {\n\t\tt.Fatalf(\"ConnectionIDs = %v, want [c1 \\\"\\\"]\", evt.ConnectionIDs)\n\t}\n\tif len(evt.HostIDs) != 0 {\n\t\tt.Fatalf(\"HostIDs = %v, want empty\", evt.HostIDs)\n\t}\n}\n"
  },
  {
    "path": "framer.go",
    "content": "/*\n * Copyright (C) 2026 ScyllaDB\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage gocql\n\nimport (\n\t\"sync\"\n\t\"sync/atomic\"\n\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n)\n\n// framerPool owns one sync.Pool plus the adaptive buffer-sizing state for one\n// framer usage class.\ntype framerPool struct {\n\tpool       sync.Pool\n\tbufAvgSize atomic.Int64\n\tenabled    atomic.Bool\n}\n\n// connFramers owns connection-scoped framer configuration and reader/writer pools.\ntype connFramers struct {\n\treadPool  framerPool\n\twritePool framerPool\n\tdefaults  framerConfig\n}\n\n// framerConfig holds precomputed default framer parameters for the connection.\n// Populated once during connection setup and used to initialize framers from the pool.\ntype framerConfig struct {\n\tcompressor            Compressor\n\tflagLWT               int\n\trateLimitingErrorCode int\n\tproto                 byte\n\tflags                 byte\n\ttabletsRoutingV1      bool\n}\n\n// framerBufEWMAWeight controls how quickly the exponential weighted moving average\n// of framer buffer sizes adapts. 
A value of 8 means each sample contributes ~12.5%,\n// so it takes roughly 8 samples to converge to a new steady state.\n//\n// Lower values (e.g., 4) adapt faster but are more sensitive to outliers.\n// Higher values (e.g., 16) are more stable but adapt slower to workload changes.\n// The value 8 was chosen as a reasonable balance for typical CQL query patterns.\nconst framerBufEWMAWeight = 8\n\n// framerBufShrinkThreshold is the multiplier applied to the EWMA to decide when a\n// framer's read buffer is too large relative to typical usage and should be shrunk.\nconst framerBufShrinkThreshold = 2\n\n// maxReasonableBufferSize is a safety limit to prevent overflow in EWMA calculations\n// and to catch pathological cases where buffers grow unreasonably large.\nconst maxReasonableBufferSize = 512 * 1024 * 1024 // 512MB\n\n// maxCASRetries is the maximum number of CAS retries for updating EWMA.\n// This prevents infinite loops under extreme contention.\nconst maxCASRetries = 100\n\n// initFramerCache precomputes framer fields from cqlProtoExts so that\n// per-query framer creation avoids repeated linear scans and allocations.\nfunc (c *Conn) initFramerCache() {\n\tc.framers.initCache(c)\n}\n\nfunc (cf *connFramers) initCache(c *Conn) {\n\tcfg := framerConfig{\n\t\tcompressor: c.compressor,\n\t\tproto:      c.version & protoVersionMask,\n\t}\n\tif c.compressor != nil {\n\t\tcfg.flags |= frm.FlagCompress\n\t}\n\tif c.version == protoVersion5 {\n\t\tcfg.flags |= frm.FlagBetaProtocol\n\t}\n\tif lwtExt := findCQLProtoExtByName(c.cqlProtoExts, lwtAddMetadataMarkKey); lwtExt != nil {\n\t\tif castedExt, ok := lwtExt.(*lwtAddMetadataMarkExt); ok {\n\t\t\tcfg.flagLWT = castedExt.lwtOptMetaBitMask\n\t\t} else {\n\t\t\tc.logger.Printf(\"gocql: failed to cast CQL protocol extension %s to %T\", lwtAddMetadataMarkKey, lwtAddMetadataMarkExt{})\n\t\t}\n\t}\n\tif rateLimitErrorExt := findCQLProtoExtByName(c.cqlProtoExts, rateLimitError); rateLimitErrorExt != nil {\n\t\tif castedExt, ok 
:= rateLimitErrorExt.(*rateLimitExt); ok {\n\t\t\tcfg.rateLimitingErrorCode = castedExt.rateLimitErrorCode\n\t\t} else {\n\t\t\tc.logger.Printf(\"gocql: failed to cast CQL protocol extension %s to %T\", rateLimitError, rateLimitExt{})\n\t\t}\n\t}\n\tif tabletsExt := findCQLProtoExtByName(c.cqlProtoExts, tabletsRoutingV1); tabletsExt != nil {\n\t\tif _, ok := tabletsExt.(*tabletsRoutingV1Ext); ok {\n\t\t\tcfg.tabletsRoutingV1 = true\n\t\t} else {\n\t\t\tc.logger.Printf(\"gocql: failed to cast CQL protocol extension %s to %T\", tabletsRoutingV1, tabletsRoutingV1Ext{})\n\t\t}\n\t}\n\tcf.defaults = cfg\n\tc.setTabletSupported(cfg.tabletsRoutingV1)\n\tcf.initPool(c)\n}\n\nfunc (cf *connFramers) initPool(c *Conn) {\n\tdefaults := cf.defaults\n\tcf.readPool.init(defaults, func(f *framer) { c.releaseReadFramer(f) })\n\tcf.writePool.init(defaults, func(f *framer) { c.releaseWriteFramer(f) })\n}\n\n// getReadFramer returns a pooled framer for reading responses and events.\nfunc (c *Conn) getReadFramer() *framer {\n\treturn c.framers.getRead(c)\n}\n\nfunc (cf *connFramers) getRead(c *Conn) *framer {\n\tf := cf.readPool.get(c)\n\tf.released.Store(false)\n\treturn f\n}\n\n// getWriteFramer returns a pooled framer for building outgoing requests.\nfunc (c *Conn) getWriteFramer() *framer {\n\treturn c.framers.getWrite(c)\n}\n\nfunc (cf *connFramers) getWrite(c *Conn) *framer {\n\tf := cf.writePool.get(c)\n\tf.released.Store(false)\n\tf.flags = cf.defaults.flags\n\treturn f\n}\n\n// releaseReadFramer returns a response/event framer to the reader pool.\nfunc (c *Conn) releaseReadFramer(f *framer) {\n\tc.framers.releaseRead(c, f)\n}\n\nfunc (cf *connFramers) releaseRead(c *Conn, f *framer) {\n\tif f == nil {\n\t\treturn\n\t}\n\tif f.released.Swap(true) {\n\t\treturn // already released\n\t}\n\tf.header = nil\n\tf.traceID = nil\n\tf.customPayload = nil\n\tif !cf.readPool.enabled.Load() {\n\t\treturn\n\t}\n\n\tbufCap := int64(cap(f.readBuffer))\n\tnewAvg, success := 
cf.readPool.updateAvg(c.logger, bufCap)\n\tif !success {\n\t\tcf.readPool.resetAndPut(f, false, 0)\n\t\treturn\n\t}\n\tcf.readPool.resetAndPut(f, true, fpShrinkSize(bufCap, newAvg))\n}\n\n// releaseWriteFramer returns a request-builder framer to the writer pool.\nfunc (c *Conn) releaseWriteFramer(f *framer) {\n\tc.framers.releaseWrite(f)\n}\n\nfunc (cf *connFramers) releaseWrite(f *framer) {\n\tif f == nil {\n\t\treturn\n\t}\n\tif f.released.Swap(true) {\n\t\treturn\n\t}\n\tf.header = nil\n\tf.traceID = nil\n\tf.customPayload = nil\n\tf.flags = cf.defaults.flags\n\tif !cf.writePool.enabled.Load() {\n\t\treturn\n\t}\n\tbufCap := int64(cap(f.buf))\n\tnewAvg, success := cf.writePool.updateAvg(nil, bufCap)\n\tif !success {\n\t\tcf.writePool.resetAndPut(f, false, 0)\n\t\treturn\n\t}\n\tcf.writePool.resetAndPut(f, false, fpShrinkSize(bufCap, newAvg))\n}\n\nfunc (cf *connFramers) close() {\n\tcf.readPool.close()\n\tcf.writePool.close()\n}\n\nfunc (fp *framerPool) init(defaults framerConfig, release func(*framer)) {\n\tfp.bufAvgSize.Store(int64(defaultBufSize))\n\tfp.enabled.Store(true)\n\tfp.pool = sync.Pool{\n\t\tNew: func() any {\n\t\t\tbuf := make([]byte, defaultBufSize)\n\t\t\tf := &framer{\n\t\t\t\tbuf:                   buf[:0],\n\t\t\t\treadBuffer:            buf,\n\t\t\t\tcompressor:            defaults.compressor,\n\t\t\t\tproto:                 defaults.proto,\n\t\t\t\tflags:                 defaults.flags,\n\t\t\t\tflagLWT:               defaults.flagLWT,\n\t\t\t\trateLimitingErrorCode: defaults.rateLimitingErrorCode,\n\t\t\t\ttabletsRoutingV1:      defaults.tabletsRoutingV1,\n\t\t\t}\n\t\t\tf.release = func() { release(f) }\n\t\t\treturn f\n\t\t},\n\t}\n}\n\nfunc (fp *framerPool) get(c *Conn) *framer {\n\tif !fp.enabled.Load() {\n\t\treturn newFramer(c.compressor, c.version)\n\t}\n\treturn fp.pool.Get().(*framer)\n}\n\nfunc (fp *framerPool) put(f *framer) {\n\tif !fp.enabled.Load() {\n\t\treturn\n\t}\n\tfp.pool.Put(f)\n}\n\nfunc (fp *framerPool) close() 
{\n\tfp.enabled.Store(false)\n}\n\nfunc (fp *framerPool) updateAvg(logger StdLogger, bufCap int64) (int64, bool) {\n\tif bufCap > maxReasonableBufferSize {\n\t\tbufCap = maxReasonableBufferSize\n\t}\n\tif bufCap < 0 {\n\t\tbufCap = defaultBufSize\n\t}\n\n\tfor i := 0; i < maxCASRetries; i++ {\n\t\tavg := fp.bufAvgSize.Load()\n\t\t// EWMA update with upward-biased rounding: the +framerBufEWMAWeight/2 term\n\t\t// biases the integer division toward ceiling for all deltas. This means:\n\t\t// - When bufCap > avg (growth): the average increases slightly faster\n\t\t// - When bufCap < avg (shrink): the average decreases slightly slower\n\t\t// Both effects are intentional — favoring larger buffers reduces\n\t\t// reallocation churn at the cost of slightly more memory.\n\t\t// In practice, this means the steady-state EWMA settles ~framerBufEWMAWeight/2\n\t\t// bytes above the true average when tracking decreasing buffer sizes.\n\t\tnewAvg := avg + (bufCap-avg+framerBufEWMAWeight/2)/framerBufEWMAWeight\n\t\tif fp.bufAvgSize.CompareAndSwap(avg, newAvg) {\n\t\t\treturn newAvg, true\n\t\t}\n\t}\n\n\tif logger != nil {\n\t\tlogger.Printf(\"gocql: EWMA update failed after %d retries, skipping shrink decision\", maxCASRetries)\n\t}\n\treturn fp.bufAvgSize.Load(), false\n}\n\nfunc fpShrinkSize(bufCap, newAvg int64) int64 {\n\t// If this framer's buffer is much larger than the running average,\n\t// reallocate it to prevent a single large query from permanently\n\t// bloating all pooled framers.\n\tif bufCap <= newAvg*framerBufShrinkThreshold {\n\t\treturn 0\n\t}\n\tif newAvg < defaultBufSize {\n\t\treturn defaultBufSize\n\t}\n\treturn newAvg\n}\n\nfunc (fp *framerPool) resetAndPut(f *framer, alignBufWithReadBuffer bool, shrinkSize int64) {\n\tif shrinkSize > 0 {\n\t\tbuf := make([]byte, shrinkSize)\n\t\tf.readBuffer = buf\n\t\tf.buf = buf[:0]\n\t\tfp.put(f)\n\t\treturn\n\t}\n\tif alignBufWithReadBuffer {\n\t\tf.buf = f.readBuffer[:0]\n\t} else {\n\t\tf.buf = 
f.buf[:0]\n\t}\n\tfp.put(f)\n}\n"
  },
  {
    "path": "framer_bench_test.go",
    "content": "//go:build bench\n// +build bench\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"compress/gzip\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc readGzipData(path string) ([]byte, error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tr, err := gzip.NewReader(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Close()\n\n\treturn io.ReadAll(r)\n}\n\nfunc BenchmarkParseRowsFrame(b *testing.B) {\n\tdata, err := readGzipData(\"testdata/frames/bench_parse_result.gz\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tframer := &framer{\n\t\t\theader: &frameHeader{\n\t\t\t\tversion: protoVersion4 | 0x80,\n\t\t\t\top:      frm.OpResult,\n\t\t\t\tlength:  len(data),\n\t\t\t},\n\t\t\tbuf: data,\n\t\t}\n\n\t\t_, err = framer.parseFrame()\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "go.mod",
    "content": "//\n// Licensed to the Apache Software Foundation (ASF) under one\n// or more contributor license agreements.  See the NOTICE file\n// distributed with this work for additional information\n// regarding copyright ownership.  The ASF licenses this file\n// to you under the Apache License, Version 2.0 (the\n// \"License\"); you may not use this file except in compliance\n// with the License.  You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n//\nmodule github.com/gocql/gocql\n\nrequire (\n\tgithub.com/google/go-cmp v0.7.0\n\tgithub.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed\n\tgithub.com/klauspost/compress v1.18.5\n\tgolang.org/x/net v0.53.0\n\tgolang.org/x/sync v0.20.0\n\tgopkg.in/inf.v0 v0.9.1\n\tsigs.k8s.io/yaml v1.6.0\n)\n\nrequire (\n\tgithub.com/davecgh/go-spew v1.1.1 // indirect\n\tgithub.com/pmezard/go-difflib v1.0.0 // indirect\n\tgo.yaml.in/yaml/v2 v2.4.3 // indirect\n\tgopkg.in/yaml.v3 v3.0.1 // indirect\n)\n\nrequire (\n\tgithub.com/bitly/go-hostpool v0.1.1 // indirect\n\tgithub.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect\n\tgithub.com/google/uuid v1.6.0\n\tgithub.com/kr/pretty v0.3.1 // indirect\n\tgithub.com/stretchr/testify v1.11.1\n)\n\nretract (\n\tv1.10.0 // tag from kiwicom/gocql added by mistake to scylladb/gocql\n\tv1.9.0 // tag from kiwicom/gocql added by mistake to scylladb/gocql\n\tv1.8.1 // tag from kiwicom/gocql added by mistake to scylladb/gocql\n\tv1.8.0 // tag from kiwicom/gocql added by mistake to scylladb/gocql\n)\n\ngo 1.25.0\n"
  },
  {
    "path": "go.sum",
    "content": "github.com/bitly/go-hostpool v0.1.1 h1:SsovT4BFqgJQBAESkk2QgeeL7bqKq9oJie8JnD00R+Q=\ngithub.com/bitly/go-hostpool v0.1.1/go.mod h1:iwXQOF7+y3cO8vituSqGpBYf02TYTzxK4S2c4rf4cJs=\ngithub.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=\ngithub.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=\ngithub.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=\ngithub.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=\ngithub.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=\ngithub.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=\ngithub.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=\ngithub.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE=\ngithub.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ=\ngithub.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=\ngithub.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=\ngithub.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=\ngithub.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=\ngithub.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=\ngithub.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=\ngithub.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=\ngithub.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=\ngo.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=\ngo.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=\ngo.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE=\ngo.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=\ngolang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA=\ngolang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs=\ngolang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=\ngolang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=\ngopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\nsigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=\nsigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=\n"
  },
  {
    "path": "helpers.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2012, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gopkg.in/inf.v0\"\n)\n\ntype RowData struct {\n\tColumns []string\n\tValues  []any\n}\n\n// asVectorType attempts to convert a NativeType(custom) which represents a VectorType\n// into a concrete VectorType. 
It also works recursively (nested vectors).\nfunc asVectorType(t TypeInfo) (VectorType, bool) {\n\tif v, ok := t.(VectorType); ok {\n\t\treturn v, true\n\t}\n\tn, ok := t.(NativeType)\n\tif !ok || n.Type() != TypeCustom {\n\t\treturn VectorType{}, false\n\t}\n\tconst vectorTypePrefix = apacheCassandraTypePrefix + \"VectorType\"\n\tif !strings.HasPrefix(n.Custom(), vectorTypePrefix+\"(\") {\n\t\treturn VectorType{}, false\n\t}\n\n\tspec := strings.TrimPrefix(n.Custom(), vectorTypePrefix)\n\tspec = strings.Trim(spec, \"()\")\n\t// split last comma -> subtype spec , dimensions\n\tidx := strings.LastIndex(spec, \",\")\n\tif idx <= 0 {\n\t\treturn VectorType{}, false\n\t}\n\tsubStr := strings.TrimSpace(spec[:idx])\n\tdimStr := strings.TrimSpace(spec[idx+1:])\n\tdim, err := strconv.Atoi(dimStr)\n\tif err != nil {\n\t\treturn VectorType{}, false\n\t}\n\tsubType := getCassandraLongType(subStr, n.Version(), nopLogger{})\n\t// recurse if subtype itself is still a custom vector\n\tif innerVec, ok := asVectorType(subType); ok {\n\t\tsubType = innerVec\n\t}\n\treturn VectorType{\n\t\tNativeType: NewCustomType(n.Version(), TypeCustom, vectorTypePrefix),\n\t\tSubType:    subType,\n\t\tDimensions: dim,\n\t}, true\n}\n\nfunc goType(t TypeInfo) (reflect.Type, error) {\n\tswitch t.Type() {\n\tcase TypeVarchar, TypeAscii, TypeInet, TypeText:\n\t\treturn reflect.TypeOf(*new(string)), nil\n\tcase TypeBigInt, TypeCounter:\n\t\treturn reflect.TypeOf(*new(int64)), nil\n\tcase TypeTime:\n\t\treturn reflect.TypeOf(*new(time.Duration)), nil\n\tcase TypeTimestamp:\n\t\treturn reflect.TypeOf(*new(time.Time)), nil\n\tcase TypeBlob:\n\t\treturn reflect.TypeOf(*new([]byte)), nil\n\tcase TypeBoolean:\n\t\treturn reflect.TypeOf(*new(bool)), nil\n\tcase TypeFloat:\n\t\treturn reflect.TypeOf(*new(float32)), nil\n\tcase TypeDouble:\n\t\treturn reflect.TypeOf(*new(float64)), nil\n\tcase TypeInt:\n\t\treturn reflect.TypeOf(*new(int)), nil\n\tcase TypeSmallInt:\n\t\treturn reflect.TypeOf(*new(int16)), 
nil\n\tcase TypeTinyInt:\n\t\treturn reflect.TypeOf(*new(int8)), nil\n\tcase TypeDecimal:\n\t\treturn reflect.TypeOf(*new(*inf.Dec)), nil\n\tcase TypeUUID, TypeTimeUUID:\n\t\treturn reflect.TypeOf(*new(UUID)), nil\n\tcase TypeList, TypeSet:\n\t\telemType, err := goType(t.(CollectionType).Elem)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn reflect.SliceOf(elemType), nil\n\tcase TypeMap:\n\t\tkeyType, err := goType(t.(CollectionType).Key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvalueType, err := goType(t.(CollectionType).Elem)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn reflect.MapOf(keyType, valueType), nil\n\tcase TypeVarint:\n\t\treturn reflect.TypeOf(*new(*big.Int)), nil\n\tcase TypeTuple:\n\t\t// what can we do here? all there is to do is to make a list of any\n\t\ttuple := t.(TupleTypeInfo)\n\t\treturn reflect.TypeOf(make([]any, len(tuple.Elems))), nil\n\tcase TypeUDT:\n\t\treturn reflect.TypeOf(make(map[string]any)), nil\n\tcase TypeDate:\n\t\treturn reflect.TypeOf(*new(time.Time)), nil\n\tcase TypeDuration:\n\t\treturn reflect.TypeOf(*new(Duration)), nil\n\tcase TypeCustom:\n\t\t// Handle VectorType encoded as custom\n\t\tif vec, ok := asVectorType(t); ok {\n\t\t\tinnerPtr, err := vec.SubType.NewWithError()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\telemType := reflect.TypeOf(innerPtr)\n\t\t\tif elemType.Kind() == reflect.Ptr {\n\t\t\t\telemType = elemType.Elem()\n\t\t\t}\n\t\t\treturn reflect.SliceOf(elemType), nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"cannot create Go type for unknown CQL type %s\", t)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"cannot create Go type for unknown CQL type %s\", t)\n\t}\n}\n\nfunc dereference(i any) any {\n\t// Fast path: avoid reflect for the common pointer types returned by\n\t// NativeType.NewWithError and used in RowData/MapScan.\n\tswitch v := i.(type) {\n\tcase *string:\n\t\treturn *v\n\tcase *int:\n\t\treturn *v\n\tcase *int64:\n\t\treturn *v\n\tcase 
*int32:\n\t\treturn *v\n\tcase *int16:\n\t\treturn *v\n\tcase *int8:\n\t\treturn *v\n\tcase *float64:\n\t\treturn *v\n\tcase *float32:\n\t\treturn *v\n\tcase *bool:\n\t\treturn *v\n\tcase *[]byte:\n\t\treturn *v\n\tcase *time.Time:\n\t\treturn *v\n\tcase *time.Duration:\n\t\treturn *v\n\tcase *UUID:\n\t\treturn *v\n\tcase *Duration:\n\t\treturn *v\n\tcase *inf.Dec:\n\t\treturn *v\n\tcase *big.Int:\n\t\treturn *v\n\tcase *[]any:\n\t\treturn *v\n\tcase *map[string]any:\n\t\treturn *v\n\tdefault:\n\t\treturn reflect.Indirect(reflect.ValueOf(i)).Interface()\n\t}\n}\n\n// TODO: Cover with unit tests.\n// Parses long Java-style type definition to internal data structures.\nfunc getCassandraLongType(name string, protoVer byte, logger StdLogger) TypeInfo {\n\tconst prefix = apacheCassandraTypePrefix\n\tif strings.HasPrefix(name, prefix+\"SetType\") {\n\t\treturn CollectionType{\n\t\t\tNativeType: NewNativeType(protoVer, TypeSet),\n\t\t\tElem:       getCassandraLongType(unwrapCompositeTypeDefinition(name, prefix+\"SetType\", '('), protoVer, logger),\n\t\t}\n\t} else if strings.HasPrefix(name, prefix+\"ListType\") {\n\t\treturn CollectionType{\n\t\t\tNativeType: NewNativeType(protoVer, TypeList),\n\t\t\tElem:       getCassandraLongType(unwrapCompositeTypeDefinition(name, prefix+\"ListType\", '('), protoVer, logger),\n\t\t}\n\t} else if strings.HasPrefix(name, prefix+\"MapType\") {\n\t\tnames := splitJavaCompositeTypes(name, prefix+\"MapType\")\n\t\tif len(names) != 2 {\n\t\t\tlogger.Printf(\"gocql: error parsing map type, it has %d subelements, expecting 2\\n\", len(names))\n\t\t\treturn NewNativeType(protoVer, TypeCustom)\n\t\t}\n\t\treturn CollectionType{\n\t\t\tNativeType: NewNativeType(protoVer, TypeMap),\n\t\t\tKey:        getCassandraLongType(names[0], protoVer, logger),\n\t\t\tElem:       getCassandraLongType(names[1], protoVer, logger),\n\t\t}\n\t} else if strings.HasPrefix(name, prefix+\"TupleType\") {\n\t\tnames := splitJavaCompositeTypes(name, 
prefix+\"TupleType\")\n\t\ttypes := make([]TypeInfo, len(names))\n\n\t\tfor i, name := range names {\n\t\t\ttypes[i] = getCassandraLongType(name, protoVer, logger)\n\t\t}\n\n\t\treturn TupleTypeInfo{\n\t\t\tNativeType: NewNativeType(protoVer, TypeTuple),\n\t\t\tElems:      types,\n\t\t}\n\t} else if strings.HasPrefix(name, prefix+\"UserType\") {\n\t\tnames := splitJavaCompositeTypes(name, prefix+\"UserType\")\n\t\tfields := make([]UDTField, len(names)-2)\n\n\t\tfor i := 2; i < len(names); i++ {\n\t\t\tspec := strings.Split(names[i], \":\")\n\t\t\tfieldName, _ := hex.DecodeString(spec[0])\n\t\t\tfields[i-2] = UDTField{\n\t\t\t\tName: string(fieldName),\n\t\t\t\tType: getCassandraLongType(spec[1], protoVer, logger),\n\t\t\t}\n\t\t}\n\n\t\tudtName, _ := hex.DecodeString(names[1])\n\t\treturn UDTTypeInfo{\n\t\t\tNativeType: NewNativeType(protoVer, TypeUDT),\n\t\t\tKeySpace:   names[0],\n\t\t\tName:       string(udtName),\n\t\t\tElements:   fields,\n\t\t}\n\t} else if strings.HasPrefix(name, prefix+\"VectorType\") {\n\t\tnames := splitJavaCompositeTypes(name, prefix+\"VectorType\")\n\t\tsubType := getCassandraLongType(strings.TrimSpace(names[0]), protoVer, logger)\n\t\tdim, err := strconv.Atoi(strings.TrimSpace(names[1]))\n\t\tif err != nil {\n\t\t\tlogger.Printf(\"gocql: error parsing vector dimensions: %v\\n\", err)\n\t\t\treturn NewNativeType(protoVer, TypeCustom)\n\t\t}\n\n\t\treturn VectorType{\n\t\t\tNativeType: NewCustomType(protoVer, TypeCustom, prefix+\"VectorType\"),\n\t\t\tSubType:    subType,\n\t\t\tDimensions: dim,\n\t\t}\n\t} else if strings.HasPrefix(name, prefix+\"FrozenType\") {\n\t\tnames := splitJavaCompositeTypes(name, prefix+\"FrozenType\")\n\t\treturn getCassandraLongType(strings.TrimSpace(names[0]), protoVer, logger)\n\t} else {\n\t\t// basic type\n\t\treturn NativeType{\n\t\t\tproto: protoVer,\n\t\t\ttyp:   getApacheCassandraType(name),\n\t\t}\n\t}\n}\n\nfunc splitJavaCompositeTypes(name string, typeName string) []string {\n\treturn 
splitCompositeTypes(name, typeName, '(', ')')\n}\n\nfunc unwrapCompositeTypeDefinition(name string, typeName string, typeOpen int32) string {\n\treturn strings.TrimPrefix(name[:len(name)-1], typeName+string(typeOpen))\n}\n\nfunc splitCompositeTypes(name string, typeName string, typeOpen int32, typeClose int32) []string {\n\tdef := unwrapCompositeTypeDefinition(name, typeName, typeOpen)\n\tif !strings.Contains(def, string(typeOpen)) {\n\t\tparts := strings.Split(def, \",\")\n\t\tfor i := range parts {\n\t\t\tparts[i] = strings.TrimSpace(parts[i])\n\t\t}\n\t\treturn parts\n\t}\n\tvar parts []string\n\tlessCount := 0\n\tsegment := \"\"\n\tfor _, char := range def {\n\t\tif char == ',' && lessCount == 0 {\n\t\t\tif segment != \"\" {\n\t\t\t\tparts = append(parts, strings.TrimSpace(segment))\n\t\t\t}\n\t\t\tsegment = \"\"\n\t\t\tcontinue\n\t\t}\n\t\tsegment += string(char)\n\t\tif char == typeOpen {\n\t\t\tlessCount++\n\t\t} else if char == typeClose {\n\t\t\tlessCount--\n\t\t}\n\t}\n\tif segment != \"\" {\n\t\tparts = append(parts, strings.TrimSpace(segment))\n\t}\n\treturn parts\n}\n\nfunc getApacheCassandraType(class string) Type {\n\tswitch strings.TrimPrefix(class, apacheCassandraTypePrefix) {\n\tcase \"AsciiType\":\n\t\treturn TypeAscii\n\tcase \"LongType\":\n\t\treturn TypeBigInt\n\tcase \"BytesType\":\n\t\treturn TypeBlob\n\tcase \"BooleanType\":\n\t\treturn TypeBoolean\n\tcase \"CounterColumnType\":\n\t\treturn TypeCounter\n\tcase \"DecimalType\":\n\t\treturn TypeDecimal\n\tcase \"DoubleType\":\n\t\treturn TypeDouble\n\tcase \"FloatType\":\n\t\treturn TypeFloat\n\tcase \"Int32Type\":\n\t\treturn TypeInt\n\tcase \"ShortType\":\n\t\treturn TypeSmallInt\n\tcase \"ByteType\":\n\t\treturn TypeTinyInt\n\tcase \"TimeType\":\n\t\treturn TypeTime\n\tcase \"DateType\", \"TimestampType\":\n\t\treturn TypeTimestamp\n\tcase \"UUIDType\", \"LexicalUUIDType\":\n\t\treturn TypeUUID\n\tcase \"UTF8Type\":\n\t\treturn TypeVarchar\n\tcase \"IntegerType\":\n\t\treturn 
TypeVarint\n\tcase \"TimeUUIDType\":\n\t\treturn TypeTimeUUID\n\tcase \"InetAddressType\":\n\t\treturn TypeInet\n\tcase \"MapType\":\n\t\treturn TypeMap\n\tcase \"ListType\":\n\t\treturn TypeList\n\tcase \"SetType\":\n\t\treturn TypeSet\n\tcase \"TupleType\":\n\t\treturn TypeTuple\n\tcase \"DurationType\":\n\t\treturn TypeDuration\n\tcase \"SimpleDateType\":\n\t\treturn TypeDate\n\tcase \"UserType\":\n\t\treturn TypeUDT\n\tdefault:\n\t\treturn TypeCustom\n\t}\n}\n\nfunc (r *RowData) rowMap(m map[string]any) {\n\tfor i, column := range r.Columns {\n\t\tval := dereference(r.Values[i])\n\t\tif valVal := reflect.ValueOf(val); valVal.Kind() == reflect.Slice && !valVal.IsNil() {\n\t\t\tvalCopy := reflect.MakeSlice(valVal.Type(), valVal.Len(), valVal.Cap())\n\t\t\treflect.Copy(valCopy, valVal)\n\t\t\tm[column] = valCopy.Interface()\n\t\t} else {\n\t\t\tm[column] = val\n\t\t}\n\t}\n}\n\n// TupeColumnName will return the column name of a tuple value in a column named\n// c at index n. It should be used if a specific element within a tuple is needed\n// to be extracted from a map returned from SliceMap or MapScan.\nfunc TupleColumnName(c string, n int) string {\n\treturn fmt.Sprintf(\"%s[%d]\", c, n)\n}\n\n// RowData returns the RowData for the iterator.\nfunc (iter *Iter) RowData() (RowData, error) {\n\tif iter.err != nil {\n\t\treturn RowData{}, iter.err\n\t}\n\n\tcolumns, err := iter.getScanColumns()\n\tif err != nil {\n\t\treturn RowData{}, err\n\t}\n\n\tvalues, err := iter.newScanValues()\n\tif err != nil {\n\t\treturn RowData{}, err\n\t}\n\n\treturn RowData{\n\t\tColumns: columns,\n\t\tValues:  values,\n\t}, nil\n}\n\n// getScanColumns returns the cached column names for this iterator,\n// computing them on the first call. 
Column names don't change between\n// rows, so they are computed once and reused.\n//\n// The returned slice is shared across all callers and must not be mutated.\nfunc (iter *Iter) getScanColumns() ([]string, error) {\n\tif iter.scanColumns != nil {\n\t\treturn iter.scanColumns, nil\n\t}\n\n\tactualSize := iter.meta.actualColCount\n\tcolumns := make([]string, actualSize)\n\tidx := 0\n\tfor _, column := range iter.Columns() {\n\t\tif c, ok := column.TypeInfo.(TupleTypeInfo); !ok {\n\t\t\tif idx >= actualSize {\n\t\t\t\terr := fmt.Errorf(\"gocql: column count overflow in RowData: metadata predicted %d columns but encountered more\", actualSize)\n\t\t\t\titer.err = err\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcolumns[idx] = column.Name\n\t\t\tidx++\n\t\t} else {\n\t\t\tfor i := range c.Elems {\n\t\t\t\tif idx >= actualSize {\n\t\t\t\t\terr := fmt.Errorf(\"gocql: column count overflow in RowData: metadata predicted %d columns but encountered more\", actualSize)\n\t\t\t\t\titer.err = err\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tcolumns[idx] = TupleColumnName(column.Name, i)\n\t\t\t\tidx++\n\t\t\t}\n\t\t}\n\t}\n\n\tif idx != actualSize {\n\t\terr := fmt.Errorf(\"gocql: column count mismatch in RowData: metadata predicted %d columns but got %d\", actualSize, idx)\n\t\titer.err = err\n\t\treturn nil, err\n\t}\n\n\titer.scanColumns = columns\n\treturn columns, nil\n}\n\n// newScanValues allocates fresh zero-value pointers for each column,\n// suitable for passing to Scan. 
Values must be freshly allocated each\n// call because Scan mutates them.\nfunc (iter *Iter) newScanValues() ([]any, error) {\n\tactualSize := iter.meta.actualColCount\n\tvalues := make([]any, actualSize)\n\tidx := 0\n\tfor _, column := range iter.Columns() {\n\t\tif c, ok := column.TypeInfo.(TupleTypeInfo); !ok {\n\t\t\tif idx >= actualSize {\n\t\t\t\terr := fmt.Errorf(\"gocql: column count overflow in newScanValues: metadata predicted %d columns but encountered more\", actualSize)\n\t\t\t\titer.err = err\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tval, err := column.TypeInfo.NewWithError()\n\t\t\tif err != nil {\n\t\t\t\titer.err = err\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tvalues[idx] = val\n\t\t\tidx++\n\t\t} else {\n\t\t\tfor _, elem := range c.Elems {\n\t\t\t\tif idx >= actualSize {\n\t\t\t\t\terr := fmt.Errorf(\"gocql: column count overflow in newScanValues: metadata predicted %d columns but encountered more\", actualSize)\n\t\t\t\t\titer.err = err\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tval, err := elem.NewWithError()\n\t\t\t\tif err != nil {\n\t\t\t\t\titer.err = err\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tvalues[idx] = val\n\t\t\t\tidx++\n\t\t\t}\n\t\t}\n\t}\n\n\tif idx != actualSize {\n\t\terr := fmt.Errorf(\"gocql: column count mismatch in newScanValues: metadata predicted %d columns but got %d\", actualSize, idx)\n\t\titer.err = err\n\t\treturn nil, err\n\t}\n\n\treturn values, nil\n}\n\n// TODO(zariel): is it worth exporting this?\nfunc (iter *Iter) rowMap() (map[string]any, error) {\n\tif iter.err != nil {\n\t\treturn nil, iter.err\n\t}\n\n\trowData, err := iter.RowData()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titer.Scan(rowData.Values...)\n\tm := make(map[string]any, len(rowData.Columns))\n\trowData.rowMap(m)\n\treturn m, nil\n}\n\n// SliceMap is a helper function to make the API easier to use.\n// It consumes the remaining rows, closes the iterator, and returns the data\n// in the form of []map[string]any.\nfunc (iter *Iter) 
SliceMap() ([]map[string]any, error) {\n\tdefer iter.Close()\n\n\tif iter.err != nil {\n\t\treturn nil, iter.err\n\t}\n\n\t// Not checking for the error because we just did\n\trowData, err := iter.RowData()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdataToReturn := make([]map[string]any, 0)\n\tfor iter.Scan(rowData.Values...) {\n\t\tm := make(map[string]any, len(rowData.Columns))\n\t\trowData.rowMap(m)\n\t\tdataToReturn = append(dataToReturn, m)\n\t}\n\tif iter.err != nil {\n\t\treturn nil, iter.err\n\t}\n\treturn dataToReturn, nil\n}\n\n// MapScan takes a map[string]any and populates it with a row\n// that is returned from cassandra.\n//\n// Each call to MapScan() must be called with a new map object.\n// During the call to MapScan() any pointers in the existing map\n// are replaced with non pointer types before the call returns\n//\n//\titer := session.Query(`SELECT * FROM mytable`).Iter()\n//\tfor {\n//\t\t// New map each iteration\n//\t\trow := make(map[string]any)\n//\t\tif !iter.MapScan(row) {\n//\t\t\tbreak\n//\t\t}\n//\t\t// Do things with row\n//\t\tif fullname, ok := row[\"fullname\"]; ok {\n//\t\t\tfmt.Printf(\"Full Name: %s\\n\", fullname)\n//\t\t}\n//\t}\n//\tif err := iter.Close(); err != nil {\n//\t\treturn err\n//\t}\n//\n// You can also pass pointers in the map before each call\n//\n//\tvar fullName FullName // Implements gocql.Unmarshaler and gocql.Marshaler interfaces\n//\tvar address net.IP\n//\tvar age int\n//\titer := session.Query(`SELECT * FROM scan_map_table`).Iter()\n//\tfor {\n//\t\t// New map each iteration\n//\t\trow := map[string]any{\n//\t\t\t\"fullname\": &fullName,\n//\t\t\t\"age\":      &age,\n//\t\t\t\"address\":  &address,\n//\t\t}\n//\t\tif !iter.MapScan(row) {\n//\t\t\tbreak\n//\t\t}\n//\t\tfmt.Printf(\"First: %s Age: %d Address: %q\\n\", fullName.FirstName, age, address)\n//\t}\n//\tif err := iter.Close(); err != nil {\n//\t\treturn err\n//\t}\nfunc (iter *Iter) MapScan(m map[string]any) bool {\n\tif iter.err != nil 
{\n\t\treturn false\n\t}\n\n\trowData, err := iter.RowData()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfor i, col := range rowData.Columns {\n\t\tif dest, ok := m[col]; ok {\n\t\t\trowData.Values[i] = dest\n\t\t}\n\t}\n\n\tif iter.Scan(rowData.Values...) {\n\t\trowData.rowMap(m)\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc copyBytes(p []byte) []byte {\n\tb := make([]byte, len(p))\n\tcopy(b, p)\n\treturn b\n}\n"
  },
  {
    "path": "helpers_bench_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\n// createMockIter creates a mock iterator with the specified number of simple columns\nfunc createMockIter(numColumns int) *Iter {\n\tcolumns := make([]ColumnInfo, numColumns)\n\tfor i := 0; i < numColumns; i++ {\n\t\tcolumns[i] = ColumnInfo{\n\t\t\tKeyspace: \"test_keyspace\",\n\t\t\tTable:    \"test_table\",\n\t\t\tName:     fmt.Sprintf(\"column_%d\", i),\n\t\t\tTypeInfo: NativeType{typ: TypeInt, proto: protoVersion4},\n\t\t}\n\t}\n\n\treturn &Iter{\n\t\tmeta: resultMetadata{\n\t\t\tcolumns:        columns,\n\t\t\tcolCount:       numColumns,\n\t\t\tactualColCount: numColumns,\n\t\t},\n\t\tnumRows: 1,\n\t}\n}\n\n// createMockIterWithTypes creates a mock iterator with varied column types\nfunc createMockIterWithTypes() *Iter {\n\tcolumns := []ColumnInfo{\n\t\t{Name: \"id\", TypeInfo: NativeType{typ: TypeInt, proto: protoVersion4}},\n\t\t{Name: \"name\", TypeInfo: NativeType{typ: TypeVarchar, proto: protoVersion4}},\n\t\t{Name: \"created\", TypeInfo: NativeType{typ: TypeTimestamp, proto: protoVersion4}},\n\t\t{Name: \"score\", TypeInfo: NativeType{typ: TypeBigInt, proto: 
protoVersion4}},\n\t\t{Name: \"active\", TypeInfo: NativeType{typ: TypeBoolean, proto: protoVersion4}},\n\t\t{Name: \"data\", TypeInfo: NativeType{typ: TypeBlob, proto: protoVersion4}},\n\t\t{Name: \"uuid\", TypeInfo: NativeType{typ: TypeUUID, proto: protoVersion4}},\n\t\t{Name: \"value\", TypeInfo: NativeType{typ: TypeDouble, proto: protoVersion4}},\n\t\t{Name: \"count\", TypeInfo: NativeType{typ: TypeCounter, proto: protoVersion4}},\n\t\t{Name: \"text\", TypeInfo: NativeType{typ: TypeText, proto: protoVersion4}},\n\t}\n\n\treturn &Iter{\n\t\tmeta: resultMetadata{\n\t\t\tcolumns:        columns,\n\t\t\tcolCount:       len(columns),\n\t\t\tactualColCount: len(columns),\n\t\t},\n\t\tnumRows: 1,\n\t}\n}\n\n// createMockIterWithTuples creates a mock iterator with tuple columns\nfunc createMockIterWithTuples() *Iter {\n\t// Create a tuple with 3 elements\n\ttupleElems := []TypeInfo{\n\t\tNativeType{typ: TypeInt, proto: protoVersion4},\n\t\tNativeType{typ: TypeVarchar, proto: protoVersion4},\n\t\tNativeType{typ: TypeTimestamp, proto: protoVersion4},\n\t}\n\n\tcolumns := []ColumnInfo{\n\t\t{Name: \"id\", TypeInfo: NativeType{typ: TypeInt, proto: protoVersion4}},\n\t\t{Name: \"coords\", TypeInfo: TupleTypeInfo{\n\t\t\tNativeType: NativeType{typ: TypeTuple, proto: protoVersion4},\n\t\t\tElems:      tupleElems,\n\t\t}},\n\t\t{Name: \"name\", TypeInfo: NativeType{typ: TypeVarchar, proto: protoVersion4}},\n\t}\n\n\t// actualColCount accounts for tuple expansion: 1 (id) + 3 (tuple elements) + 1 (name) = 5\n\tactualColCount := 1 + len(tupleElems) + 1\n\n\treturn &Iter{\n\t\tmeta: resultMetadata{\n\t\t\tcolumns:        columns,\n\t\t\tcolCount:       len(columns),\n\t\t\tactualColCount: actualColCount,\n\t\t},\n\t\tnumRows: 1,\n\t}\n}\n\n// BenchmarkRowData measures the performance of RowData() with simple columns\nfunc BenchmarkRowData(b *testing.B) {\n\titer := createMockIter(10)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\trd, err := 
iter.RowData()\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\t_ = rd\n\t}\n}\n\n// BenchmarkRowDataSmall measures performance with few columns (typical for narrow tables)\nfunc BenchmarkRowDataSmall(b *testing.B) {\n\titer := createMockIter(3)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\trd, err := iter.RowData()\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\t_ = rd\n\t}\n}\n\n// BenchmarkRowDataLarge measures performance with many columns (wide tables)\nfunc BenchmarkRowDataLarge(b *testing.B) {\n\titer := createMockIter(50)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\trd, err := iter.RowData()\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\t_ = rd\n\t}\n}\n\n// BenchmarkRowDataWithTypes measures performance with varied column types\nfunc BenchmarkRowDataWithTypes(b *testing.B) {\n\titer := createMockIterWithTypes()\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\trd, err := iter.RowData()\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\t_ = rd\n\t}\n}\n\n// BenchmarkRowDataWithTuples measures performance with tuple columns\nfunc BenchmarkRowDataWithTuples(b *testing.B) {\n\titer := createMockIterWithTuples()\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\trd, err := iter.RowData()\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\t_ = rd\n\t}\n}\n\n// BenchmarkRowDataRepeated simulates MapScan calling RowData repeatedly\nfunc BenchmarkRowDataRepeated(b *testing.B) {\n\titer := createMockIter(10)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\t// Simulate 100 rows being scanned with MapScan\n\t\tfor j := 0; j < 100; j++ {\n\t\t\trd, err := iter.RowData()\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\t_ = rd\n\t\t}\n\t}\n}\n\n// BenchmarkRowDataAllocation focuses on allocation patterns\nfunc BenchmarkRowDataAllocation(b *testing.B) {\n\tbenchmarks := []struct {\n\t\titer 
*Iter\n\t\tname string\n\t}{\n\t\t{name: \"10cols\", iter: createMockIter(10)},\n\t\t{name: \"100cols\", iter: createMockIter(100)},\n\t\t{name: \"1000cols\", iter: createMockIter(1000)},\n\t\t{name: \"WithTuples\", iter: createMockIterWithTuples()},\n\t}\n\n\tfor _, bm := range benchmarks {\n\t\tb.Run(bm.name, func(b *testing.B) {\n\t\t\tb.ReportAllocs()\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\trd, err := bm.iter.RowData()\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\t_ = rd\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkDereference measures the fast-path vs reflect performance of dereference().\nfunc BenchmarkDereference(b *testing.B) {\n\tn := 42\n\ts := \"hello\"\n\tu := TimeUUID()\n\n\tb.Run(\"int_ptr\", func(b *testing.B) {\n\t\tb.ReportAllocs()\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\t_ = dereference(&n)\n\t\t}\n\t})\n\n\tb.Run(\"string_ptr\", func(b *testing.B) {\n\t\tb.ReportAllocs()\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\t_ = dereference(&s)\n\t\t}\n\t})\n\n\tb.Run(\"uuid_ptr\", func(b *testing.B) {\n\t\tb.ReportAllocs()\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\t_ = dereference(&u)\n\t\t}\n\t})\n}\n\n// BenchmarkRowDataRepeatedCached measures the improvement from column name caching\n// when RowData is called repeatedly (as MapScan does per-row).\nfunc BenchmarkRowDataRepeatedCached(b *testing.B) {\n\titer := createMockIterWithTypes()\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\t// Simulate 100 rows being scanned with MapScan\n\t\tfor j := 0; j < 100; j++ {\n\t\t\trd, err := iter.RowData()\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\t_ = rd\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "host_source.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n)\n\nvar (\n\tErrCannotFindHost    = errors.New(\"cannot find host\")\n\tErrHostAlreadyExists = errors.New(\"host already exists\")\n)\n\ntype nodeState int32\n\nfunc (n nodeState) String() string {\n\tif n == NodeUp {\n\t\treturn \"UP\"\n\t} else if n == NodeDown {\n\t\treturn \"DOWN\"\n\t}\n\treturn fmt.Sprintf(\"UNKNOWN_%d\", n)\n}\n\nconst (\n\tNodeUp nodeState = iota\n\tNodeDown\n)\n\ntype cassVersion struct {\n\tQualifier string\n\tMajor     int\n\tMinor     int\n\tPatch     int\n}\n\nfunc (c *cassVersion) Set(v string) error {\n\tif v == \"\" {\n\t\treturn nil\n\t}\n\n\treturn c.UnmarshalCQL(nil, []byte(v))\n}\n\nfunc (c *cassVersion) UnmarshalCQL(info TypeInfo, data []byte) error {\n\treturn c.unmarshal(data)\n}\n\nfunc (c 
*cassVersion) unmarshal(data []byte) error {\n\tv := strings.SplitN(strings.TrimPrefix(strings.TrimSuffix(string(data), \"-SNAPSHOT\"), \"v\"), \".\", 3)\n\n\tif len(v) < 2 {\n\t\treturn fmt.Errorf(\"invalid version string: %s\", data)\n\t}\n\n\tvar err error\n\tc.Major, err = strconv.Atoi(v[0])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid major version %v: %v\", v[0], err)\n\t}\n\n\tif len(v) == 2 {\n\t\tvMinor := strings.SplitN(v[1], \"-\", 2)\n\t\tc.Minor, err = strconv.Atoi(vMinor[0])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid minor version %v: %v\", vMinor[0], err)\n\t\t}\n\t\tif len(vMinor) == 2 {\n\t\t\tc.Qualifier = vMinor[1]\n\t\t}\n\t\treturn nil\n\t}\n\n\tc.Minor, err = strconv.Atoi(v[1])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid minor version %v: %v\", v[1], err)\n\t}\n\n\tvPatch := strings.SplitN(v[2], \"-\", 2)\n\tc.Patch, err = strconv.Atoi(vPatch[0])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid patch version %v: %v\", vPatch[0], err)\n\t}\n\tif len(vPatch) == 2 {\n\t\tc.Qualifier = vPatch[1]\n\t}\n\treturn nil\n}\n\nfunc (c cassVersion) Before(major, minor, patch int) bool {\n\t// We're comparing us (cassVersion) with the provided version (major, minor, patch)\n\t// We return true if our version is lower (comes before) than the provided one.\n\tif c.Major < major {\n\t\treturn true\n\t} else if c.Major == major {\n\t\tif c.Minor < minor {\n\t\t\treturn true\n\t\t} else if c.Minor == minor && c.Patch < patch {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (c cassVersion) AtLeast(major, minor, patch int) bool {\n\treturn !c.Before(major, minor, patch)\n}\n\nfunc (c cassVersion) String() string {\n\tif c.Qualifier != \"\" {\n\t\treturn fmt.Sprintf(\"%d.%d.%d-%v\", c.Major, c.Minor, c.Patch, c.Qualifier)\n\t}\n\treturn fmt.Sprintf(\"v%d.%d.%d\", c.Major, c.Minor, c.Patch)\n}\n\nfunc (c cassVersion) nodeUpDelay() time.Duration {\n\tif c.Major >= 2 && c.Minor >= 2 {\n\t\t// CASSANDRA-8236\n\t\treturn 
0\n\t}\n\n\treturn 10 * time.Second\n}\n\ntype AddressPort struct {\n\tAddress net.IP\n\tPort    uint16\n}\n\nfunc (a AddressPort) Equal(o AddressPort) bool {\n\treturn a.Address.Equal(o.Address) && a.Port == o.Port\n}\n\nfunc (a AddressPort) IsValid() bool {\n\treturn len(a.Address) != 0 && !a.Address.IsUnspecified() && a.Port != 0\n}\n\nfunc (a AddressPort) String() string {\n\treturn fmt.Sprintf(\"%s:%d\", a.Address, a.Port)\n}\n\nfunc (a AddressPort) ToNetAddr() string {\n\treturn net.JoinHostPort(a.Address.String(), strconv.Itoa(int(a.Port)))\n}\n\ntype translatedAddresses struct {\n\tCQL           AddressPort\n\tShardAware    AddressPort\n\tShardAwareTLS AddressPort\n}\n\nfunc (h translatedAddresses) Equal(o *translatedAddresses) bool {\n\treturn h.CQL.Equal(o.CQL) && h.ShardAware.Equal(o.ShardAware) && h.ShardAwareTLS.Equal(o.ShardAwareTLS)\n}\n\ntype HostInfoBuilder struct {\n\tTranslatedAddresses *translatedAddresses\n\tWorkload            string\n\tHostId              string\n\tSchemaVersion       string\n\tHostname            string\n\tClusterName         string\n\tPartitioner         string\n\tRack                string\n\tDseVersion          string\n\tDataCenter          string\n\tConnectAddress      net.IP\n\tBroadcastAddress    net.IP\n\tPreferredIP         net.IP\n\tRpcAddress          net.IP\n\tPeer                net.IP\n\tListenAddress       net.IP\n\tTokens              []string\n\tVersion             cassVersion\n\tPort                int\n}\n\nfunc (b HostInfoBuilder) Build() HostInfo {\n\tvar hostUUID UUID\n\tif b.HostId != \"\" {\n\t\tvar err error\n\t\thostUUID, err = ParseUUID(b.HostId)\n\t\tif err != nil {\n\t\t\t// Fall back: treat as opaque identifier (for tests with non-UUID strings).\n\t\t\tcopy(hostUUID[:], b.HostId)\n\t\t}\n\t}\n\treturn HostInfo{\n\t\tdseVersion:          b.DseVersion,\n\t\thostId:              hostUUID,\n\t\tdataCenter:          b.DataCenter,\n\t\tschemaVersion:       b.SchemaVersion,\n\t\thostname:            
b.Hostname,\n\t\tclusterName:         b.ClusterName,\n\t\tpartitioner:         b.Partitioner,\n\t\track:                b.Rack,\n\t\tworkload:            b.Workload,\n\t\ttokens:              b.Tokens,\n\t\tpreferredIP:         b.PreferredIP,\n\t\tbroadcastAddress:    b.BroadcastAddress,\n\t\trpcAddress:          b.RpcAddress,\n\t\tconnectAddress:      b.ConnectAddress,\n\t\tlistenAddress:       b.ListenAddress,\n\t\ttranslatedAddresses: b.TranslatedAddresses,\n\t\tversion:             b.Version,\n\t\tport:                b.Port,\n\t\tpeer:                b.Peer,\n\t}\n}\n\ntype HostInfo struct {\n\ttranslatedAddresses *translatedAddresses\n\tworkload            string\n\tdseVersion          string\n\tdataCenter          string\n\tschemaVersion       string\n\thostname            string\n\tclusterName         string\n\tpartitioner         string\n\track                string\n\trpcAddress          net.IP\n\tbroadcastAddress    net.IP\n\ttokens              []string\n\tpreferredIP         net.IP\n\tpeer                net.IP\n\tlistenAddress       net.IP\n\tconnectAddress      net.IP\n\tversion             cassVersion\n\tscyllaFeatures      ScyllaHostFeatures\n\tport                int\n\t// TODO(zariel): reduce locking maybe, not all values will change, but to ensure\n\t// that we are thread safe use a mutex to access all fields.\n\tmu     sync.RWMutex\n\tstate  nodeState\n\thostId UUID\n\tgraph  bool\n}\n\nfunc (h *HostInfo) Equal(host *HostInfo) bool {\n\tif h == host {\n\t\t// prevent rlock reentry\n\t\treturn true\n\t}\n\n\treturn h.HostID() == host.HostID() && h.ConnectAddress().Equal(host.ConnectAddress()) && h.Port() == host.Port()\n}\n\nfunc (h *HostInfo) Peer() net.IP {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.peer\n}\n\nfunc (h *HostInfo) invalidConnectAddr() bool {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\taddr, _ := h.connectAddressLocked()\n\treturn !validIpAddr(addr)\n}\n\nfunc validIpAddr(addr net.IP) bool {\n\treturn addr != nil && 
!addr.IsUnspecified()\n}\n\nfunc (h *HostInfo) connectAddressLocked() (net.IP, string) {\n\tif h.translatedAddresses != nil && h.translatedAddresses.CQL.IsValid() {\n\t\treturn h.translatedAddresses.CQL.Address, \"connect_address\"\n\t} else if validIpAddr(h.connectAddress) {\n\t\treturn h.connectAddress, \"connect_address\"\n\t} else if validIpAddr(h.rpcAddress) {\n\t\treturn h.rpcAddress, \"rpc_address\"\n\t} else if validIpAddr(h.preferredIP) {\n\t\t// where does preferred_ip get set?\n\t\treturn h.preferredIP, \"preferred_ip\"\n\t} else if validIpAddr(h.broadcastAddress) {\n\t\treturn h.broadcastAddress, \"broadcast_address\"\n\t} else if validIpAddr(h.peer) {\n\t\treturn h.peer, \"peer\"\n\t}\n\treturn net.IPv4zero, \"invalid\"\n}\n\nfunc (h *HostInfo) getDriverFacingIpAddressLocked() net.IP {\n\tif validIpAddr(h.rpcAddress) {\n\t\treturn h.rpcAddress\n\t} else if validIpAddr(h.preferredIP) {\n\t\treturn h.preferredIP\n\t} else if validIpAddr(h.broadcastAddress) {\n\t\treturn h.broadcastAddress\n\t} else if validIpAddr(h.peer) {\n\t\treturn h.peer\n\t}\n\treturn net.IPv4zero\n}\n\n// nodeToNodeAddress returns address broadcasted between node to nodes.\n// It's either `broadcast_address` if host info is read from system.local or `peer` if read from system.peers.\n// This IP address is also part of CQL Event emitted on topology/status changes,\n// but does not uniquely identify the node in case multiple nodes use the same IP address.\nfunc (h *HostInfo) nodeToNodeAddress() net.IP {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\n\tif validIpAddr(h.broadcastAddress) {\n\t\treturn h.broadcastAddress\n\t} else if validIpAddr(h.peer) {\n\t\treturn h.peer\n\t}\n\treturn net.IPv4zero\n}\n\n// Returns the address that should be used to connect to the host.\n// If you wish to override this, use an AddressTranslator\nfunc (h *HostInfo) ConnectAddress() net.IP {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\n\tif addr, _ := h.connectAddressLocked(); validIpAddr(addr) {\n\t\treturn 
addr\n\t}\n\tpanic(fmt.Sprintf(\"no valid connect address for host: %v. Is your cluster configured correctly?\", h))\n}\n\nfunc (h *HostInfo) UntranslatedConnectAddress() net.IP {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.connectAddress\n}\n\nfunc (h *HostInfo) BroadcastAddress() net.IP {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.broadcastAddress\n}\n\nfunc (h *HostInfo) ListenAddress() net.IP {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.listenAddress\n}\n\nfunc (h *HostInfo) RPCAddress() net.IP {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.rpcAddress\n}\n\nfunc (h *HostInfo) PreferredIP() net.IP {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.preferredIP\n}\n\nfunc (h *HostInfo) DataCenter() string {\n\th.mu.RLock()\n\tdc := h.dataCenter\n\th.mu.RUnlock()\n\treturn dc\n}\n\nfunc (h *HostInfo) Rack() string {\n\th.mu.RLock()\n\track := h.rack\n\th.mu.RUnlock()\n\treturn rack\n}\n\nfunc (h *HostInfo) HostID() string {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\tif h.hostId.IsEmpty() {\n\t\treturn \"\"\n\t}\n\treturn h.hostId.String()\n}\n\n// hostUUID returns the raw binary host UUID under the read lock.\n// Use this instead of direct field access on shared HostInfo to avoid data races.\nfunc (h *HostInfo) hostUUID() UUID {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.hostId\n}\n\nfunc (h *HostInfo) WorkLoad() string {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.workload\n}\n\nfunc (h *HostInfo) Graph() bool {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.graph\n}\n\nfunc (h *HostInfo) DSEVersion() string {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.dseVersion\n}\n\nfunc (h *HostInfo) Partitioner() string {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\tif h.partitioner != \"\" {\n\t\treturn h.partitioner\n\t}\n\treturn h.scyllaFeatures.partitioner\n}\n\nfunc (h *HostInfo) ClusterName() string {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.clusterName\n}\n\nfunc (h *HostInfo) Version() 
cassVersion {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.version\n}\n\nfunc (h *HostInfo) State() nodeState {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.state\n}\n\nfunc (h *HostInfo) setState(state nodeState) *HostInfo {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\th.state = state\n\treturn h\n}\n\nfunc (h *HostInfo) Tokens() []string {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.tokens\n}\n\nfunc (h *HostInfo) Port() int {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.port\n}\n\nfunc (h *HostInfo) update(from *HostInfo) {\n\tif h == from {\n\t\treturn\n\t}\n\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\n\tfrom.mu.RLock()\n\tdefer from.mu.RUnlock()\n\n\t// autogenerated do not update\n\tif h.peer == nil {\n\t\th.peer = from.peer\n\t}\n\tif h.broadcastAddress == nil {\n\t\th.broadcastAddress = from.broadcastAddress\n\t}\n\tif h.listenAddress == nil {\n\t\th.listenAddress = from.listenAddress\n\t}\n\tif h.rpcAddress == nil {\n\t\th.rpcAddress = from.rpcAddress\n\t}\n\tif h.preferredIP == nil {\n\t\th.preferredIP = from.preferredIP\n\t}\n\tif h.connectAddress == nil {\n\t\th.connectAddress = from.connectAddress\n\t}\n\tif h.port == 0 {\n\t\th.port = from.port\n\t}\n\tif h.dataCenter == \"\" {\n\t\th.dataCenter = from.dataCenter\n\t}\n\tif h.rack == \"\" {\n\t\th.rack = from.rack\n\t}\n\tif h.hostId.IsEmpty() {\n\t\th.hostId = from.hostId\n\t}\n\tif h.workload == \"\" {\n\t\th.workload = from.workload\n\t}\n\tif h.dseVersion == \"\" {\n\t\th.dseVersion = from.dseVersion\n\t}\n\tif h.partitioner == \"\" {\n\t\th.partitioner = from.partitioner\n\t}\n\tif h.clusterName == \"\" {\n\t\th.clusterName = from.clusterName\n\t}\n\tif h.version == (cassVersion{}) {\n\t\th.version = from.version\n\t}\n\tif h.tokens == nil {\n\t\th.tokens = from.tokens\n\t}\n}\n\nfunc (h *HostInfo) IsUp() bool {\n\treturn h != nil && h.State() == NodeUp\n}\n\nfunc (h *HostInfo) IsBusy(s *Session) bool {\n\tpool, ok := s.pool.getPool(h)\n\treturn ok && h != nil && 
pool.InFlight() >= MAX_IN_FLIGHT_THRESHOLD\n}\n\n// ConnectAddressAndPort returns \"{ConnectAddress}:{Port}\"\n// Deprecated: Use ConnectAddress and Port separately.\nfunc (h *HostInfo) ConnectAddressAndPort() string {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\taddr, _ := h.connectAddressLocked()\n\treturn net.JoinHostPort(addr.String(), strconv.Itoa(h.port))\n}\n\nfunc (h *HostInfo) String() string {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\n\tconnectAddr, source := h.connectAddressLocked()\n\treturn fmt.Sprintf(\"[HostInfo hostname=%q connectAddress=%q peer=%q rpc_address=%q broadcast_address=%q \"+\n\t\t\"preferred_ip=%q connect_addr=%q connect_addr_source=%q \"+\n\t\t\"port=%d data_center=%q rack=%q host_id=%q version=%q state=%s num_tokens=%d]\",\n\t\th.hostname, h.connectAddress, h.peer, h.rpcAddress, h.broadcastAddress, h.preferredIP,\n\t\tconnectAddr, source,\n\t\th.port, h.dataCenter, h.rack, h.hostId.String(), h.version, h.state, len(h.tokens))\n}\n\nfunc (h *HostInfo) setScyllaFeatures(s ScyllaHostFeatures) {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\th.scyllaFeatures = s\n}\n\nfunc (h *HostInfo) ScyllaFeatures() ScyllaHostFeatures {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\treturn h.scyllaFeatures\n}\n\n// ScyllaShardAwarePort returns the shard aware port of this host.\n// Returns zero if the shard aware port is not known.\nfunc (h *HostInfo) ScyllaShardAwarePort() uint16 {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.scyllaFeatures.ShardAwarePort()\n}\n\n// ScyllaShardAwarePortTLS returns the TLS-enabled shard aware port of this host.\n// Returns zero if the shard aware port is not known.\nfunc (h *HostInfo) ScyllaShardAwarePortTLS() uint16 {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.scyllaFeatures.ShardAwarePortTLS()\n}\n\n// ScyllaShardCount returns count of shards on the node.\nfunc (h *HostInfo) ScyllaShardCount() int {\n\th.mu.RLock()\n\tdefer h.mu.RUnlock()\n\treturn h.scyllaFeatures.ShardsCount()\n}\n\nfunc (h *HostInfo) 
setTranslatedConnectionInfo(info translatedAddresses) {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\th.translatedAddresses = &info\n}\n\nfunc (h *HostInfo) getTranslatedConnectionInfo() *translatedAddresses {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\treturn h.translatedAddresses\n}\n\n// Returns true if we are using system_schema.keyspaces instead of system.schema_keyspaces\nfunc checkSystemSchema(control controlConnection) (bool, error) {\n\titer := control.querySystem(\"SELECT * FROM system_schema.keyspaces\")\n\tif iter == nil {\n\t\treturn false, errNoControl\n\t}\n\tdefer iter.Close()\n\tif err := iter.err; err != nil {\n\t\tif errf, ok := err.(*frm.ErrorFrame); ok {\n\t\t\tif errf.Code == ErrCodeSyntax {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\n// Given a map that represents a row from either system.local or system.peers\n// return as much information as we can in *HostInfo\nfunc hostInfoFromMap(row map[string]any, defaultPort int) (*HostInfo, error) {\n\tconst assertErrorMsg = \"Assertion failed for %s\"\n\tvar ok bool\n\n\thost := HostInfo{}\n\n\t// Default to our connected port if the cluster doesn't have port information\n\tfor key, value := range row {\n\t\tswitch key {\n\t\tcase \"data_center\":\n\t\t\thost.dataCenter, ok = value.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"data_center\")\n\t\t\t}\n\t\tcase \"rack\":\n\t\t\thost.rack, ok = value.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"rack\")\n\t\t\t}\n\t\tcase \"host_id\":\n\t\t\thostId, ok := value.(UUID)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"host_id\")\n\t\t\t}\n\t\t\thost.hostId = hostId\n\t\tcase \"release_version\":\n\t\t\tversion, ok := value.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"release_version\")\n\t\t\t}\n\t\t\thost.version.Set(version)\n\t\tcase \"peer\":\n\t\t\tip, ok := value.(string)\n\t\t\tif !ok 
{\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"peer\")\n\t\t\t}\n\t\t\thost.peer = net.ParseIP(ip)\n\t\tcase \"cluster_name\":\n\t\t\thost.clusterName, ok = value.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"cluster_name\")\n\t\t\t}\n\t\tcase \"partitioner\":\n\t\t\thost.partitioner, ok = value.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"partitioner\")\n\t\t\t}\n\t\tcase \"broadcast_address\":\n\t\t\tip, ok := value.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"broadcast_address\")\n\t\t\t}\n\t\t\thost.broadcastAddress = net.ParseIP(ip)\n\t\tcase \"preferred_ip\":\n\t\t\tip, ok := value.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"preferred_ip\")\n\t\t\t}\n\t\t\thost.preferredIP = net.ParseIP(ip)\n\t\tcase \"rpc_address\":\n\t\t\tip, ok := value.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"rpc_address\")\n\t\t\t}\n\t\t\thost.rpcAddress = net.ParseIP(ip)\n\t\tcase \"native_address\":\n\t\t\tip, ok := value.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"native_address\")\n\t\t\t}\n\t\t\thost.rpcAddress = net.ParseIP(ip)\n\t\tcase \"listen_address\":\n\t\t\tip, ok := value.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"listen_address\")\n\t\t\t}\n\t\t\thost.listenAddress = net.ParseIP(ip)\n\t\tcase \"native_port\":\n\t\t\tnative_port, ok := value.(int)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"native_port\")\n\t\t\t}\n\t\t\thost.port = native_port\n\t\tcase \"workload\":\n\t\t\thost.workload, ok = value.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"workload\")\n\t\t\t}\n\t\tcase \"graph\":\n\t\t\thost.graph, ok = value.(bool)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"graph\")\n\t\t\t}\n\t\tcase \"tokens\":\n\t\t\thost.tokens, ok = value.([]string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, 
fmt.Errorf(assertErrorMsg, \"tokens\")\n\t\t\t}\n\t\tcase \"dse_version\":\n\t\t\thost.dseVersion, ok = value.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"dse_version\")\n\t\t\t}\n\t\tcase \"schema_version\":\n\t\t\tschemaVersion, ok := value.(UUID)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(assertErrorMsg, \"schema_version\")\n\t\t\t}\n\t\t\thost.schemaVersion = schemaVersion.String()\n\t\t}\n\t\t// TODO(thrawn01): Add 'port'? once CASSANDRA-7544 is complete\n\t\t// Not sure what the port field will be called until the JIRA issue is complete\n\t}\n\n\tif host.port == 0 {\n\t\thost.port = defaultPort\n\t}\n\n\thost.connectAddress = host.getDriverFacingIpAddressLocked()\n\treturn &host, nil\n}\n\nfunc hostInfoFromIter(iter *Iter, defaultPort int) (*HostInfo, error) {\n\tdefer iter.Close()\n\n\trows, err := iter.SliceMap()\n\tif err != nil {\n\t\t// TODO(zariel): make typed error\n\t\treturn nil, err\n\t}\n\n\tif len(rows) == 0 {\n\t\treturn nil, errors.New(\"query returned 0 rows\")\n\t}\n\n\thost, err := hostInfoFromMap(rows[0], defaultPort)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn host, nil\n}\n\n// debounceRingRefresh submits a ring refresh request to the ring refresh debouncer.\nfunc (s *Session) debounceRingRefresh() {\n\ts.ringRefresher.Debounce()\n}\n\n// refreshRing executes a ring refresh immediately and cancels pending debounce ring refresh requests.\nfunc (s *Session) refreshRingNow() error {\n\terr, ok := <-s.ringRefresher.RefreshNow()\n\tif !ok {\n\t\treturn errors.New(\"could not refresh ring because stop was requested\")\n\t}\n\n\treturn err\n}\n\nfunc (s *Session) refreshRing() error {\n\thosts, partitioner, err := s.hostSource.GetHostsFromSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\tprevHosts := s.hostSource.getHostsMap()\n\n\tfor _, h := range hosts {\n\t\tif s.cfg.filterHost(h) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif host, ok := s.hostSource.addHostIfMissing(h); !ok 
{\n\t\t\ts.startPoolFill(h)\n\t\t} else {\n\t\t\t// host (by hostID) already exists; determine if IP has changed\n\t\t\tnewHostID := h.HostID()\n\t\t\texisting, ok := prevHosts[newHostID]\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"get existing host=%s from prevHosts: %w\", h, ErrCannotFindHost)\n\t\t\t}\n\t\t\tif h.UntranslatedConnectAddress().Equal(existing.UntranslatedConnectAddress()) && h.nodeToNodeAddress().Equal(existing.nodeToNodeAddress()) {\n\t\t\t\t// no host IP change\n\t\t\t\thost.update(h)\n\t\t\t} else {\n\t\t\t\t// host IP has changed\n\t\t\t\t// remove old HostInfo (w/old IP)\n\t\t\t\ts.removeHost(existing)\n\t\t\t\tif _, alreadyExists := s.hostSource.addHostIfMissing(h); alreadyExists {\n\t\t\t\t\treturn fmt.Errorf(\"add new host=%s after removal: %w\", h, ErrHostAlreadyExists)\n\t\t\t\t}\n\t\t\t\t// add new HostInfo (same hostID, new IP)\n\t\t\t\ts.startPoolFill(h)\n\t\t\t}\n\t\t}\n\t\tdelete(prevHosts, h.HostID())\n\t}\n\n\tfor _, host := range prevHosts {\n\t\ts.metadataDescriber.RemoveTabletsWithHost(host)\n\t\ts.removeHost(host)\n\t}\n\ts.policy.SetPartitioner(partitioner)\n\n\treturn nil\n}\n"
  },
  {
    "path": "host_source_scylla.go",
    "content": "package gocql\n\nfunc (h *HostInfo) SetDatacenter(dc string) {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\th.dataCenter = dc\n}\n"
  },
  {
    "path": "host_source_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"net\"\n\t\"testing\"\n\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n\t\"github.com/gocql/gocql/internal/tests/mock\"\n)\n\ntype trackingMockFramer struct {\n\tmock.MockFramer\n\treleased bool\n}\n\nfunc (f *trackingMockFramer) Release() {\n\tf.released = true\n}\n\ntype systemSchemaTestControl struct {\n\titer *Iter\n}\n\nfunc (*systemSchemaTestControl) getConn() *connHost                        { return nil }\nfunc (*systemSchemaTestControl) awaitSchemaAgreement() error               { return nil }\nfunc (*systemSchemaTestControl) query(string, ...any) (iter *Iter)         { return nil }\nfunc (c *systemSchemaTestControl) querySystem(string, ...any) (iter *Iter) { return c.iter }\nfunc (*systemSchemaTestControl) discoverProtocol([]*HostInfo) (int, error) { return 0, nil }\nfunc (*systemSchemaTestControl) connect([]*HostInfo) 
error                 { return nil }\nfunc (*systemSchemaTestControl) close()                                    {}\nfunc (*systemSchemaTestControl) getSession() *Session                      { return nil }\nfunc (*systemSchemaTestControl) reconnect() error                          { return nil }\n\nfunc TestUnmarshalCassVersion(t *testing.T) {\n\tt.Parallel()\n\n\ttests := [...]struct {\n\t\tdata    string\n\t\tversion cassVersion\n\t}{\n\t\t{\"3.2\", cassVersion{Major: 3, Minor: 2, Patch: 0, Qualifier: \"\"}},\n\t\t{\"2.10.1-SNAPSHOT\", cassVersion{Major: 2, Minor: 10, Patch: 1, Qualifier: \"\"}},\n\t\t{\"1.2.3\", cassVersion{Major: 1, Minor: 2, Patch: 3, Qualifier: \"\"}},\n\t\t{\"4.0-rc2\", cassVersion{Major: 4, Minor: 0, Patch: 0, Qualifier: \"rc2\"}},\n\t\t{\"4.3.2-rc1\", cassVersion{Major: 4, Minor: 3, Patch: 2, Qualifier: \"rc1\"}},\n\t\t{\"4.3.2-rc1-qualifier1\", cassVersion{Major: 4, Minor: 3, Patch: 2, Qualifier: \"rc1-qualifier1\"}},\n\t\t{\"4.3-rc1-qualifier1\", cassVersion{Major: 4, Minor: 3, Patch: 0, Qualifier: \"rc1-qualifier1\"}},\n\t}\n\n\tfor i, test := range tests {\n\t\tv := &cassVersion{}\n\t\tif err := v.UnmarshalCQL(nil, []byte(test.data)); err != nil {\n\t\t\tt.Errorf(\"%d: %v\", i, err)\n\t\t} else if *v != test.version {\n\t\t\tt.Errorf(\"%d: expected %#+v got %#+v\", i, test.version, *v)\n\t\t}\n\t}\n}\n\nfunc TestCassVersionBefore(t *testing.T) {\n\tt.Parallel()\n\n\ttests := [...]struct {\n\t\tversion             cassVersion\n\t\tmajor, minor, patch int\n\t\tQualifier           string\n\t}{\n\t\t{cassVersion{Major: 1, Minor: 0, Patch: 0, Qualifier: \"\"}, 0, 0, 0, \"\"},\n\t\t{cassVersion{Major: 0, Minor: 1, Patch: 0, Qualifier: \"\"}, 0, 0, 0, \"\"},\n\t\t{cassVersion{Major: 0, Minor: 0, Patch: 1, Qualifier: \"\"}, 0, 0, 0, \"\"},\n\n\t\t{cassVersion{Major: 1, Minor: 0, Patch: 0, Qualifier: \"\"}, 0, 1, 0, \"\"},\n\t\t{cassVersion{Major: 0, Minor: 1, Patch: 0, Qualifier: \"\"}, 0, 0, 1, \"\"},\n\t\t{cassVersion{Major: 4, Minor: 1, 
Patch: 0, Qualifier: \"\"}, 3, 1, 2, \"\"},\n\n\t\t{cassVersion{Major: 4, Minor: 1, Patch: 0, Qualifier: \"\"}, 3, 1, 2, \"\"},\n\t}\n\n\tfor i, test := range tests {\n\t\tif test.version.Before(test.major, test.minor, test.patch) {\n\t\t\tt.Errorf(\"%d: expected v%d.%d.%d to be before %v\", i, test.major, test.minor, test.patch, test.version)\n\t\t}\n\t}\n\n}\n\nfunc TestIsValidPeer(t *testing.T) {\n\tt.Parallel()\n\n\thost := &HostInfo{\n\t\trpcAddress: net.ParseIP(\"0.0.0.0\"),\n\t\track:       \"myRack\",\n\t\thostId:     tUUID(1),\n\t\tdataCenter: \"datacenter\",\n\t\ttokens:     []string{\"0\", \"1\"},\n\t}\n\n\tif !isValidPeer(host) {\n\t\tt.Errorf(\"expected %+v to be a valid peer\", host)\n\t}\n\n\thost.rack = \"\"\n\tif isValidPeer(host) {\n\t\tt.Errorf(\"expected %+v to NOT be a valid peer\", host)\n\t}\n}\n\nfunc TestIsZeroToken(t *testing.T) {\n\tt.Parallel()\n\n\thost := &HostInfo{\n\t\trpcAddress: net.ParseIP(\"0.0.0.0\"),\n\t\track:       \"myRack\",\n\t\thostId:     tUUID(1),\n\t\tdataCenter: \"datacenter\",\n\t\ttokens:     []string{\"0\", \"1\"},\n\t}\n\n\tif isZeroToken(host) {\n\t\tt.Errorf(\"expected %+v to NOT be a zero-token host\", host)\n\t}\n\n\thost.tokens = []string{}\n\tif !isZeroToken(host) {\n\t\tt.Errorf(\"expected %+v to be a zero-token host\", host)\n\t}\n}\n\nfunc TestCheckSystemSchemaClosesIter(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"NilIterReturnsNoControl\", func(t *testing.T) {\n\t\tok, err := checkSystemSchema(&systemSchemaTestControl{})\n\t\tif err != errNoControl {\n\t\t\tt.Fatalf(\"expected errNoControl, got %v\", err)\n\t\t}\n\t\tif ok {\n\t\t\tt.Fatal(\"expected system schema v2 detection to fail without a control iterator\")\n\t\t}\n\t})\n\n\tt.Run(\"Success\", func(t *testing.T) {\n\t\tframer := &trackingMockFramer{}\n\t\tok, err := checkSystemSchema(&systemSchemaTestControl{\n\t\t\titer: &Iter{framer: framer},\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t\tif !ok 
{\n\t\t\tt.Fatal(\"expected system schema v2 detection to succeed\")\n\t\t}\n\t\tif !framer.released {\n\t\t\tt.Fatal(\"expected iterator framer to be released\")\n\t\t}\n\t})\n\n\tt.Run(\"SyntaxError\", func(t *testing.T) {\n\t\tframer := &trackingMockFramer{}\n\t\tok, err := checkSystemSchema(&systemSchemaTestControl{\n\t\t\titer: &Iter{\n\t\t\t\terr:    &frm.ErrorFrame{Code: ErrCodeSyntax},\n\t\t\t\tframer: framer,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t\tif ok {\n\t\t\tt.Fatal(\"expected schema v2 detection to fall back on syntax error\")\n\t\t}\n\t\tif !framer.released {\n\t\t\tt.Fatal(\"expected iterator framer to be released\")\n\t\t}\n\t})\n}\n\nfunc TestHostInfoFromIterClosesIter(t *testing.T) {\n\tt.Parallel()\n\n\trow := []any{\n\t\t\"local\",\n\t\t\"COMPLETED\",\n\t\tnet.IPv4(192, 168, 100, 12),\n\t\t\"cluster\",\n\t\t\"3.3.1\",\n\t\t\"datacenter1\",\n\t\t1733834239,\n\t\tParseUUIDMust(\"045859a7-6b9f-4efd-a5e7-acd64a295e13\"),\n\t\tnet.IPv4(192, 168, 100, 12),\n\t\t\"4\",\n\t\t\"org.apache.cassandra.dht.Murmur3Partitioner\",\n\t\t\"rack1\",\n\t\t\"3.0.8\",\n\t\tnet.IPv4(192, 168, 100, 12),\n\t\tParseUUIDMust(\"daf4df2c-b708-11ef-5c25-3004361afd71\"),\n\t\t\"\",\n\t\t[]string{\"1\"},\n\t\tmap[UUID]byte{},\n\t}\n\tframer := &trackingMockFramer{\n\t\tMockFramer: mock.MockFramer{Data: marshalMetadataMust(systemLocalResultMetadata, row)},\n\t}\n\n\thost, err := hostInfoFromIter(&Iter{\n\t\tmeta:    systemLocalResultMetadata,\n\t\tframer:  framer,\n\t\tnumRows: 1,\n\t}, 9042)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif host == nil {\n\t\tt.Fatal(\"expected host info\")\n\t}\n\tif !framer.released {\n\t\tt.Fatal(\"expected iterator framer to be released\")\n\t}\n}\n\nfunc TestHostInfo_ConnectAddress(t *testing.T) {\n\tt.Parallel()\n\n\tvar localhost = net.IPv4(127, 0, 0, 1)\n\ttests := []struct {\n\t\tname          string\n\t\tconnectAddr   net.IP\n\t\trpcAddr       
net.IP\n\t\tbroadcastAddr net.IP\n\t\tpeer          net.IP\n\t}{\n\t\t{name: \"rpc_address\", rpcAddr: localhost},\n\t\t{name: \"connect_address\", connectAddr: localhost},\n\t\t{name: \"broadcast_address\", broadcastAddr: localhost},\n\t\t{name: \"peer\", peer: localhost},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\thost := &HostInfo{\n\t\t\t\tconnectAddress:   test.connectAddr,\n\t\t\t\trpcAddress:       test.rpcAddr,\n\t\t\t\tbroadcastAddress: test.broadcastAddr,\n\t\t\t\tpeer:             test.peer,\n\t\t\t}\n\n\t\t\tif addr := host.ConnectAddress(); !addr.Equal(localhost) {\n\t\t\t\tt.Fatalf(\"expected ConnectAddress to be %s got %s\", localhost, addr)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAddressPort(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"IsValid\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\ttests := []struct {\n\t\t\tname     string\n\t\t\taddr     AddressPort\n\t\t\texpected bool\n\t\t}{\n\t\t\t{\n\t\t\t\tname:     \"valid IPv4 address with port\",\n\t\t\t\taddr:     AddressPort{Address: net.IPv4(127, 0, 0, 1), Port: 9042},\n\t\t\t\texpected: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"valid IPv6 address with port\",\n\t\t\t\taddr:     AddressPort{Address: net.ParseIP(\"::1\"), Port: 9042},\n\t\t\t\texpected: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"nil address\",\n\t\t\t\taddr:     AddressPort{Address: nil, Port: 9042},\n\t\t\t\texpected: false,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"unspecified IPv4 address\",\n\t\t\t\taddr:     AddressPort{Address: net.IPv4zero, Port: 9042},\n\t\t\t\texpected: false,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"unspecified IPv6 address\",\n\t\t\t\taddr:     AddressPort{Address: net.IPv6unspecified, Port: 9042},\n\t\t\t\texpected: false,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"zero port\",\n\t\t\t\taddr:     AddressPort{Address: net.IPv4(127, 0, 0, 1), Port: 0},\n\t\t\t\texpected: false,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"nil address and zero 
port\",\n\t\t\t\taddr:     AddressPort{Address: nil, Port: 0},\n\t\t\t\texpected: false,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"empty AddressPort\",\n\t\t\t\taddr:     AddressPort{},\n\t\t\t\texpected: false,\n\t\t\t},\n\t\t}\n\n\t\tfor _, test := range tests {\n\t\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t\tresult := test.addr.IsValid()\n\t\t\t\tif result != test.expected {\n\t\t\t\t\tt.Errorf(\"IsValid() = %v, expected %v for %+v\", result, test.expected, test.addr)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"Equal\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\taddr1 := AddressPort{Address: net.IPv4(127, 0, 0, 1), Port: 9042}\n\t\taddr2 := AddressPort{Address: net.IPv4(127, 0, 0, 1), Port: 9042}\n\t\taddr3 := AddressPort{Address: net.IPv4(192, 168, 1, 1), Port: 9042}\n\t\taddr4 := AddressPort{Address: net.IPv4(127, 0, 0, 1), Port: 9043}\n\n\t\ttests := []struct {\n\t\t\tname     string\n\t\t\ta        AddressPort\n\t\t\tb        AddressPort\n\t\t\texpected bool\n\t\t}{\n\t\t\t{\n\t\t\t\tname:     \"equal addresses and ports\",\n\t\t\t\ta:        addr1,\n\t\t\t\tb:        addr2,\n\t\t\t\texpected: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"different addresses, same port\",\n\t\t\t\ta:        addr1,\n\t\t\t\tb:        addr3,\n\t\t\t\texpected: false,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"same address, different ports\",\n\t\t\t\ta:        addr1,\n\t\t\t\tb:        addr4,\n\t\t\t\texpected: false,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"IPv6 addresses equal\",\n\t\t\t\ta:        AddressPort{Address: net.ParseIP(\"::1\"), Port: 9042},\n\t\t\t\tb:        AddressPort{Address: net.ParseIP(\"::1\"), Port: 9042},\n\t\t\t\texpected: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"empty\",\n\t\t\t\ta:        AddressPort{},\n\t\t\t\tb:        AddressPort{},\n\t\t\t\texpected: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"empty, non-empty\",\n\t\t\t\ta:        AddressPort{},\n\t\t\t\tb:        AddressPort{Address: net.ParseIP(\"::1\"), Port: 
9042},\n\t\t\t\texpected: false,\n\t\t\t},\n\t\t}\n\n\t\tfor _, test := range tests {\n\t\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t\tresult := test.a.Equal(test.b)\n\t\t\t\tif result != test.expected {\n\t\t\t\t\tt.Errorf(\"Equal() = %v, expected %v for a=%+v, b=%+v\", result, test.expected, test.a, test.b)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"String\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\ttests := []struct {\n\t\t\tname     string\n\t\t\taddr     AddressPort\n\t\t\texpected string\n\t\t}{\n\t\t\t{\n\t\t\t\tname:     \"IPv4 address\",\n\t\t\t\taddr:     AddressPort{Address: net.IPv4(127, 0, 0, 1), Port: 9042},\n\t\t\t\texpected: \"127.0.0.1:9042\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"IPv6 address\",\n\t\t\t\taddr:     AddressPort{Address: net.ParseIP(\"::1\"), Port: 9042},\n\t\t\t\texpected: \"::1:9042\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"different port\",\n\t\t\t\taddr:     AddressPort{Address: net.IPv4(192, 168, 1, 1), Port: 8080},\n\t\t\t\texpected: \"192.168.1.1:8080\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"nil address\",\n\t\t\t\taddr:     AddressPort{Address: nil, Port: 9042},\n\t\t\t\texpected: \"<nil>:9042\",\n\t\t\t},\n\t\t}\n\n\t\tfor _, test := range tests {\n\t\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t\tresult := test.addr.String()\n\t\t\t\tif result != test.expected {\n\t\t\t\t\tt.Errorf(\"String() = %q, expected %q\", result, test.expected)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"ToNetAddr\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\ttests := []struct {\n\t\t\tname     string\n\t\t\taddr     AddressPort\n\t\t\texpected string\n\t\t}{\n\t\t\t{\n\t\t\t\tname:     \"IPv4 address\",\n\t\t\t\taddr:     AddressPort{Address: net.IPv4(127, 0, 0, 1), Port: 9042},\n\t\t\t\texpected: \"127.0.0.1:9042\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"IPv6 address\",\n\t\t\t\taddr:     AddressPort{Address: net.ParseIP(\"::1\"), Port: 9042},\n\t\t\t\texpected: \"[::1]:9042\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     
\"IPv6 address with zone\",\n\t\t\t\taddr:     AddressPort{Address: net.ParseIP(\"fe80::1\"), Port: 9043},\n\t\t\t\texpected: \"[fe80::1]:9043\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:     \"different port\",\n\t\t\t\taddr:     AddressPort{Address: net.IPv4(192, 168, 1, 1), Port: 8080},\n\t\t\t\texpected: \"192.168.1.1:8080\",\n\t\t\t},\n\t\t}\n\n\t\tfor _, test := range tests {\n\t\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t\tresult := test.addr.ToNetAddr()\n\t\t\t\tif result != test.expected {\n\t\t\t\t\tt.Errorf(\"ToNetAddr() = %q, expected %q\", result, test.expected)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc TestHostInfoBuilder(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"Build\", func(t *testing.T) {\n\t\tt.Run(\"basic fields\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuilder := HostInfoBuilder{\n\t\t\t\tHostId:        \"a0000000-0000-0000-0000-000000000123\",\n\t\t\t\tDataCenter:    \"dc1\",\n\t\t\t\tRack:          \"rack1\",\n\t\t\t\tTokens:        []string{\"token1\", \"token2\"},\n\t\t\t\tPort:          9042,\n\t\t\t\tWorkload:      \"Analytics\",\n\t\t\t\tDseVersion:    \"6.8.0\",\n\t\t\t\tClusterName:   \"test-cluster\",\n\t\t\t\tPartitioner:   \"Murmur3Partitioner\",\n\t\t\t\tHostname:      \"node1.example.com\",\n\t\t\t\tSchemaVersion: \"schema-v1\",\n\t\t\t}\n\n\t\t\thost := builder.Build()\n\n\t\t\tif host.HostID() != builder.HostId {\n\t\t\t\tt.Errorf(\"HostID() = %q, expected %q\", host.HostID(), builder.HostId)\n\t\t\t}\n\t\t\tif host.DataCenter() != builder.DataCenter {\n\t\t\t\tt.Errorf(\"DataCenter() = %q, expected %q\", host.DataCenter(), builder.DataCenter)\n\t\t\t}\n\t\t\tif host.Rack() != builder.Rack {\n\t\t\t\tt.Errorf(\"Rack() = %q, expected %q\", host.Rack(), builder.Rack)\n\t\t\t}\n\t\t\tif host.Port() != builder.Port {\n\t\t\t\tt.Errorf(\"Port() = %d, expected %d\", host.Port(), builder.Port)\n\t\t\t}\n\t\t\tif host.WorkLoad() != builder.Workload {\n\t\t\t\tt.Errorf(\"WorkLoad() = %q, expected %q\", host.WorkLoad(), 
builder.Workload)\n\t\t\t}\n\t\t\tif host.DSEVersion() != builder.DseVersion {\n\t\t\t\tt.Errorf(\"DSEVersion() = %q, expected %q\", host.DSEVersion(), builder.DseVersion)\n\t\t\t}\n\t\t\tif host.ClusterName() != builder.ClusterName {\n\t\t\t\tt.Errorf(\"ClusterName() = %q, expected %q\", host.ClusterName(), builder.ClusterName)\n\t\t\t}\n\t\t\tif host.Partitioner() != builder.Partitioner {\n\t\t\t\tt.Errorf(\"Partitioner() = %q, expected %q\", host.Partitioner(), builder.Partitioner)\n\t\t\t}\n\t\t\tif len(host.Tokens()) != len(builder.Tokens) {\n\t\t\t\tt.Errorf(\"len(Tokens()) = %d, expected %d\", len(host.Tokens()), len(builder.Tokens))\n\t\t\t}\n\t\t\tfor i, token := range host.Tokens() {\n\t\t\t\tif token != builder.Tokens[i] {\n\t\t\t\t\tt.Errorf(\"Tokens()[%d] = %q, expected %q\", i, token, builder.Tokens[i])\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"IP addresses\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tconnectAddr := net.IPv4(192, 168, 1, 1)\n\t\t\tbroadcastAddr := net.IPv4(192, 168, 1, 2)\n\t\t\tpreferredIP := net.IPv4(192, 168, 1, 3)\n\t\t\trpcAddr := net.IPv4(192, 168, 1, 4)\n\t\t\tpeer := net.IPv4(192, 168, 1, 5)\n\t\t\tlistenAddr := net.IPv4(192, 168, 1, 6)\n\n\t\t\tbuilder := HostInfoBuilder{\n\t\t\t\tConnectAddress:   connectAddr,\n\t\t\t\tBroadcastAddress: broadcastAddr,\n\t\t\t\tPreferredIP:      preferredIP,\n\t\t\t\tRpcAddress:       rpcAddr,\n\t\t\t\tPeer:             peer,\n\t\t\t\tListenAddress:    listenAddr,\n\t\t\t\tPort:             9042,\n\t\t\t}\n\n\t\t\thost := builder.Build()\n\n\t\t\tif !host.UntranslatedConnectAddress().Equal(connectAddr) {\n\t\t\t\tt.Errorf(\"UntranslatedConnectAddress() = %v, expected %v\", host.UntranslatedConnectAddress(), connectAddr)\n\t\t\t}\n\t\t\tif !host.BroadcastAddress().Equal(broadcastAddr) {\n\t\t\t\tt.Errorf(\"BroadcastAddress() = %v, expected %v\", host.BroadcastAddress(), broadcastAddr)\n\t\t\t}\n\t\t\tif !host.PreferredIP().Equal(preferredIP) {\n\t\t\t\tt.Errorf(\"PreferredIP() = %v, 
expected %v\", host.PreferredIP(), preferredIP)\n\t\t\t}\n\t\t\tif !host.RPCAddress().Equal(rpcAddr) {\n\t\t\t\tt.Errorf(\"RPCAddress() = %v, expected %v\", host.RPCAddress(), rpcAddr)\n\t\t\t}\n\t\t\tif !host.Peer().Equal(peer) {\n\t\t\t\tt.Errorf(\"Peer() = %v, expected %v\", host.Peer(), peer)\n\t\t\t}\n\t\t\tif !host.ListenAddress().Equal(listenAddr) {\n\t\t\t\tt.Errorf(\"ListenAddress() = %v, expected %v\", host.ListenAddress(), listenAddr)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"translated addresses\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\ttranslatedAddrs := &translatedAddresses{\n\t\t\t\tCQL: AddressPort{\n\t\t\t\t\tAddress: net.IPv4(10, 0, 0, 1),\n\t\t\t\t\tPort:    9042,\n\t\t\t\t},\n\t\t\t\tShardAware: AddressPort{\n\t\t\t\t\tAddress: net.IPv4(10, 0, 0, 2),\n\t\t\t\t\tPort:    19042,\n\t\t\t\t},\n\t\t\t\tShardAwareTLS: AddressPort{\n\t\t\t\t\tAddress: net.IPv4(10, 0, 0, 3),\n\t\t\t\t\tPort:    19043,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tbuilder := HostInfoBuilder{\n\t\t\t\tTranslatedAddresses: translatedAddrs,\n\t\t\t\tPort:                9042,\n\t\t\t}\n\n\t\t\thost := builder.Build()\n\n\t\t\t// ConnectAddress should use translated CQL address\n\t\t\texpectedAddr := translatedAddrs.CQL.Address\n\t\t\tif !host.ConnectAddress().Equal(expectedAddr) {\n\t\t\t\tt.Errorf(\"ConnectAddress() = %v, expected %v\", host.ConnectAddress(), expectedAddr)\n\t\t\t}\n\n\t\t\t// Verify translated addresses are set\n\t\t\tretrievedAddrs := host.getTranslatedConnectionInfo()\n\t\t\tif retrievedAddrs == nil {\n\t\t\t\tt.Fatal(\"getTranslatedConnectionInfo() returned nil\")\n\t\t\t}\n\t\t\tif !retrievedAddrs.Equal(translatedAddrs) {\n\t\t\t\tt.Errorf(\"translated addresses not equal: got %+v, expected %+v\", retrievedAddrs, translatedAddrs)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"version\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tversion := cassVersion{\n\t\t\t\tMajor:     4,\n\t\t\t\tMinor:     0,\n\t\t\t\tPatch:     3,\n\t\t\t\tQualifier: \"rc1\",\n\t\t\t}\n\n\t\t\tbuilder 
:= HostInfoBuilder{\n\t\t\t\tVersion: version,\n\t\t\t\tPort:    9042,\n\t\t\t}\n\n\t\t\thost := builder.Build()\n\n\t\t\tif host.Version() != version {\n\t\t\t\tt.Errorf(\"Version() = %+v, expected %+v\", host.Version(), version)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"empty builder\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuilder := HostInfoBuilder{}\n\t\t\thost := builder.Build()\n\n\t\t\tif host.HostID() != \"\" {\n\t\t\t\tt.Errorf(\"HostID() = %q, expected empty string\", host.HostID())\n\t\t\t}\n\t\t\tif host.DataCenter() != \"\" {\n\t\t\t\tt.Errorf(\"DataCenter() = %q, expected empty string\", host.DataCenter())\n\t\t\t}\n\t\t\tif host.Port() != 0 {\n\t\t\t\tt.Errorf(\"Port() = %d, expected 0\", host.Port())\n\t\t\t}\n\t\t\tif host.Tokens() != nil {\n\t\t\t\tt.Errorf(\"Tokens() = %v, expected nil\", host.Tokens())\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"all fields populated\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\ttranslatedAddrs := &translatedAddresses{\n\t\t\t\tCQL: AddressPort{\n\t\t\t\t\tAddress: net.IPv4(10, 0, 0, 1),\n\t\t\t\t\tPort:    9042,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tversion := cassVersion{\n\t\t\t\tMajor: 3,\n\t\t\t\tMinor: 11,\n\t\t\t\tPatch: 4,\n\t\t\t}\n\n\t\t\tbuilder := HostInfoBuilder{\n\t\t\t\tTranslatedAddresses: translatedAddrs,\n\t\t\t\tWorkload:            \"Cassandra\",\n\t\t\t\tHostId:              \"b0000000-0000-0000-0000-000000000456\",\n\t\t\t\tSchemaVersion:       \"schema-v2\",\n\t\t\t\tHostname:            \"cassandra-node.local\",\n\t\t\t\tClusterName:         \"production-cluster\",\n\t\t\t\tPartitioner:         \"Murmur3Partitioner\",\n\t\t\t\tRack:                \"rack2\",\n\t\t\t\tDseVersion:          \"6.8.1\",\n\t\t\t\tDataCenter:          \"dc2\",\n\t\t\t\tConnectAddress:      net.IPv4(192, 168, 2, 1),\n\t\t\t\tBroadcastAddress:    net.IPv4(192, 168, 2, 2),\n\t\t\t\tPreferredIP:         net.IPv4(192, 168, 2, 3),\n\t\t\t\tRpcAddress:          net.IPv4(192, 168, 2, 4),\n\t\t\t\tPeer:                net.IPv4(192, 
168, 2, 5),\n\t\t\t\tListenAddress:       net.IPv4(192, 168, 2, 6),\n\t\t\t\tTokens:              []string{\"token-a\", \"token-b\", \"token-c\"},\n\t\t\t\tVersion:             version,\n\t\t\t\tPort:                9043,\n\t\t\t}\n\n\t\t\thost := builder.Build()\n\n\t\t\t// Verify all fields\n\t\t\tif host.WorkLoad() != builder.Workload {\n\t\t\t\tt.Errorf(\"WorkLoad() = %q, expected %q\", host.WorkLoad(), builder.Workload)\n\t\t\t}\n\t\t\tif host.HostID() != builder.HostId {\n\t\t\t\tt.Errorf(\"HostID() = %q, expected %q\", host.HostID(), builder.HostId)\n\t\t\t}\n\t\t\tif host.ClusterName() != builder.ClusterName {\n\t\t\t\tt.Errorf(\"ClusterName() = %q, expected %q\", host.ClusterName(), builder.ClusterName)\n\t\t\t}\n\t\t\tif host.Partitioner() != builder.Partitioner {\n\t\t\t\tt.Errorf(\"Partitioner() = %q, expected %q\", host.Partitioner(), builder.Partitioner)\n\t\t\t}\n\t\t\tif host.Rack() != builder.Rack {\n\t\t\t\tt.Errorf(\"Rack() = %q, expected %q\", host.Rack(), builder.Rack)\n\t\t\t}\n\t\t\tif host.DSEVersion() != builder.DseVersion {\n\t\t\t\tt.Errorf(\"DSEVersion() = %q, expected %q\", host.DSEVersion(), builder.DseVersion)\n\t\t\t}\n\t\t\tif host.DataCenter() != builder.DataCenter {\n\t\t\t\tt.Errorf(\"DataCenter() = %q, expected %q\", host.DataCenter(), builder.DataCenter)\n\t\t\t}\n\t\t\tif !host.UntranslatedConnectAddress().Equal(builder.ConnectAddress) {\n\t\t\t\tt.Errorf(\"UntranslatedConnectAddress() = %v, expected %v\", host.UntranslatedConnectAddress(), builder.ConnectAddress)\n\t\t\t}\n\t\t\tif !host.BroadcastAddress().Equal(builder.BroadcastAddress) {\n\t\t\t\tt.Errorf(\"BroadcastAddress() = %v, expected %v\", host.BroadcastAddress(), builder.BroadcastAddress)\n\t\t\t}\n\t\t\tif !host.PreferredIP().Equal(builder.PreferredIP) {\n\t\t\t\tt.Errorf(\"PreferredIP() = %v, expected %v\", host.PreferredIP(), builder.PreferredIP)\n\t\t\t}\n\t\t\tif !host.RPCAddress().Equal(builder.RpcAddress) {\n\t\t\t\tt.Errorf(\"RPCAddress() = %v, expected 
%v\", host.RPCAddress(), builder.RpcAddress)\n\t\t\t}\n\t\t\tif !host.Peer().Equal(builder.Peer) {\n\t\t\t\tt.Errorf(\"Peer() = %v, expected %v\", host.Peer(), builder.Peer)\n\t\t\t}\n\t\t\tif !host.ListenAddress().Equal(builder.ListenAddress) {\n\t\t\t\tt.Errorf(\"ListenAddress() = %v, expected %v\", host.ListenAddress(), builder.ListenAddress)\n\t\t\t}\n\t\t\tif host.Version() != builder.Version {\n\t\t\t\tt.Errorf(\"Version() = %+v, expected %+v\", host.Version(), builder.Version)\n\t\t\t}\n\t\t\tif host.Port() != builder.Port {\n\t\t\t\tt.Errorf(\"Port() = %d, expected %d\", host.Port(), builder.Port)\n\t\t\t}\n\t\t\tif len(host.Tokens()) != len(builder.Tokens) {\n\t\t\t\tt.Errorf(\"len(Tokens()) = %d, expected %d\", len(host.Tokens()), len(builder.Tokens))\n\t\t\t}\n\n\t\t\t// Verify ConnectAddress uses translated address\n\t\t\tif !host.ConnectAddress().Equal(translatedAddrs.CQL.Address) {\n\t\t\t\tt.Errorf(\"ConnectAddress() = %v, expected %v\", host.ConnectAddress(), translatedAddrs.CQL.Address)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"nil IP addresses\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuilder := HostInfoBuilder{\n\t\t\t\tConnectAddress:   nil,\n\t\t\t\tBroadcastAddress: nil,\n\t\t\t\tPreferredIP:      nil,\n\t\t\t\tRpcAddress:       nil,\n\t\t\t\tPeer:             nil,\n\t\t\t\tListenAddress:    nil,\n\t\t\t\tPort:             9042,\n\t\t\t}\n\n\t\t\thost := builder.Build()\n\n\t\t\tif host.UntranslatedConnectAddress() != nil {\n\t\t\t\tt.Errorf(\"UntranslatedConnectAddress() = %v, expected nil\", host.UntranslatedConnectAddress())\n\t\t\t}\n\t\t\tif host.BroadcastAddress() != nil {\n\t\t\t\tt.Errorf(\"BroadcastAddress() = %v, expected nil\", host.BroadcastAddress())\n\t\t\t}\n\t\t\tif host.PreferredIP() != nil {\n\t\t\t\tt.Errorf(\"PreferredIP() = %v, expected nil\", host.PreferredIP())\n\t\t\t}\n\t\t\tif host.RPCAddress() != nil {\n\t\t\t\tt.Errorf(\"RPCAddress() = %v, expected nil\", host.RPCAddress())\n\t\t\t}\n\t\t\tif host.Peer() != nil 
{\n\t\t\t\tt.Errorf(\"Peer() = %v, expected nil\", host.Peer())\n\t\t\t}\n\t\t\tif host.ListenAddress() != nil {\n\t\t\t\tt.Errorf(\"ListenAddress() = %v, expected nil\", host.ListenAddress())\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"connect address priority without translated addresses\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\t// Test that ConnectAddress follows the priority order when translated addresses are not set\n\t\t\ttests := []struct {\n\t\t\t\tname         string\n\t\t\t\tbuilder      HostInfoBuilder\n\t\t\t\texpectedAddr net.IP\n\t\t\t\tshouldPanic  bool\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tname: \"connectAddress takes priority\",\n\t\t\t\t\tbuilder: HostInfoBuilder{\n\t\t\t\t\t\tConnectAddress:   net.IPv4(1, 1, 1, 1),\n\t\t\t\t\t\tRpcAddress:       net.IPv4(2, 2, 2, 2),\n\t\t\t\t\t\tPreferredIP:      net.IPv4(3, 3, 3, 3),\n\t\t\t\t\t\tBroadcastAddress: net.IPv4(4, 4, 4, 4),\n\t\t\t\t\t\tPeer:             net.IPv4(5, 5, 5, 5),\n\t\t\t\t\t\tPort:             9042,\n\t\t\t\t\t},\n\t\t\t\t\texpectedAddr: net.IPv4(1, 1, 1, 1),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"rpcAddress when connectAddress is nil\",\n\t\t\t\t\tbuilder: HostInfoBuilder{\n\t\t\t\t\t\tRpcAddress:       net.IPv4(2, 2, 2, 2),\n\t\t\t\t\t\tPreferredIP:      net.IPv4(3, 3, 3, 3),\n\t\t\t\t\t\tBroadcastAddress: net.IPv4(4, 4, 4, 4),\n\t\t\t\t\t\tPeer:             net.IPv4(5, 5, 5, 5),\n\t\t\t\t\t\tPort:             9042,\n\t\t\t\t\t},\n\t\t\t\t\texpectedAddr: net.IPv4(2, 2, 2, 2),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"preferredIP when connectAddress and rpcAddress are nil\",\n\t\t\t\t\tbuilder: HostInfoBuilder{\n\t\t\t\t\t\tPreferredIP:      net.IPv4(3, 3, 3, 3),\n\t\t\t\t\t\tBroadcastAddress: net.IPv4(4, 4, 4, 4),\n\t\t\t\t\t\tPeer:             net.IPv4(5, 5, 5, 5),\n\t\t\t\t\t\tPort:             9042,\n\t\t\t\t\t},\n\t\t\t\t\texpectedAddr: net.IPv4(3, 3, 3, 3),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"broadcastAddress when others are nil\",\n\t\t\t\t\tbuilder: 
HostInfoBuilder{\n\t\t\t\t\t\tBroadcastAddress: net.IPv4(4, 4, 4, 4),\n\t\t\t\t\t\tPeer:             net.IPv4(5, 5, 5, 5),\n\t\t\t\t\t\tPort:             9042,\n\t\t\t\t\t},\n\t\t\t\t\texpectedAddr: net.IPv4(4, 4, 4, 4),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"peer when all others are nil\",\n\t\t\t\t\tbuilder: HostInfoBuilder{\n\t\t\t\t\t\tPeer: net.IPv4(5, 5, 5, 5),\n\t\t\t\t\t\tPort: 9042,\n\t\t\t\t\t},\n\t\t\t\t\texpectedAddr: net.IPv4(5, 5, 5, 5),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"no valid addresses panics\",\n\t\t\t\t\tbuilder: HostInfoBuilder{\n\t\t\t\t\t\tPort: 9042,\n\t\t\t\t\t},\n\t\t\t\t\tshouldPanic: true,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t\t\thost := test.builder.Build()\n\n\t\t\t\t\tif test.shouldPanic {\n\t\t\t\t\t\tdefer func() {\n\t\t\t\t\t\t\tif r := recover(); r == nil {\n\t\t\t\t\t\t\t\tt.Error(\"ConnectAddress() should have panicked but did not\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}()\n\t\t\t\t\t}\n\n\t\t\t\t\taddr := host.ConnectAddress()\n\t\t\t\t\tif !test.shouldPanic && !addr.Equal(test.expectedAddr) {\n\t\t\t\t\t\tt.Errorf(\"ConnectAddress() = %v, expected %v\", addr, test.expectedAddr)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t})\n}\n"
  },
  {
    "path": "hostpolicy/hostpool.go",
    "content": "package hostpolicy\n\nimport (\n\t\"sync\"\n\n\t\"github.com/hailocab/go-hostpool\"\n\n\t\"github.com/gocql/gocql\"\n)\n\n// HostPool is a host policy which uses the bitly/go-hostpool library\n// to distribute queries between hosts and prevent sending queries to\n// unresponsive hosts. When creating the host pool that is passed to the policy\n// use an empty slice of hosts as the hostpool will be populated later by gocql.\n// See below for examples of usage:\n//\n//\t// Create host selection policy using a simple host pool\n//\tcluster.PoolConfig.HostSelectionPolicy = HostPool(hostpool.New(nil))\n//\n//\t// Create host selection policy using an epsilon greedy pool\n//\tcluster.PoolConfig.HostSelectionPolicy = HostPool(\n//\t    hostpool.NewEpsilonGreedy(nil, 0, &hostpool.LinearEpsilonValueCalculator{}),\n//\t)\n\nfunc HostPool(hp hostpool.HostPool) gocql.HostSelectionPolicy {\n\treturn &hostPoolHostPolicy{hostMap: map[string]*gocql.HostInfo{}, hp: hp}\n}\n\ntype hostPoolHostPolicy struct {\n\thp      hostpool.HostPool\n\thostMap map[string]*gocql.HostInfo\n\tmu      sync.RWMutex\n}\n\nfunc (r *hostPoolHostPolicy) Init(*gocql.Session)                       {}\nfunc (r *hostPoolHostPolicy) Reset()                                    {}\nfunc (r *hostPoolHostPolicy) IsOperational(*gocql.Session) error        { return nil }\nfunc (r *hostPoolHostPolicy) KeyspaceChanged(gocql.KeyspaceUpdateEvent) {}\nfunc (r *hostPoolHostPolicy) SetPartitioner(string)                     {}\nfunc (r *hostPoolHostPolicy) IsLocal(*gocql.HostInfo) bool              { return true }\n\nfunc (r *hostPoolHostPolicy) SetHosts(hosts []*gocql.HostInfo) {\n\tpeers := make([]string, len(hosts))\n\thostMap := make(map[string]*gocql.HostInfo, len(hosts))\n\n\tfor i, host := range hosts {\n\t\tip := host.ConnectAddress().String()\n\t\tpeers[i] = ip\n\t\thostMap[ip] = host\n\t}\n\n\tr.mu.Lock()\n\tr.hp.SetHosts(peers)\n\tr.hostMap = hostMap\n\tr.mu.Unlock()\n}\n\nfunc (r 
*hostPoolHostPolicy) AddHost(host *gocql.HostInfo) {\n\tip := host.ConnectAddress().String()\n\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\t// If the host addr is present and isn't nil return\n\tif h, ok := r.hostMap[ip]; ok && h != nil {\n\t\treturn\n\t}\n\t// otherwise, add the host to the map\n\tr.hostMap[ip] = host\n\t// and construct a new peer list to give to the HostPool\n\thosts := make([]string, 0, len(r.hostMap))\n\tfor addr := range r.hostMap {\n\t\thosts = append(hosts, addr)\n\t}\n\n\tr.hp.SetHosts(hosts)\n}\n\nfunc (r *hostPoolHostPolicy) RemoveHost(host *gocql.HostInfo) {\n\tip := host.ConnectAddress().String()\n\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif _, ok := r.hostMap[ip]; !ok {\n\t\treturn\n\t}\n\n\tdelete(r.hostMap, ip)\n\thosts := make([]string, 0, len(r.hostMap))\n\tfor _, host := range r.hostMap {\n\t\thosts = append(hosts, host.ConnectAddress().String())\n\t}\n\n\tr.hp.SetHosts(hosts)\n}\n\nfunc (r *hostPoolHostPolicy) HostUp(host *gocql.HostInfo) {\n\tr.AddHost(host)\n}\n\nfunc (r *hostPoolHostPolicy) HostDown(host *gocql.HostInfo) {\n\tr.RemoveHost(host)\n}\n\nfunc (r *hostPoolHostPolicy) Pick(qry gocql.ExecutableQuery) gocql.NextHost {\n\treturn func() gocql.SelectedHost {\n\t\tr.mu.RLock()\n\t\tdefer r.mu.RUnlock()\n\n\t\tif len(r.hostMap) == 0 {\n\t\t\treturn nil\n\t\t}\n\n\t\thostR := r.hp.Get()\n\t\thost, ok := r.hostMap[hostR.Host()]\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn selectedHostPoolHost{\n\t\t\tpolicy: r,\n\t\t\tinfo:   host,\n\t\t\thostR:  hostR,\n\t\t}\n\t}\n}\n\n// selectedHostPoolHost is a host returned by the hostPoolHostPolicy and\n// implements the SelectedHost interface\ntype selectedHostPoolHost struct {\n\tpolicy *hostPoolHostPolicy\n\tinfo   *gocql.HostInfo\n\thostR  hostpool.HostPoolResponse\n}\n\nfunc (host selectedHostPoolHost) Info() *gocql.HostInfo {\n\treturn host.info\n}\n\nfunc (host selectedHostPoolHost) Token() gocql.Token {\n\treturn nil\n}\n\nfunc (host selectedHostPoolHost) Mark(err 
error) {\n\tip := host.info.ConnectAddress().String()\n\n\thost.policy.mu.RLock()\n\tdefer host.policy.mu.RUnlock()\n\n\tif _, ok := host.policy.hostMap[ip]; !ok {\n\t\t// host was removed between pick and mark\n\t\treturn\n\t}\n\n\thost.hostR.Mark(err)\n}\n"
  },
  {
    "path": "hostpolicy/hostpool_test.go",
    "content": "package hostpolicy\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"testing\"\n\n\t\"github.com/hailocab/go-hostpool\"\n\n\t\"github.com/gocql/gocql\"\n)\n\n// Tests of the host pool host selection policy implementation\nfunc TestHostPolicy_HostPool(t *testing.T) {\n\tpolicy := HostPool(hostpool.New(nil))\n\n\t//hosts := []*gocql.HostInfo{\n\t//\t{hostId: \"0\", connectAddress: net.IPv4(10, 0, 0, 0)},\n\t//\t{hostId: \"1\", connectAddress: net.IPv4(10, 0, 0, 1)},\n\t//}\n\tfirstHost := gocql.HostInfoBuilder{\n\t\tHostId:         \"a0000000-0000-0000-0000-000000000000\",\n\t\tConnectAddress: net.IPv4(10, 0, 0, 0),\n\t}.Build()\n\tsecHost := gocql.HostInfoBuilder{\n\t\tHostId:         \"a0000000-0000-0000-0000-000000000001\",\n\t\tConnectAddress: net.IPv4(10, 0, 0, 1),\n\t}.Build()\n\thosts := []*gocql.HostInfo{&firstHost, &secHost}\n\t// Using set host to control the ordering of the hosts as calling \"AddHost\" iterates the map\n\t// which will result in an unpredictable ordering\n\tpolicy.(*hostPoolHostPolicy).SetHosts(hosts)\n\n\t// the first host selected is actually at [1], but this is ok for RR\n\t// interleaved iteration should always increment the host\n\titer := policy.Pick(nil)\n\tactualA := iter()\n\tif actualA.Info().HostID() != \"a0000000-0000-0000-0000-000000000000\" {\n\t\tt.Errorf(\"Expected hosts[0] but was hosts[%s]\", actualA.Info().HostID())\n\t}\n\tactualA.Mark(nil)\n\n\tactualB := iter()\n\tif actualB.Info().HostID() != \"a0000000-0000-0000-0000-000000000001\" {\n\t\tt.Errorf(\"Expected hosts[1] but was hosts[%s]\", actualB.Info().HostID())\n\t}\n\tactualB.Mark(fmt.Errorf(\"error\"))\n\n\tactualC := iter()\n\tif actualC.Info().HostID() != \"a0000000-0000-0000-0000-000000000000\" {\n\t\tt.Errorf(\"Expected hosts[0] but was hosts[%s]\", actualC.Info().HostID())\n\t}\n\tactualC.Mark(nil)\n\n\tactualD := iter()\n\tif actualD.Info().HostID() != \"a0000000-0000-0000-0000-000000000000\" {\n\t\tt.Errorf(\"Expected hosts[0] but was hosts[%s]\", 
actualD.Info().HostID())\n\t}\n\tactualD.Mark(nil)\n}\n"
  },
  {
    "path": "install_test_deps.sh",
    "content": ""
  },
  {
    "path": "integration.sh",
    "content": "#!/bin/bash\n#\n# Copyright (C) 2017 ScyllaDB\n#\n\nreadonly SCYLLA_IMAGE=${SCYLLA_IMAGE}\n\nset -eu -o pipefail\n\nfunction scylla_up() {\n  local -r exec=\"docker compose exec -T\"\n\n  echo \"==> Running Scylla ${SCYLLA_IMAGE}\"\n  docker pull ${SCYLLA_IMAGE}\n  docker compose up -d --wait || ( docker compose ps --format json | jq -M 'select(.Health == \"unhealthy\") | .Service' | xargs docker compose logs; exit 1 )\n}\n\nfunction scylla_down() {\n  echo \"==> Stopping Scylla\"\n  docker compose down\n}\n\nfunction scylla_restart() {\n  scylla_down\n  scylla_up\n}\n\nscylla_restart\n\nsudo chmod 0777 /tmp/scylla_node_1/cql.m\nsudo chmod 0777 /tmp/scylla_node_2/cql.m\nsudo chmod 0777 /tmp/scylla_node_3/cql.m\n\nreadonly clusterSize=3\nreadonly scylla_liveset=\"192.168.100.11,192.168.100.12,192.168.100.13\"\nreadonly cversion=\"3.11.4\"\nreadonly proto=4\nreadonly args=\"-cluster-socket /tmp/scylla_node_1/cql.m -gocql.timeout=60s -proto=${proto} -rf=1 -clusterSize=${clusterSize} -autowait=2000ms -compressor=snappy -gocql.cversion=${cversion} -cluster=${scylla_liveset}\"\n\nTAGS=$*\n\nif [ ! -z \"$TAGS\" ];\nthen\n\techo \"==> Running ${TAGS} tests with args: ${args}\"\n\tgo test -v -timeout=5m -race -tags=\"$TAGS\" ${args} ./...\nfi\n"
  },
  {
    "path": "integration_only.go",
    "content": "//go:build integration\n// +build integration\n\npackage gocql\n\n// This file contains code to enable easy access to driver internals\n// To be used only for integration test\n\nimport \"fmt\"\n\nfunc (p *policyConnPool) MissingConnections() (int, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\ttotal := 0\n\n\t// close the pools\n\tfor _, pool := range p.hostConnPools {\n\t\tmissing := pool.GetShardCount() - pool.GetConnectionCount()\n\t\tif pool.IsClosed() {\n\t\t\treturn 0, fmt.Errorf(\"pool for %s is closed\", pool.host.HostID())\n\t\t}\n\t\ttotal += missing\n\t}\n\treturn total, nil\n}\n\nfunc (s *Session) MissingConnections() (int, error) {\n\tif s.pool == nil {\n\t\treturn 0, fmt.Errorf(\"pool is nil\")\n\t}\n\treturn s.pool.MissingConnections()\n}\n\ntype ConnPickerIntegration interface {\n\tPick(Token, ExecutableQuery) *Conn\n\tPut(*Conn) error\n\tRemove(conn *Conn)\n\tInFlight() int\n\tSize() (int, int)\n\tClose()\n\tCloseAllConnections()\n\n\t// NextShard returns the shardID to connect to.\n\t// nrShard specifies how many shards the host has.\n\t// If nrShards is zero, the caller shouldn't use shard-aware port.\n\tNextShard() (shardID, nrShards int)\n}\n\nfunc (p *scyllaConnPicker) CloseAllConnections() {\n\tp.nrConns = 0\n\tcloseConns(p.conns...)\n\tfor id := range p.conns {\n\t\tp.conns[id] = nil\n\t}\n}\n\nfunc (p *defaultConnPicker) CloseAllConnections() {\n\tcloseConns(p.conns...)\n\tp.conns = p.conns[:0]\n}\n\nfunc (p *nopConnPicker) CloseAllConnections() {\n}\n\nfunc (pool *hostConnPool) CloseAllConnections() {\n\tif !pool.closed {\n\t\treturn\n\t}\n\tpool.mu.Lock()\n\tprintln(\"Closing all connections in a pool\")\n\tpool.connPicker.(ConnPickerIntegration).CloseAllConnections()\n\tprintln(\"Filling the pool\")\n\tpool.mu.Unlock()\n\tpool.fill()\n}\n\nfunc (p *policyConnPool) CloseAllConnections() {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\t// close the pools\n\tfor _, pool := range p.hostConnPools 
{\n\t\tpool.CloseAllConnections()\n\t}\n}\n\nfunc (s *Session) CloseAllConnections() {\n\tif s.pool != nil {\n\t\ts.pool.CloseAllConnections()\n\t}\n}\n"
  },
  {
    "path": "integration_serialization_scylla_test.go",
    "content": "//go:build integration\n// +build integration\n\npackage gocql\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\t\"unsafe\"\n\n\t\"gopkg.in/inf.v0\"\n\n\t\"github.com/gocql/gocql/internal/tests/serialization/valcases\"\n)\n\nfunc TestSerializationSimpleTypesCassandra(t *testing.T) {\n\tt.Parallel()\n\n\tconst (\n\t\tpkColumn   = \"test_id\"\n\t\ttestColumn = \"test_col\"\n\t)\n\n\ttypeCases := valcases.GetSimple()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\t//Checks data and values conversion\n\tt.Run(\"Marshal\", func(t *testing.T) {\n\t\tfor _, tc := range typeCases {\n\t\t\tcheckTypeMarshal(t, tc)\n\t\t}\n\t})\n\n\tt.Run(\"Unmarshal\", func(t *testing.T) {\n\t\tfor _, tc := range typeCases {\n\t\t\tcheckTypeUnmarshal(t, tc)\n\t\t}\n\t})\n\n\t//Create are tables\n\ttables := make([]string, len(typeCases))\n\tfor i, tc := range typeCases {\n\t\ttable := testTableName(t, tc.CQLName)\n\n\t\tstmt := fmt.Sprintf(`CREATE TABLE %s (%s text, %s %s, PRIMARY KEY (test_id))`, table, pkColumn, testColumn, tc.CQLName)\n\t\tif err := createTable(session, stmt); err != nil {\n\t\t\tt.Fatalf(\"failed to create table for cqltype (%s) with error '%v'\", tc.CQLName, err)\n\t\t}\n\t\ttables[i] = table\n\t}\n\n\t//Check Insert and Select are values\n\tt.Run(\"InsertSelect\", func(t *testing.T) {\n\t\tfor i, tc := range typeCases {\n\t\t\tinsertStmt := fmt.Sprintf(\"INSERT INTO %s (%s, %s) VALUES(?, ?)\", tables[i], pkColumn, testColumn)\n\t\t\tselectStmt := fmt.Sprintf(\"SELECT %s FROM %s WHERE %s = ?\", testColumn, tables[i], pkColumn)\n\n\t\t\tcheckTypeInsertSelect(t, session, insertStmt, selectStmt, tc)\n\t\t}\n\t})\n}\n\nfunc checkTypeMarshal(t *testing.T, tc valcases.SimpleTypeCases) {\n\tcqlName := tc.CQLName\n\tt.Run(cqlName, func(t *testing.T) {\n\t\ttp := Type(tc.CQLType)\n\t\tcqlType := NewNativeType(4, tp)\n\n\t\tfor _, valCase := range tc.Cases {\n\t\t\tfor _, langCase := 
range valCase.LangCases {\n\t\t\t\treceivedData, err := Marshal(cqlType, langCase.Value)\n\n\t\t\t\tif !langCase.ErrInsert && err != nil {\n\t\t\t\t\tt.Errorf(\"failed to marshal case (%s)(%s) value (%T) with error '%v'\", valCase.Name, langCase.LangType, langCase.Value, err)\n\t\t\t\t} else if langCase.ErrInsert && err == nil {\n\t\t\t\t\tt.Errorf(\"expected an error on marshal case (%s)(%s) value (%T)(%[2]v), but have no error\", valCase.Name, langCase.LangType, langCase.Value)\n\t\t\t\t} else if !bytes.Equal(valCase.Data, receivedData) {\n\t\t\t\t\tt.Errorf(\"failed to equal case (%s)(%s) data: expected %d, got %d\", valCase.Name, langCase.LangType, valCase.Data, receivedData)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc checkTypeUnmarshal(t *testing.T, tc valcases.SimpleTypeCases) {\n\tcqlName := tc.CQLName\n\tt.Run(cqlName, func(t *testing.T) {\n\t\ttp := Type(tc.CQLType)\n\t\tcqlType := NewNativeType(4, tp)\n\n\t\tfor _, valCase := range tc.Cases {\n\t\t\tfor _, langCase := range valCase.LangCases {\n\t\t\t\treceived := newRef(langCase.Value)\n\n\t\t\t\terr := Unmarshal(cqlType, valCase.Data, received)\n\t\t\t\tif !langCase.ErrSelect && err != nil {\n\t\t\t\t\tt.Errorf(\"failed to unmarshal case (%s)(%s) value (%T) with error '%v'\", valCase.Name, langCase.LangType, langCase.Value, err)\n\t\t\t\t}\n\t\t\t\tif langCase.ErrSelect && err == nil {\n\t\t\t\t\tt.Errorf(\"expected an error on unmarshal case (%s)(%s) value (%T)(%[2]v), but have no error\", valCase.Name, langCase.LangType, langCase.Value)\n\t\t\t\t}\n\t\t\t\treceived = deReference(received)\n\t\t\t\tif !equalVals(langCase.Value, received) {\n\t\t\t\t\tt.Errorf(\"failed to equal case (%s)(%s) value: expected %d, got %d\", valCase.Name, langCase.LangType, langCase.Value, received)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc checkTypeInsertSelect(t *testing.T, session *Session, insertStmt, selectStmt string, tc valcases.SimpleTypeCases) {\n\tcqlName := tc.CQLName\n\tt.Run(cqlName, func(t *testing.T) 
{\n\t\ttp := Type(tc.CQLType)\n\t\tcqlType := NewNativeType(4, tp)\n\n\t\tfor _, valCase := range tc.Cases {\n\t\t\tvalCaseName := valCase.Name\n\n\t\t\tfor _, langCase := range valCase.LangCases {\n\t\t\t\tvar insertedValue any\n\t\t\t\t//Check Insert value as values\n\t\t\t\tinsertedValue = langCase.Value\n\t\t\t\terr := session.Query(insertStmt, valCaseName, insertedValue).Exec()\n\t\t\t\tif !langCase.ErrInsert && err != nil {\n\t\t\t\t\tt.Errorf(\"failed to insert case (%s) value (%T)(%[2]v) with error '%v'\", valCaseName, insertedValue, err)\n\t\t\t\t} else if langCase.ErrInsert && err == nil {\n\t\t\t\t\tt.Errorf(\"expected an error on insert case (%s) value (%T)(%[2]v), but have no error\", valCaseName, insertedValue, err)\n\t\t\t\t}\n\n\t\t\t\t//Check Select value as value\n\t\t\t\tselectedValue := newRef(langCase.Value)\n\t\t\t\terr = session.Query(selectStmt, valCase.Name).Scan(selectedValue)\n\t\t\t\tif !langCase.ErrSelect && err != nil {\n\t\t\t\t\tt.Errorf(\"failed to select case (%s) value (%T) with error '%v'\", valCaseName, selectedValue, err)\n\t\t\t\t} else if langCase.ErrSelect && err == nil {\n\t\t\t\t\tt.Errorf(\"expected an error on select case (%s) value (%T)(%[2]v), but have no error\", valCaseName, selectedValue)\n\t\t\t\t}\n\t\t\t\tselectedValue = deReference(selectedValue)\n\t\t\t\tif !equalVals(langCase.Value, selectedValue) {\n\t\t\t\t\tt.Errorf(\"failed to equal case (%s) value: expected: %d, got: %d\", valCaseName, langCase.Value, selectedValue)\n\t\t\t\t}\n\n\t\t\t\t//Check Select value as bytes\n\t\t\t\tselectedValue = &DirectUnmarshal{}\n\t\t\t\terr = session.Query(selectStmt, valCase.Name).Scan(selectedValue)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"failed to select case (%s) value (%T) for cqltype (%s) with error '%v'\", valCaseName, selectedValue, cqlType, err)\n\t\t\t\t}\n\t\t\t\tselectedValue = *(*[]byte)(selectedValue.(*DirectUnmarshal))\n\t\t\t\tif !equalVals(valCase.Data, selectedValue) {\n\t\t\t\t\tt.Errorf(\"failed 
to equal case (%s) value for cqltype (%s): expected: %d, got: %d\", valCaseName, cqlType, valCase.Data, selectedValue)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n}\n\n// newRef returns the nil reference to the input type value (*type)(nil)\nfunc newRef(in any) any {\n\tout := reflect.New(reflect.TypeOf(in)).Interface()\n\treturn out\n}\n\nfunc deReference(in any) any {\n\treturn reflect.Indirect(reflect.ValueOf(in)).Interface()\n}\n\nfunc equalVals(in1, in2 any) bool {\n\trin1 := reflect.ValueOf(in1)\n\trin2 := reflect.ValueOf(in2)\n\tif rin1.Kind() != rin2.Kind() {\n\t\treturn false\n\t}\n\tif rin1.Kind() == reflect.Ptr && (rin1.IsNil() || rin2.IsNil()) {\n\t\treturn rin1.IsNil() && rin2.IsNil()\n\t}\n\n\tswitch vin1 := in1.(type) {\n\tcase float32:\n\t\tvin2 := in2.(float32)\n\t\treturn *(*[4]byte)(unsafe.Pointer(&vin1)) == *(*[4]byte)(unsafe.Pointer(&vin2))\n\tcase *float32:\n\t\tvin2 := in2.(*float32)\n\t\treturn *(*[4]byte)(unsafe.Pointer(vin1)) == *(*[4]byte)(unsafe.Pointer(vin2))\n\tcase float64:\n\t\tvin2 := in2.(float64)\n\t\treturn *(*[8]byte)(unsafe.Pointer(&vin1)) == *(*[8]byte)(unsafe.Pointer(&vin2))\n\tcase *float64:\n\t\tvin2 := in2.(*float64)\n\t\treturn *(*[8]byte)(unsafe.Pointer(vin1)) == *(*[8]byte)(unsafe.Pointer(vin2))\n\tcase big.Int:\n\t\tvin2 := in2.(big.Int)\n\t\treturn vin1.Cmp(&vin2) == 0\n\tcase *big.Int:\n\t\tvin2 := in2.(*big.Int)\n\t\treturn vin1.Cmp(vin2) == 0\n\tcase inf.Dec:\n\t\tvin2 := in2.(inf.Dec)\n\t\tif vin1.Scale() != vin2.Scale() {\n\t\t\treturn false\n\t\t}\n\t\treturn vin1.UnscaledBig().Cmp(vin2.UnscaledBig()) == 0\n\tcase *inf.Dec:\n\t\tvin2 := in2.(*inf.Dec)\n\t\tif vin1.Scale() != vin2.Scale() {\n\t\t\treturn false\n\t\t}\n\t\treturn vin1.UnscaledBig().Cmp(vin2.UnscaledBig()) == 0\n\tcase fmt.Stringer:\n\t\tvin2 := in2.(fmt.Stringer)\n\t\treturn vin1.String() == vin2.String()\n\tdefault:\n\t\treturn reflect.DeepEqual(in1, in2)\n\t}\n}\n\n// SliceMapTypesTestCase defines a test case for validating SliceMap/MapScan behavior\ntype 
SliceMapTypesTestCase struct {\n\tCQLType           string\n\tCQLValue          string // Non-NULL value to insert\n\tExpectedValue     any    // Expected value for non-NULL case\n\tExpectedNullValue any    // Expected value for NULL\n}\n\n// compareCollectionValues compares collection values (lists, sets, maps) with special handling\nfunc compareCollectionValues(t *testing.T, cqlType string, expected, actual any) bool {\n\tswitch {\n\tcase strings.HasPrefix(cqlType, \"set<\"):\n\t\t// Sets are returned as slices, but order is not guaranteed\n\t\texpectedSlice := reflect.ValueOf(expected)\n\t\tactualSlice := reflect.ValueOf(actual)\n\t\tif expectedSlice.Kind() != reflect.Slice || actualSlice.Kind() != reflect.Slice {\n\t\t\treturn false\n\t\t}\n\t\tif expectedSlice.Len() != actualSlice.Len() {\n\t\t\treturn false\n\t\t}\n\n\t\t// Convert to maps for unordered comparison\n\t\texpectedSet := make(map[any]bool)\n\t\tfor i := 0; i < expectedSlice.Len(); i++ {\n\t\t\texpectedSet[expectedSlice.Index(i).Interface()] = true\n\t\t}\n\n\t\tactualSet := make(map[any]bool)\n\t\tfor i := 0; i < actualSlice.Len(); i++ {\n\t\t\tactualSet[actualSlice.Index(i).Interface()] = true\n\t\t}\n\n\t\treturn reflect.DeepEqual(expectedSet, actualSet)\n\n\tdefault:\n\t\t// For lists, maps, and other collections, reflect.DeepEqual works fine\n\t\treturn reflect.DeepEqual(expected, actual)\n\t}\n}\n\n// compareValues compares expected and actual values with type-specific logic\nfunc compareValues(t *testing.T, cqlType string, expected, actual any) bool {\n\tswitch cqlType {\n\tcase \"varint\":\n\t\t// big.Int needs Cmp() for proper comparison, but handle nil pointers safely\n\t\tif expectedBig, ok := expected.(*big.Int); ok {\n\t\t\tif actualBig, ok := actual.(*big.Int); ok {\n\t\t\t\t// Handle nil cases\n\t\t\t\tif expectedBig == nil && actualBig == nil {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t\tif expectedBig == nil || actualBig == nil {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\treturn 
expectedBig.Cmp(actualBig) == 0\n\t\t\t}\n\t\t}\n\t\treturn reflect.DeepEqual(expected, actual)\n\n\tcase \"decimal\":\n\t\t// inf.Dec needs Cmp() for proper comparison, but handle nil pointers safely\n\t\tif expectedDec, ok := expected.(*inf.Dec); ok {\n\t\t\tif actualDec, ok := actual.(*inf.Dec); ok {\n\t\t\t\t// Handle nil cases\n\t\t\t\tif expectedDec == nil && actualDec == nil {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t\tif expectedDec == nil || actualDec == nil {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\treturn expectedDec.Cmp(actualDec) == 0\n\t\t\t}\n\t\t}\n\t\treturn reflect.DeepEqual(expected, actual)\n\n\tdefault:\n\t\t// reflect.DeepEqual handles nil vs empty slice/map distinction correctly for all types\n\t\t// including inet (net.IP), blob ([]byte), collections ([]T, map[K]V), etc.\n\t\t// This is critical for catching zero value behavior changes in the driver\n\t\treturn reflect.DeepEqual(expected, actual)\n\t}\n}\n\n// TestSliceMapMapScanTypes tests SliceMap and MapScan with various CQL types\nfunc TestSliceMapMapScanTypes(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\ttableCQL := fmt.Sprintf(`\n\t\tCREATE TABLE IF NOT EXISTS gocql_test.%s (\n\t\t\tid int PRIMARY KEY,\n\t\t\ttinyint_col tinyint,\n\t\t\tsmallint_col smallint,\n\t\t\tint_col int,\n\t\t\tbigint_col bigint,\n\t\t\tfloat_col float,\n\t\t\tdouble_col double,\n\t\t\tboolean_col boolean,\n\t\t\ttext_col text,\n\t\t\tascii_col ascii,\n\t\t\tvarchar_col varchar,\n\t\t\ttimestamp_col timestamp,\n\t\t\tuuid_col uuid,\n\t\t\ttimeuuid_col timeuuid,\n\t\t\tinet_col inet,\n\t\t\tblob_col blob,\n\t\t\tvarint_col varint,\n\t\t\tdecimal_col decimal,\n\t\t\tdate_col date,\n\t\t\ttime_col time,\n\t\t\tduration_col duration\n\t\t)`, table)\n\n\tif err := createTable(session, tableCQL); err != nil {\n\t\tt.Fatal(\"Failed to create test table:\", err)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(\"TRUNCATE gocql_test.%s\", 
table)).Exec(); err != nil {\n\t\tt.Fatal(\"Failed to truncate test table:\", err)\n\t}\n\n\ttestCases := []SliceMapTypesTestCase{\n\t\t{\"tinyint\", \"42\", int8(42), int8(0)},\n\t\t{\"smallint\", \"1234\", int16(1234), int16(0)},\n\t\t{\"int\", \"123456\", int(123456), int(0)},\n\t\t{\"bigint\", \"1234567890\", int64(1234567890), int64(0)},\n\t\t{\"float\", \"3.14\", float32(3.14), float32(0)},\n\t\t{\"double\", \"2.718281828\", float64(2.718281828), float64(0)},\n\t\t{\"boolean\", \"true\", true, false},\n\t\t{\"text\", \"'hello world'\", \"hello world\", \"\"},\n\t\t{\"ascii\", \"'hello ascii'\", \"hello ascii\", \"\"},\n\t\t{\"varchar\", \"'hello varchar'\", \"hello varchar\", \"\"},\n\t\t{\"timestamp\", \"1388534400000\", time.Unix(1388534400, 0).UTC(), time.Time{}},\n\t\t{\"uuid\", \"550e8400-e29b-41d4-a716-446655440000\", mustParseUUID(\"550e8400-e29b-41d4-a716-446655440000\"), UUID{}},\n\t\t{\"timeuuid\", \"60d79c23-5793-11f0-8afe-bcfce78b517a\", mustParseUUID(\"60d79c23-5793-11f0-8afe-bcfce78b517a\"), UUID{}},\n\t\t{\"inet\", \"'127.0.0.1'\", \"127.0.0.1\", \"\"},\n\t\t{\"blob\", \"0x48656c6c6f\", []byte(\"Hello\"), []byte(nil)},\n\t\t{\"varint\", \"123456789012345678901234567890\", mustParseBigInt(\"123456789012345678901234567890\"), (*big.Int)(nil)},\n\t\t{\"decimal\", \"123.45\", mustParseDecimal(\"123.45\"), (*inf.Dec)(nil)},\n\t\t{\"date\", \"'2015-05-03'\", time.Date(2015, 5, 3, 0, 0, 0, 0, time.UTC), time.Date(-5877641, 06, 23, 0, 0, 0, 0, time.UTC)},\n\t\t{\"time\", \"'13:30:54.234'\", 13*time.Hour + 30*time.Minute + 54*time.Second + 234*time.Millisecond, time.Duration(0)},\n\t\t{\"duration\", \"1y2mo3d4h5m6s789ms\", mustCreateDuration(14, 3, 4*time.Hour+5*time.Minute+6*time.Second+789*time.Millisecond), Duration{}},\n\t}\n\n\tfor i, tc := range testCases {\n\t\tt.Run(tc.CQLType, func(t *testing.T) {\n\t\t\ttestSliceMapMapScanSimple(t, session, tc, i, table)\n\t\t})\n\t}\n}\n\n// Simplified test function that tests both SliceMap and MapScan with 
both NULL and non-NULL values\nfunc testSliceMapMapScanSimple(t *testing.T, session *Session, tc SliceMapTypesTestCase, id int, table string) {\n\tcolName := tc.CQLType + \"_col\"\n\n\tt.Run(\"NonNull\", func(t *testing.T) {\n\t\tinsertQuery := fmt.Sprintf(\"INSERT INTO gocql_test.%s (id, %s) VALUES (?, %s)\", table, colName, tc.CQLValue)\n\t\tif err := session.Query(insertQuery, id*2).Exec(); err != nil {\n\t\t\tt.Fatalf(\"Failed to insert non-NULL value: %v\", err)\n\t\t}\n\n\t\tfor _, method := range []string{\"SliceMap\", \"MapScan\"} {\n\t\t\tt.Run(method, func(t *testing.T) {\n\t\t\t\tresult := queryAndExtractValue(t, session, colName, id*2, method, table)\n\t\t\t\tvalidateResult(t, tc.CQLType, tc.ExpectedValue, result, method, \"non-NULL\")\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"Null\", func(t *testing.T) {\n\t\tinsertQuery := fmt.Sprintf(\"INSERT INTO gocql_test.%s (id, %s) VALUES (?, NULL)\", table, colName)\n\t\tif err := session.Query(insertQuery, id*2+1).Exec(); err != nil {\n\t\t\tt.Fatalf(\"Failed to insert NULL value: %v\", err)\n\t\t}\n\n\t\t// Test both SliceMap and MapScan\n\t\tfor _, method := range []string{\"SliceMap\", \"MapScan\"} {\n\t\t\tt.Run(method, func(t *testing.T) {\n\t\t\t\tresult := queryAndExtractValue(t, session, colName, id*2+1, method, table)\n\t\t\t\tvalidateResult(t, tc.CQLType, tc.ExpectedNullValue, result, method, \"NULL\")\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc queryAndExtractValue(t *testing.T, session *Session, colName string, id int, method string, table string) any {\n\tfmt.Println(\"queryAndExtractValue\")\n\tselectQuery := fmt.Sprintf(\"SELECT %s FROM gocql_test.%s WHERE id = ?\", colName, table)\n\n\tswitch method {\n\tcase \"SliceMap\":\n\t\titer := session.Query(selectQuery, id).Iter()\n\t\tsliceResults, err := iter.SliceMap()\n\t\tfmt.Println(\"Slice results: \", sliceResults[0][colName])\n\t\titer.Close()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"SliceMap failed: %v\", err)\n\t\t}\n\t\tif len(sliceResults) != 1 
{\n\t\t\tt.Fatalf(\"Expected 1 result, got %d\", len(sliceResults))\n\t\t}\n\t\treturn sliceResults[0][colName]\n\n\tcase \"MapScan\":\n\t\tmapResult := make(map[string]any)\n\t\tif err := session.Query(selectQuery, id).MapScan(mapResult); err != nil {\n\t\t\tt.Fatalf(\"MapScan failed: %v\", err)\n\t\t}\n\t\treturn mapResult[colName]\n\n\tdefault:\n\t\tt.Fatalf(\"Unknown method: %s\", method)\n\t\treturn nil\n\t}\n}\n\nfunc validateResult(t *testing.T, cqlType string, expected, actual any, method, valueType string) {\n\tif expected != nil && actual != nil {\n\t\texpectedType := reflect.TypeOf(expected)\n\t\tactualType := reflect.TypeOf(actual)\n\t\tif expectedType != actualType {\n\t\t\tt.Errorf(\"%s %s %s: expected type %v, got %v\", method, valueType, cqlType, expectedType, actualType)\n\t\t}\n\t}\n\n\tif !compareValues(t, cqlType, expected, actual) {\n\t\tt.Errorf(\"%s %s %s: expected value %v (type %T), got %v (type %T)\",\n\t\t\tmethod, valueType, cqlType, expected, expected, actual, actual)\n\t}\n}\n\nfunc mustParseUUID(s string) UUID {\n\tu, err := ParseUUID(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn u\n}\n\nfunc mustParseBigInt(s string) *big.Int {\n\ti := new(big.Int)\n\tif _, ok := i.SetString(s, 10); !ok {\n\t\tpanic(\"failed to parse big.Int: \" + s)\n\t}\n\treturn i\n}\n\nfunc mustParseDecimal(s string) *inf.Dec {\n\tdec := new(inf.Dec)\n\tif _, ok := dec.SetString(s); !ok {\n\t\tpanic(\"failed to parse inf.Dec: \" + s)\n\t}\n\treturn dec\n}\n\nfunc mustCreateDuration(months int32, days int32, timeDuration time.Duration) Duration {\n\treturn Duration{\n\t\tMonths:      months,\n\t\tDays:        days,\n\t\tNanoseconds: timeDuration.Nanoseconds(),\n\t}\n}\n\n// TestSliceMapMapScanCounterTypes tests counter types separately since they have special restrictions\n// (counter columns can't be mixed with other column types in the same table)\nfunc TestSliceMapMapScanCounterTypes(t *testing.T) {\n\tt.Parallel()\n\n\tsession := 
createSessionFromClusterTabletsDisabled(createCluster(), t)\n\tdefer session.Close()\n\n\t// Create separate table for counter types\n\ttable := testTableName(t)\n\tif err := createTable(session, fmt.Sprintf(`\n\t\tCREATE TABLE IF NOT EXISTS gocql_test_tablets_disabled.%s (\n\t\t\tid int PRIMARY KEY,\n\t\t\tcounter_col counter\n\t\t)\n\t`, table)); err != nil {\n\t\tt.Fatal(\"Failed to create counter test table:\", err)\n\t}\n\n\t// Clear existing data\n\tif err := session.Query(fmt.Sprintf(\"TRUNCATE gocql_test_tablets_disabled.%s\", table)).Exec(); err != nil {\n\t\tt.Fatal(\"Failed to truncate counter test table:\", err)\n\t}\n\n\ttestID := 1\n\texpectedValue := int64(42)\n\n\t// Increment counter (can't INSERT into counter, must UPDATE)\n\terr := session.Query(fmt.Sprintf(\"UPDATE gocql_test_tablets_disabled.%s SET counter_col = counter_col + 42 WHERE id = ?\", table), testID).Exec()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to increment counter: %v\", err)\n\t}\n\n\t// Test both SliceMap and MapScan\n\tfor _, method := range []string{\"SliceMap\", \"MapScan\"} {\n\t\tt.Run(method, func(t *testing.T) {\n\t\t\tvar result any\n\n\t\t\tselectQuery := fmt.Sprintf(\"SELECT counter_col FROM gocql_test_tablets_disabled.%s WHERE id = ?\", table)\n\t\t\tif method == \"SliceMap\" {\n\t\t\t\titer := session.Query(selectQuery, testID).Iter()\n\t\t\t\tsliceResults, err := iter.SliceMap()\n\t\t\t\titer.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"SliceMap failed: %v\", err)\n\t\t\t\t}\n\t\t\t\tif len(sliceResults) != 1 {\n\t\t\t\t\tt.Fatalf(\"Expected 1 result, got %d\", len(sliceResults))\n\t\t\t\t}\n\t\t\t\tresult = sliceResults[0][\"counter_col\"]\n\t\t\t} else {\n\t\t\t\tmapResult := make(map[string]any)\n\t\t\t\tif err := session.Query(selectQuery, testID).MapScan(mapResult); err != nil {\n\t\t\t\t\tt.Fatalf(\"MapScan failed: %v\", err)\n\t\t\t\t}\n\t\t\t\tresult = mapResult[\"counter_col\"]\n\t\t\t}\n\n\t\t\tvalidateResult(t, \"counter\", expectedValue, 
result, method, \"incremented\")\n\t\t})\n\t}\n}\n\n// TestSliceMapMapScanTupleTypes tests tuple types separately since they have special handling\n// (tuple elements get split into individual columns)\nfunc TestSliceMapMapScanTupleTypes(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\t// Create test table with tuple column\n\ttable := testTableName(t)\n\tif err := createTable(session, fmt.Sprintf(`\n\t\tCREATE TABLE IF NOT EXISTS gocql_test.%s (\n\t\t\tid int PRIMARY KEY,\n\t\t\ttuple_col tuple<int, text>\n\t\t)\n\t`, table)); err != nil {\n\t\tt.Fatal(\"Failed to create tuple test table:\", err)\n\t}\n\n\t// Clear existing data\n\tif err := session.Query(fmt.Sprintf(\"TRUNCATE gocql_test.%s\", table)).Exec(); err != nil {\n\t\tt.Fatal(\"Failed to truncate tuple test table:\", err)\n\t}\n\n\t// Test non-NULL tuple\n\tt.Run(\"NonNull\", func(t *testing.T) {\n\t\ttestID := 1\n\t\t// Insert tuple value\n\t\terr := session.Query(fmt.Sprintf(\"INSERT INTO gocql_test.%s (id, tuple_col) VALUES (?, (42, 'hello'))\", table), testID).Exec()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to insert tuple value: %v\", err)\n\t\t}\n\n\t\t// Test both SliceMap and MapScan\n\t\tfor _, method := range []string{\"SliceMap\", \"MapScan\"} {\n\t\t\tt.Run(method, func(t *testing.T) {\n\t\t\t\tvar result map[string]any\n\n\t\t\t\tselectQuery := fmt.Sprintf(\"SELECT tuple_col FROM gocql_test.%s WHERE id = ?\", table)\n\t\t\t\tif method == \"SliceMap\" {\n\t\t\t\t\titer := session.Query(selectQuery, testID).Iter()\n\t\t\t\t\tsliceResults, err := iter.SliceMap()\n\t\t\t\t\titer.Close()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatalf(\"SliceMap failed: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tif len(sliceResults) != 1 {\n\t\t\t\t\t\tt.Fatalf(\"Expected 1 result, got %d\", len(sliceResults))\n\t\t\t\t\t}\n\t\t\t\t\tresult = sliceResults[0]\n\t\t\t\t} else {\n\t\t\t\t\tresult = make(map[string]any)\n\t\t\t\t\tif err := session.Query(selectQuery, 
testID).MapScan(result); err != nil {\n\t\t\t\t\t\tt.Fatalf(\"MapScan failed: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Check tuple elements (tuples get split into individual columns)\n\t\t\t\telem0Key := TupleColumnName(\"tuple_col\", 0)\n\t\t\t\telem1Key := TupleColumnName(\"tuple_col\", 1)\n\n\t\t\t\tif result[elem0Key] != 42 {\n\t\t\t\t\tt.Errorf(\"%s tuple[0]: expected 42, got %v\", method, result[elem0Key])\n\t\t\t\t}\n\t\t\t\tif result[elem1Key] != \"hello\" {\n\t\t\t\t\tt.Errorf(\"%s tuple[1]: expected 'hello', got %v\", method, result[elem1Key])\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\t// Test NULL tuple\n\tt.Run(\"Null\", func(t *testing.T) {\n\t\ttestID := 2\n\t\t// Insert NULL tuple\n\t\terr := session.Query(fmt.Sprintf(\"INSERT INTO gocql_test.%s (id, tuple_col) VALUES (?, NULL)\", table), testID).Exec()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to insert NULL tuple: %v\", err)\n\t\t}\n\n\t\t// Test both SliceMap and MapScan\n\t\tfor _, method := range []string{\"SliceMap\", \"MapScan\"} {\n\t\t\tt.Run(method, func(t *testing.T) {\n\t\t\t\tvar result map[string]any\n\n\t\t\t\tselectQuery := fmt.Sprintf(\"SELECT tuple_col FROM gocql_test.%s WHERE id = ?\", table)\n\t\t\t\tif method == \"SliceMap\" {\n\t\t\t\t\titer := session.Query(selectQuery, testID).Iter()\n\t\t\t\t\tsliceResults, err := iter.SliceMap()\n\t\t\t\t\titer.Close()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatalf(\"SliceMap failed: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tif len(sliceResults) != 1 {\n\t\t\t\t\t\tt.Fatalf(\"Expected 1 result, got %d\", len(sliceResults))\n\t\t\t\t\t}\n\t\t\t\t\tresult = sliceResults[0]\n\t\t\t\t} else {\n\t\t\t\t\tresult = make(map[string]any)\n\t\t\t\t\tif err := session.Query(selectQuery, testID).MapScan(result); err != nil {\n\t\t\t\t\t\tt.Fatalf(\"MapScan failed: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Check tuple elements (NULL tuple gives zero values)\n\t\t\t\telem0Key := TupleColumnName(\"tuple_col\", 0)\n\t\t\t\telem1Key := 
TupleColumnName(\"tuple_col\", 1)\n\n\t\t\t\tif result[elem0Key] != 0 {\n\t\t\t\t\tt.Errorf(\"%s NULL tuple[0]: expected 0, got %v\", method, result[elem0Key])\n\t\t\t\t}\n\t\t\t\tif result[elem1Key] != \"\" {\n\t\t\t\t\tt.Errorf(\"%s NULL tuple[1]: expected '', got %v\", method, result[elem1Key])\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n}\n\n// TestSliceMapMapScanVectorTypes tests vector types separately since they need Cassandra 5.0+ and special table setup\n// (vectors need separate tables and version checks)\nfunc TestSliceMapMapScanVectorTypes(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tif *flagDistribution == \"cassandra\" && flagCassVersion.Before(5, 0, 0) {\n\t\tt.Skip(\"Vector types have been introduced in Cassandra 5.0\")\n\t}\n\n\tif *flagDistribution == \"scylla\" && flagCassVersion.Before(2025, 3, 0) {\n\t\tt.Skip(\"Vector types have been introduced in ScyllaDB 2025.3\")\n\t}\n\n\t// Create test table with vector columns\n\ttable := testTableName(t)\n\tif err := createTable(session, fmt.Sprintf(`\n\t\tCREATE TABLE IF NOT EXISTS gocql_test.%s (\n\t\t\tid int PRIMARY KEY,\n\t\t\tvector_float_col vector<float, 3>,\n\t\t\tvector_text_col vector<text, 2>\n\t\t)\n\t`, table)); err != nil {\n\t\tt.Fatal(\"Failed to create vector test table:\", err)\n\t}\n\n\t// Clear existing data\n\tif err := session.Query(fmt.Sprintf(\"TRUNCATE gocql_test.%s\", table)).Exec(); err != nil {\n\t\tt.Fatal(\"Failed to truncate vector test table:\", err)\n\t}\n\n\ttestCases := []struct {\n\t\tcolName       string\n\t\tcqlValue      string\n\t\texpectedValue any\n\t\texpectedNull  any\n\t}{\n\t\t{\"vector_float_col\", \"[1.0, 2.5, -3.0]\", []float32{1.0, 2.5, -3.0}, []float32(nil)},\n\t\t{\"vector_text_col\", \"['hello', 'world']\", []string{\"hello\", \"world\"}, []string(nil)},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.colName, func(t *testing.T) {\n\t\t\t// Test non-NULL value\n\t\t\tt.Run(\"NonNull\", func(t 
*testing.T) {\n\t\t\t\ttestID := 1\n\t\t\t\t// Insert non-NULL value\n\t\t\t\tinsertQuery := fmt.Sprintf(\"INSERT INTO gocql_test.%s (id, %s) VALUES (?, %s)\", table, tc.colName, tc.cqlValue)\n\t\t\t\tif err := session.Query(insertQuery, testID).Exec(); err != nil {\n\t\t\t\t\tt.Fatalf(\"Failed to insert non-NULL value: %v\", err)\n\t\t\t\t}\n\n\t\t\t\t// Test both SliceMap and MapScan\n\t\t\t\tfor _, method := range []string{\"SliceMap\", \"MapScan\"} {\n\t\t\t\t\tt.Run(method, func(t *testing.T) {\n\t\t\t\t\t\tvar result any\n\n\t\t\t\t\t\tselectQuery := fmt.Sprintf(\"SELECT %s FROM gocql_test.%s WHERE id = ?\", tc.colName, table)\n\t\t\t\t\t\tif method == \"SliceMap\" {\n\t\t\t\t\t\t\titer := session.Query(selectQuery, testID).Iter()\n\t\t\t\t\t\t\tsliceResults, err := iter.SliceMap()\n\t\t\t\t\t\t\titer.Close()\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tt.Fatalf(\"SliceMap failed: %v\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif len(sliceResults) != 1 {\n\t\t\t\t\t\t\t\tt.Fatalf(\"Expected 1 result, got %d\", len(sliceResults))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tresult = sliceResults[0][tc.colName]\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tmapResult := make(map[string]any)\n\t\t\t\t\t\t\tif err := session.Query(selectQuery, testID).MapScan(mapResult); err != nil {\n\t\t\t\t\t\t\t\tt.Fatalf(\"MapScan failed: %v\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tresult = mapResult[tc.colName]\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tvalidateResult(t, tc.colName, tc.expectedValue, result, method, \"non-NULL\")\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t})\n\n\t\t\t// Test NULL value\n\t\t\tt.Run(\"Null\", func(t *testing.T) {\n\t\t\t\ttestID := 2\n\t\t\t\t// Insert NULL value\n\t\t\t\tinsertQuery := fmt.Sprintf(\"INSERT INTO gocql_test.%s (id, %s) VALUES (?, NULL)\", table, tc.colName)\n\t\t\t\tif err := session.Query(insertQuery, testID).Exec(); err != nil {\n\t\t\t\t\tt.Fatalf(\"Failed to insert NULL value: %v\", err)\n\t\t\t\t}\n\n\t\t\t\t// Test both SliceMap and MapScan\n\t\t\t\tfor _, method := 
range []string{\"SliceMap\", \"MapScan\"} {\n\t\t\t\t\tt.Run(method, func(t *testing.T) {\n\t\t\t\t\t\tvar result any\n\n\t\t\t\t\t\tselectQuery := fmt.Sprintf(\"SELECT %s FROM gocql_test.%s WHERE id = ?\", tc.colName, table)\n\t\t\t\t\t\tif method == \"SliceMap\" {\n\t\t\t\t\t\t\titer := session.Query(selectQuery, testID).Iter()\n\t\t\t\t\t\t\tsliceResults, err := iter.SliceMap()\n\t\t\t\t\t\t\titer.Close()\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tt.Fatalf(\"SliceMap failed: %v\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif len(sliceResults) != 1 {\n\t\t\t\t\t\t\t\tt.Fatalf(\"Expected 1 result, got %d\", len(sliceResults))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tresult = sliceResults[0][tc.colName]\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tmapResult := make(map[string]any)\n\t\t\t\t\t\t\tif err := session.Query(selectQuery, testID).MapScan(mapResult); err != nil {\n\t\t\t\t\t\t\t\tt.Fatalf(\"MapScan failed: %v\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tresult = mapResult[tc.colName]\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// Vectors should return nil slices for NULL values for consistency\n\t\t\t\t\t\tvalidateResult(t, tc.colName, tc.expectedNull, result, method, \"NULL\")\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n}\n\n// TestSliceMapMapScanCollectionTypes tests collection types separately since they have special handling\n// (collections should return nil slices/maps for NULL values for consistency with other slice-based types)\nfunc TestSliceMapMapScanCollectionTypes(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\t// Create test table with collection columns\n\ttable := testTableName(t)\n\tif err := createTable(session, fmt.Sprintf(`\n\t\tCREATE TABLE IF NOT EXISTS gocql_test.%s (\n\t\t\tid int PRIMARY KEY,\n\t\t\tlist_col list<text>,\n\t\t\tset_col set<int>,\n\t\t\tmap_col map<text, int>\n\t\t)\n\t`, table)); err != nil {\n\t\tt.Fatal(\"Failed to create collection test table:\", err)\n\t}\n\n\t// Clear existing data\n\tif err := 
session.Query(fmt.Sprintf(\"TRUNCATE gocql_test.%s\", table)).Exec(); err != nil {\n\t\tt.Fatal(\"Failed to truncate collection test table:\", err)\n\t}\n\n\ttestCases := []struct {\n\t\tcolName       string\n\t\tcqlValue      string\n\t\texpectedValue any\n\t\texpectedNull  any\n\t}{\n\t\t{\"list_col\", \"['a', 'b', 'c']\", []string{\"a\", \"b\", \"c\"}, []string(nil)},\n\t\t{\"set_col\", \"{1, 2, 3}\", []int{1, 2, 3}, []int(nil)},\n\t\t{\"map_col\", \"{'key1': 1, 'key2': 2}\", map[string]int{\"key1\": 1, \"key2\": 2}, map[string]int(nil)},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.colName, func(t *testing.T) {\n\t\t\t// Test non-NULL value\n\t\t\tt.Run(\"NonNull\", func(t *testing.T) {\n\t\t\t\ttestID := 1\n\t\t\t\t// Insert non-NULL value\n\t\t\t\tinsertQuery := fmt.Sprintf(\"INSERT INTO gocql_test.%s (id, %s) VALUES (?, %s)\", table, tc.colName, tc.cqlValue)\n\t\t\t\tif err := session.Query(insertQuery, testID).Exec(); err != nil {\n\t\t\t\t\tt.Fatalf(\"Failed to insert non-NULL value: %v\", err)\n\t\t\t\t}\n\n\t\t\t\t// Test both SliceMap and MapScan\n\t\t\t\tfor _, method := range []string{\"SliceMap\", \"MapScan\"} {\n\t\t\t\t\tt.Run(method, func(t *testing.T) {\n\t\t\t\t\t\tvar result any\n\n\t\t\t\t\t\tselectQuery := fmt.Sprintf(\"SELECT %s FROM gocql_test.%s WHERE id = ?\", tc.colName, table)\n\t\t\t\t\t\tif method == \"SliceMap\" {\n\t\t\t\t\t\t\titer := session.Query(selectQuery, testID).Iter()\n\t\t\t\t\t\t\tsliceResults, err := iter.SliceMap()\n\t\t\t\t\t\t\titer.Close()\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tt.Fatalf(\"SliceMap failed: %v\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif len(sliceResults) != 1 {\n\t\t\t\t\t\t\t\tt.Fatalf(\"Expected 1 result, got %d\", len(sliceResults))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tresult = sliceResults[0][tc.colName]\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tmapResult := make(map[string]any)\n\t\t\t\t\t\t\tif err := session.Query(selectQuery, testID).MapScan(mapResult); err != nil 
{\n\t\t\t\t\t\t\t\tt.Fatalf(\"MapScan failed: %v\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tresult = mapResult[tc.colName]\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// For sets, we need special comparison since order is not guaranteed\n\t\t\t\t\t\tif strings.HasPrefix(tc.colName, \"set_\") {\n\t\t\t\t\t\t\tif !compareCollectionValues(t, tc.colName, tc.expectedValue, result) {\n\t\t\t\t\t\t\t\tt.Errorf(\"%s non-NULL %s: expected %v, got %v\", method, tc.colName, tc.expectedValue, result)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tvalidateResult(t, tc.colName, tc.expectedValue, result, method, \"non-NULL\")\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t})\n\n\t\t\t// Test NULL value\n\t\t\tt.Run(\"Null\", func(t *testing.T) {\n\t\t\t\ttestID := 2\n\t\t\t\t// Insert NULL value\n\t\t\t\tinsertQuery := fmt.Sprintf(\"INSERT INTO gocql_test.%s (id, %s) VALUES (?, NULL)\", table, tc.colName)\n\t\t\t\tif err := session.Query(insertQuery, testID).Exec(); err != nil {\n\t\t\t\t\tt.Fatalf(\"Failed to insert NULL value: %v\", err)\n\t\t\t\t}\n\n\t\t\t\t// Test both SliceMap and MapScan\n\t\t\t\tfor _, method := range []string{\"SliceMap\", \"MapScan\"} {\n\t\t\t\t\tt.Run(method, func(t *testing.T) {\n\t\t\t\t\t\tvar result any\n\n\t\t\t\t\t\tselectQuery := fmt.Sprintf(\"SELECT %s FROM gocql_test.%s WHERE id = ?\", tc.colName, table)\n\t\t\t\t\t\tif method == \"SliceMap\" {\n\t\t\t\t\t\t\titer := session.Query(selectQuery, testID).Iter()\n\t\t\t\t\t\t\tsliceResults, err := iter.SliceMap()\n\t\t\t\t\t\t\titer.Close()\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tt.Fatalf(\"SliceMap failed: %v\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif len(sliceResults) != 1 {\n\t\t\t\t\t\t\t\tt.Fatalf(\"Expected 1 result, got %d\", len(sliceResults))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tresult = sliceResults[0][tc.colName]\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tmapResult := make(map[string]any)\n\t\t\t\t\t\t\tif err := session.Query(selectQuery, testID).MapScan(mapResult); err != nil 
{\n\t\t\t\t\t\t\t\tt.Fatalf(\"MapScan failed: %v\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tresult = mapResult[tc.colName]\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// Collections should return nil slices/maps for NULL values for consistency\n\t\t\t\t\t\tvalidateResult(t, tc.colName, tc.expectedNull, result, method, \"NULL\")\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "integration_test.go",
    "content": "//go:build integration\n// +build integration\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\n// This file groups integration tests where Cassandra has to be set up with some special integration variables\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/internal/tests\"\n)\n\nfunc init() {\n\t// Register integration-only setup that runs before any test (called from TestMain).\n\t// This eagerly probes tablet support so parallel tests don't race on lazy init.\n\tintegrationTestSetup = initTabletProbes\n}\n\n// TestAuthentication verifies that gocql will work with a host configured to only accept authenticated connections\nfunc TestAuthentication(t *testing.T) {\n\tt.Parallel()\n\n\tif !*flagRunAuthTest {\n\t\tt.Skip(\"Authentication is not configured in the target cluster\")\n\t}\n\n\tcluster := createCluster()\n\n\tcluster.Authenticator = 
PasswordAuthenticator{\n\t\tUsername: \"cassandra\",\n\t\tPassword: \"cassandra\",\n\t}\n\n\tsession, err := cluster.CreateSession()\n\n\tif err != nil {\n\t\tt.Fatalf(\"Authentication error: %s\", err)\n\t}\n\n\tsession.Close()\n}\n\nfunc TestGetHostsFromSystem(t *testing.T) {\n\tt.Parallel()\n\n\tclusterHosts := getClusterHosts()\n\tcluster := createCluster()\n\tsession := createSessionFromCluster(cluster, t)\n\n\thosts, partitioner, err := session.hostSource.GetHostsFromSystem()\n\n\ttests.AssertTrue(t, \"err == nil\", err == nil)\n\ttests.AssertEqual(t, \"len(hosts)\", len(clusterHosts), len(hosts))\n\ttests.AssertTrue(t, \"len(partitioner) != 0\", len(partitioner) != 0)\n}\n\n// TestRingDiscovery makes sure that you can autodiscover other cluster members\n// when you seed a cluster config with just one node\nfunc TestRingDiscovery(t *testing.T) {\n\tt.Parallel()\n\n\tclusterHosts := getClusterHosts()\n\tcluster := createCluster()\n\tcluster.Hosts = clusterHosts[:1]\n\n\tsession := createSessionFromCluster(cluster, t)\n\tdefer session.Close()\n\n\tif *clusterSize > 1 {\n\t\t// wait for autodiscovery to update the pool with the list of known hosts\n\t\ttime.Sleep(*flagAutoWait)\n\t}\n\n\tsession.pool.mu.RLock()\n\tdefer session.pool.mu.RUnlock()\n\tsize := len(session.pool.hostConnPools)\n\n\tif *clusterSize != size {\n\t\tfor p, pool := range session.pool.hostConnPools {\n\t\t\tt.Logf(\"p=%q host=%v ips=%s\", p, pool.host, pool.host.ConnectAddress().String())\n\n\t\t}\n\t\tt.Errorf(\"Expected a cluster size of %d, but actual size was %d\", *clusterSize, size)\n\t}\n}\n\n// TestHostFilterDiscovery ensures that host filtering works even when we discover hosts\nfunc TestHostFilterDiscovery(t *testing.T) {\n\tt.Parallel()\n\n\tclusterHosts := getClusterHosts()\n\tif len(clusterHosts) < 2 {\n\t\tt.Skip(\"skipping because we don't have 2 or more hosts\")\n\t}\n\tcluster := createCluster()\n\trr := 
RoundRobinHostPolicy().(*roundRobinHostPolicy)\n\tcluster.PoolConfig.HostSelectionPolicy = rr\n\t// we'll filter out the second host\n\tfiltered := clusterHosts[1]\n\tcluster.Hosts = clusterHosts[:1]\n\tcluster.HostFilter = HostFilterFunc(func(host *HostInfo) bool {\n\t\tif host.ConnectAddress().String() == filtered {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\tsession := createSessionFromCluster(cluster, t)\n\tdefer session.Close()\n\n\ttests.AssertEqual(t, \"len(clusterHosts)-1 != len(rr.hosts.get())\", len(clusterHosts)-1, len(rr.hosts.get()))\n}\n\n// TestHostFilterInitial ensures that host filtering works for the initial\n// connection including the control connection\nfunc TestHostFilterInitial(t *testing.T) {\n\tt.Parallel()\n\n\tclusterHosts := getClusterHosts()\n\tif len(clusterHosts) < 2 {\n\t\tt.Skip(\"skipping because we don't have 2 or more hosts\")\n\t}\n\tcluster := createCluster()\n\trr := RoundRobinHostPolicy().(*roundRobinHostPolicy)\n\tcluster.PoolConfig.HostSelectionPolicy = rr\n\t// we'll filter out the second host\n\tfiltered := clusterHosts[1]\n\tcluster.HostFilter = HostFilterFunc(func(host *HostInfo) bool {\n\t\tif host.ConnectAddress().String() == filtered {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\tsession := createSessionFromCluster(cluster, t)\n\tdefer session.Close()\n\n\ttests.AssertEqual(t, \"len(clusterHosts)-1 != len(rr.hosts.get())\", len(clusterHosts)-1, len(rr.hosts.get()))\n}\n\nfunc TestApplicationInformation(t *testing.T) {\n\tt.Parallel()\n\n\tcluster := createCluster()\n\ts, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"ApplicationInformation error: %s\", err)\n\t}\n\tvar clientsTableName string\n\tfor _, tableName := range []string{\"system_views.clients\", \"system.clients\"} {\n\t\titer := s.Query(\"select client_options from \" + tableName).Iter()\n\t\t_, err = iter.SliceMap()\n\t\tif err == nil {\n\t\t\tclientsTableName = tableName\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif 
clientsTableName == \"\" {\n\t\tt.Skip(\"Skipping because server does have `client_options` in clients table\")\n\t}\n\n\ttcases := []struct {\n\t\ttestName string\n\t\tname     string\n\t\tversion  string\n\t\tclientID string\n\t}{\n\t\t{\n\t\t\ttestName: \"full\",\n\t\t\tname:     \"my-application\",\n\t\t\tversion:  \"1.0.0\",\n\t\t\tclientID: \"my-client-id\",\n\t\t},\n\t\t{\n\t\t\ttestName: \"empty\",\n\t\t},\n\t\t{\n\t\t\ttestName: \"name-only\",\n\t\t\tname:     \"my-application\",\n\t\t},\n\t\t{\n\t\t\ttestName: \"version-only\",\n\t\t\tversion:  \"1.0.0\",\n\t\t},\n\t\t{\n\t\t\ttestName: \"client-id-only\",\n\t\t\tclientID: \"my-client-id\",\n\t\t},\n\t}\n\tfor _, tcase := range tcases {\n\t\tt.Run(tcase.testName, func(t *testing.T) {\n\t\t\tcluster := createCluster()\n\t\t\tcluster.ApplicationInfo = NewStaticApplicationInfo(tcase.name, tcase.version, tcase.clientID)\n\t\t\ts, err := cluster.CreateSession()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"failed to connect to the cluster: %s\", err)\n\t\t\t}\n\t\t\tdefer s.Close()\n\n\t\t\tvar row map[string]string\n\t\t\titer := s.Query(\"select client_options from \" + clientsTableName).Iter()\n\t\t\tfound := false\n\t\t\tfor iter.Scan(&row) {\n\t\t\t\tif tcase.name != \"\" {\n\t\t\t\t\tif row[\"APPLICATION_NAME\"] != tcase.name {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif _, ok := row[\"APPLICATION_NAME\"]; ok {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif tcase.version != \"\" {\n\t\t\t\t\tif row[\"APPLICATION_VERSION\"] != tcase.version {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif _, ok := row[\"APPLICATION_VERSION\"]; ok {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif tcase.clientID != \"\" {\n\t\t\t\t\tif row[\"CLIENT_ID\"] != tcase.clientID {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif _, ok := row[\"CLIENT_ID\"]; ok {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfound = 
true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif iter.Close() != nil {\n\t\t\t\tt.Fatalf(\"failed to execute query: %s\", iter.Close().Error())\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tt.Fatalf(\"failed to find the application info row\")\n\t\t\t}\n\t\t})\n\t}\n\n}\n\nfunc TestWriteFailure(t *testing.T) {\n\tt.Parallel()\n\n\tt.Skip(\"skipped due to unknown purpose\")\n\tcluster := createCluster()\n\tcreateKeyspace(t, cluster, \"test\", false)\n\tcluster.Keyspace = \"test\"\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatal(\"create session:\", err)\n\t}\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE test.%s (id int,value int,PRIMARY KEY (id))\", table)); err != nil {\n\t\tt.Fatalf(\"failed to create table with error '%v'\", err)\n\t}\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO test.%s (id, value) VALUES (1, 1)`, table)).Exec(); err != nil {\n\t\terrWrite, ok := err.(*RequestErrWriteFailure)\n\t\tif ok {\n\t\t\tif session.cfg.ProtoVersion >= protoVersion5 {\n\t\t\t\t// ErrorMap should be filled with some hosts that should've errored\n\t\t\t\tif len(errWrite.ErrorMap) == 0 {\n\t\t\t\t\tt.Fatal(\"errWrite.ErrorMap should have some failed hosts but it didn't have any\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Map doesn't get filled for V4\n\t\t\t\tif len(errWrite.ErrorMap) != 0 {\n\t\t\t\t\tt.Fatal(\"errWrite.ErrorMap should have length 0, it's: \", len(errWrite.ErrorMap))\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tt.Fatalf(\"error (%s) should be RequestErrWriteFailure, it's: %T\", err, err)\n\t\t}\n\t} else {\n\t\tt.Fatal(\"a write fail error should have happened when querying test keyspace\")\n\t}\n\n\tif err = session.Query(\"DROP KEYSPACE test\").Exec(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestCustomPayloadMessages(t *testing.T) {\n\tt.Parallel()\n\n\tt.Skip(\"SKIPPING\")\n\tcluster := createCluster()\n\tsession := createSessionFromCluster(cluster, t)\n\tdefer 
session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, value int, PRIMARY KEY (id))\", table)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// QueryMessage\n\tvar customPayload = map[string][]byte{\"a\": []byte{10, 20}, \"b\": []byte{20, 30}}\n\tquery := session.Query(fmt.Sprintf(\"SELECT id FROM %s where id = ?\", table), 42).Consistency(One).CustomPayload(customPayload)\n\titer := query.Iter()\n\trCustomPayload := iter.GetCustomPayload()\n\tif !reflect.DeepEqual(customPayload, rCustomPayload) {\n\t\tt.Fatal(\"The received custom payload should match the sent\")\n\t}\n\titer.Close()\n\n\t// Insert query\n\tquery = session.Query(fmt.Sprintf(\"INSERT INTO %s(id,value) VALUES(1, 1)\", table)).Consistency(One).CustomPayload(customPayload)\n\titer = query.Iter()\n\trCustomPayload = iter.GetCustomPayload()\n\tif !reflect.DeepEqual(customPayload, rCustomPayload) {\n\t\tt.Fatal(\"The received custom payload should match the sent\")\n\t}\n\titer.Close()\n\n\t// Batch Message\n\tb := session.Batch(LoggedBatch)\n\tb.CustomPayload = customPayload\n\tb.Query(fmt.Sprintf(\"INSERT INTO %s(id,value) VALUES(1, 1)\", table))\n\tif err := session.ExecuteBatch(b); err != nil {\n\t\tt.Fatalf(\"query failed. 
%v\", err)\n\t}\n}\n\nfunc TestCustomPayloadValues(t *testing.T) {\n\tt.Parallel()\n\n\tt.Skip(\"SKIPPING\")\n\tcluster := createCluster()\n\tsession := createSessionFromCluster(cluster, t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE gocql_test.%s (id int, value int, PRIMARY KEY (id))\", table)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvalues := []map[string][]byte{map[string][]byte{\"a\": []byte{10, 20}, \"b\": []byte{20, 30}}, nil, map[string][]byte{\"a\": []byte{10, 20}, \"b\": nil}}\n\n\tfor _, customPayload := range values {\n\t\tquery := session.Query(fmt.Sprintf(\"SELECT id FROM %s where id = ?\", table), 42).Consistency(One).CustomPayload(customPayload)\n\t\titer := query.Iter()\n\t\trCustomPayload := iter.GetCustomPayload()\n\t\tif !reflect.DeepEqual(customPayload, rCustomPayload) {\n\t\t\tt.Fatal(\"The received custom payload should match the sent\")\n\t\t}\n\t}\n}\n\nfunc TestSessionAwaitSchemaAgreement(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tif err := session.AwaitSchemaAgreement(context.Background()); err != nil {\n\t\tt.Fatalf(\"expected session.AwaitSchemaAgreement to not return an error but got '%v'\", err)\n\t}\n}\n\nfunc TestSessionAwaitSchemaAgreementSessionClosed(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tsession.Close()\n\n\tif err := session.AwaitSchemaAgreement(context.Background()); !errors.Is(err, ErrConnectionClosed) {\n\t\tt.Fatalf(\"expected session.AwaitSchemaAgreement to return ErrConnectionClosed but got '%v'\", err)\n\t}\n\n}\n\nfunc TestSessionAwaitSchemaAgreementContextCanceled(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tctx, cancel := context.WithCancel(context.Background())\n\tcancel()\n\n\tif err := session.AwaitSchemaAgreement(ctx); !errors.Is(err, context.Canceled) {\n\t\tt.Fatalf(\"expected session.AwaitSchemaAgreement to return 'context canceled' 
but got '%v'\", err)\n\t}\n\n}\n\nfunc TestNewConnectWithLowTimeout(t *testing.T) {\n\tt.Parallel()\n\n\t// Point of these tests to make sure that with low timeout connection creation will gracefully fail\n\n\ttype TestExpectation int\n\tconst (\n\t\tDontRun TestExpectation = iota\n\t\tFail    TestExpectation = iota\n\t\tPass    TestExpectation = iota\n\t\tCanPass TestExpectation = iota\n\t)\n\n\tmatch := func(t *testing.T, e TestExpectation, result error) {\n\t\tt.Helper()\n\n\t\tswitch e {\n\t\tcase DontRun:\n\t\t\tt.Fatal(\"should not be run\")\n\t\tcase Fail:\n\t\t\tif result == nil {\n\t\t\t\tt.Fatal(\"should return an error\")\n\t\t\t}\n\t\tcase Pass:\n\t\t\tif result != nil {\n\t\t\t\tt.Fatalf(\"should pass, but returned an error: %s\", result.Error())\n\t\t\t}\n\t\tcase CanPass:\n\t\t\tif result == nil {\n\t\t\t\tt.Log(\"test passed due to high timeout\")\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unknown test expectation: %v\", e))\n\t\t}\n\t}\n\n\tfor _, lowTimeout := range []time.Duration{1 * time.Nanosecond, 10 * time.Nanosecond, 100 * time.Nanosecond} {\n\t\tcanPassOnHighTimeout := Fail\n\t\tif lowTimeout >= 100*time.Nanosecond {\n\t\t\tcanPassOnHighTimeout = CanPass\n\t\t}\n\t\tt.Run(lowTimeout.String(), func(t *testing.T) {\n\t\t\tfor _, tcase := range []struct {\n\t\t\t\tname                       string\n\t\t\t\tgetCluster                 func() *ClusterConfig\n\t\t\t\tconnect                    TestExpectation\n\t\t\t\tregularQuery               TestExpectation\n\t\t\t\tcontrolQuery               TestExpectation\n\t\t\t\tcontrolQueryAfterReconnect TestExpectation\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tname: \"Timeout\",\n\t\t\t\t\tgetCluster: func() *ClusterConfig {\n\t\t\t\t\t\tcluster := createCluster()\n\t\t\t\t\t\tcluster.Timeout = lowTimeout\n\t\t\t\t\t\treturn cluster\n\t\t\t\t\t},\n\t\t\t\t\tconnect:                    Pass,\n\t\t\t\t\tregularQuery:               Fail,\n\t\t\t\t\tcontrolQuery:               
Pass,\n\t\t\t\t\tcontrolQueryAfterReconnect: Pass,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"MetadataSchemaRequestTimeout\",\n\t\t\t\t\tgetCluster: func() *ClusterConfig {\n\t\t\t\t\t\tcluster := createCluster()\n\t\t\t\t\t\tcluster.MetadataSchemaRequestTimeout = lowTimeout\n\t\t\t\t\t\treturn cluster\n\t\t\t\t\t},\n\t\t\t\t\tconnect:      Pass,\n\t\t\t\t\tregularQuery: Pass,\n\t\t\t\t\tcontrolQuery: Fail,\n\t\t\t\t\t// It breaks control connection, then it can start reconnecting in any moment\n\t\t\t\t\t// As result test is not stable\n\t\t\t\t\tcontrolQueryAfterReconnect: Fail,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"WriteTimeout\",\n\t\t\t\t\tgetCluster: func() *ClusterConfig {\n\t\t\t\t\t\tcluster := createCluster()\n\t\t\t\t\t\tcluster.WriteTimeout = lowTimeout\n\t\t\t\t\t\treturn cluster\n\t\t\t\t\t},\n\t\t\t\t\tconnect:      Pass,\n\t\t\t\t\tregularQuery: canPassOnHighTimeout,\n\t\t\t\t\tcontrolQuery: canPassOnHighTimeout,\n\t\t\t\t\t// It breaks control connection, then it can start reconnecting in any moment\n\t\t\t\t\t// As result test is not stable\n\t\t\t\t\tcontrolQueryAfterReconnect: canPassOnHighTimeout,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"ReadTimeout\",\n\t\t\t\t\tgetCluster: func() *ClusterConfig {\n\t\t\t\t\t\tcluster := createCluster()\n\t\t\t\t\t\tcluster.ReadTimeout = lowTimeout\n\t\t\t\t\t\treturn cluster\n\t\t\t\t\t},\n\t\t\t\t\tconnect: Pass,\n\t\t\t\t\t// When data is available immediately reading from socket is not failing,\n\t\t\t\t\t// despite that deadline is in the past\n\t\t\t\t\t// Because of that even with low read timeout it can pass\n\t\t\t\t\tregularQuery: CanPass,\n\t\t\t\t\tcontrolQuery: CanPass,\n\t\t\t\t\t// It breaks control connection, then it can start reconnecting in any moment\n\t\t\t\t\t// As result test is not stable\n\t\t\t\t\tcontrolQueryAfterReconnect: CanPass,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"AllTimeouts\",\n\t\t\t\t\tgetCluster: func() *ClusterConfig {\n\t\t\t\t\t\tcluster := 
createCluster()\n\t\t\t\t\t\tcluster.Timeout = lowTimeout\n\t\t\t\t\t\tcluster.ReadTimeout = lowTimeout\n\t\t\t\t\t\tcluster.WriteTimeout = lowTimeout\n\t\t\t\t\t\tcluster.MetadataSchemaRequestTimeout = lowTimeout\n\t\t\t\t\t\treturn cluster\n\t\t\t\t\t},\n\t\t\t\t\tconnect:                    Pass,\n\t\t\t\t\tregularQuery:               Fail,\n\t\t\t\t\tcontrolQuery:               Fail,\n\t\t\t\t\tcontrolQueryAfterReconnect: Fail,\n\t\t\t\t},\n\t\t\t} {\n\t\t\t\tt.Run(tcase.name, func(t *testing.T) {\n\t\t\t\t\tvar (\n\t\t\t\t\t\ts   *Session\n\t\t\t\t\t\terr error\n\t\t\t\t\t)\n\n\t\t\t\t\tt.Run(\"Connect\", func(t *testing.T) {\n\t\t\t\t\t\ts, err = tcase.getCluster().CreateSession()\n\t\t\t\t\t\tmatch(t, tcase.connect, err)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tt.Fatal(\"failed to create session\", err.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t\t\t\tif s != nil {\n\t\t\t\t\t\tdefer s.Close()\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif tcase.connect == Fail {\n\t\t\t\t\t\t\tt.FailNow()\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tt.Fatal(\"session was not created\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif tcase.regularQuery != DontRun {\n\t\t\t\t\t\tt.Run(\"Regular Query\", func(t *testing.T) {\n\t\t\t\t\t\t\terr = s.Query(\"SELECT key FROM system.local WHERE key='local'\").Exec()\n\t\t\t\t\t\t\tmatch(t, tcase.regularQuery, err)\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\n\t\t\t\t\tif tcase.controlQuery != DontRun {\n\t\t\t\t\t\tt.Run(\"Query from control connection\", func(t *testing.T) {\n\t\t\t\t\t\t\terr = s.control.querySystem(\"SELECT key FROM system.local WHERE key='local'\").err\n\t\t\t\t\t\t\tmatch(t, tcase.controlQuery, err)\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\n\t\t\t\t\tif tcase.controlQueryAfterReconnect != DontRun {\n\t\t\t\t\t\tt.Run(\"Query from control connection after reconnect\", func(t *testing.T) {\n\t\t\t\t\t\t\ts, err = tcase.getCluster().CreateSession()\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tt.Fatal(\"failed to create session\", 
err.Error())\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tdefer s.Close()\n\t\t\t\t\t\t\terr = s.control.reconnect()\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tt.Fatalf(\"failed to reconnect to control connection: %v\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\terr = s.control.querySystem(\"SELECT key FROM system.local WHERE key='local'\").err\n\t\t\t\t\t\t\tmatch(t, tcase.controlQueryAfterReconnect, err)\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "internal/ccm/ccm.go",
    "content": "//go:build ccm\n// +build ccm\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage ccm\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os/exec\"\n\t\"runtime\"\n\t\"strings\"\n)\n\nfunc execCmd(args ...string) (*bytes.Buffer, error) {\n\texecName := \"ccm\"\n\tif runtime.GOOS == \"windows\" {\n\t\targs = append([]string{\"/c\", execName}, args...)\n\t\texecName = \"cmd.exe\"\n\t}\n\tcmd := exec.Command(execName, args...)\n\tstdout := &bytes.Buffer{}\n\tcmd.Stdout = stdout\n\tcmd.Stderr = &bytes.Buffer{}\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, errors.New(cmd.Stderr.(*bytes.Buffer).String())\n\t}\n\n\treturn stdout, nil\n}\n\nfunc AllUp() error {\n\tstatus, err := Status()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, host := range status {\n\t\tif !host.State.IsUp() {\n\t\t\tif err := NodeUp(host.Name); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc NodeUp(node 
string) error {\n\targs := []string{node, \"start\", \"--wait-for-binary-proto\"}\n\tif runtime.GOOS == \"windows\" {\n\t\targs = append(args, \"--quiet-windows\")\n\t}\n\t_, err := execCmd(args...)\n\treturn err\n}\n\nfunc NodeDown(node string) error {\n\t_, err := execCmd(node, \"stop\")\n\treturn err\n}\n\ntype Host struct {\n\tState NodeState\n\tAddr  string\n\tName  string\n}\n\ntype NodeState int\n\nfunc (n NodeState) String() string {\n\tif n == NodeStateUp {\n\t\treturn \"UP\"\n\t} else if n == NodeStateDown {\n\t\treturn \"DOWN\"\n\t} else {\n\t\treturn fmt.Sprintf(\"UNKNOWN_STATE_%d\", n)\n\t}\n}\n\nfunc (n NodeState) IsUp() bool {\n\treturn n == NodeStateUp\n}\n\nconst (\n\tNodeStateUp NodeState = iota\n\tNodeStateDown\n)\n\nfunc Status() (map[string]Host, error) {\n\t// TODO: parse into struct to manipulate\n\tout, err := execCmd(\"status\", \"-v\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconst (\n\t\tstateCluster = iota\n\t\tstateCommas\n\t\tstateNode\n\t\tstateOption\n\t)\n\n\tnodes := make(map[string]Host)\n\t// didnt really want to write a full state machine parser\n\tstate := stateCluster\n\tsc := bufio.NewScanner(out)\n\n\tvar host Host\n\n\tfor sc.Scan() {\n\t\tswitch state {\n\t\tcase stateCluster:\n\t\t\ttext := sc.Text()\n\t\t\tif !strings.HasPrefix(text, \"Cluster:\") {\n\t\t\t\treturn nil, fmt.Errorf(\"expected 'Cluster:' got %q\", text)\n\t\t\t}\n\t\t\tstate = stateCommas\n\t\tcase stateCommas:\n\t\t\ttext := sc.Text()\n\t\t\tif !strings.HasPrefix(text, \"-\") {\n\t\t\t\treturn nil, fmt.Errorf(\"expected commas got %q\", text)\n\t\t\t}\n\t\t\tstate = stateNode\n\t\tcase stateNode:\n\t\t\t// assume nodes start with node\n\t\t\ttext := sc.Text()\n\t\t\tif !strings.HasPrefix(text, \"node\") {\n\t\t\t\treturn nil, fmt.Errorf(\"expected 'node' got %q\", text)\n\t\t\t}\n\t\t\tline := strings.Split(text, \":\")\n\t\t\thost.Name = line[0]\n\n\t\t\tnodeState := strings.TrimSpace(line[1])\n\t\t\tswitch nodeState {\n\t\t\tcase 
\"UP\":\n\t\t\t\thost.State = NodeStateUp\n\t\t\tcase \"DOWN\":\n\t\t\t\thost.State = NodeStateDown\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"unknown node state from ccm: %q\", nodeState)\n\t\t\t}\n\n\t\t\tstate = stateOption\n\t\tcase stateOption:\n\t\t\ttext := sc.Text()\n\t\t\tif text == \"\" {\n\t\t\t\tstate = stateNode\n\t\t\t\tnodes[host.Name] = host\n\t\t\t\thost = Host{}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tline := strings.Split(strings.TrimSpace(text), \"=\")\n\t\t\tk, v := line[0], line[1]\n\t\t\tif k == \"binary\" {\n\t\t\t\t// could check errors\n\t\t\t\t// ('127.0.0.1', 9042)\n\t\t\t\tv = v[2:] // (''\n\t\t\t\tif i := strings.IndexByte(v, '\\''); i < 0 {\n\t\t\t\t\treturn nil, fmt.Errorf(\"invalid binary v=%q\", v)\n\t\t\t\t} else {\n\t\t\t\t\thost.Addr = v[:i]\n\t\t\t\t\t// dont need port\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unexpected state: %q\", state)\n\t\t}\n\t}\n\n\tif err := sc.Err(); err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to parse ccm status: %v\", err)\n\t}\n\n\treturn nodes, nil\n}\n"
  },
  {
    "path": "internal/ccm/ccm_test.go",
    "content": "//go:build all || ccm\n// +build all ccm\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage ccm\n\nimport (\n\t\"testing\"\n)\n\nfunc TestCCM(t *testing.T) {\n\tif err := AllUp(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tstatus, err := Status()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif host, ok := status[\"node1\"]; !ok {\n\t\tt.Fatal(\"node1 not in status list\")\n\t} else if !host.State.IsUp() {\n\t\tt.Fatal(\"node1 is not up\")\n\t}\n\n\tNodeDown(\"node1\")\n\tstatus, err = Status()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif host, ok := status[\"node1\"]; !ok {\n\t\tt.Fatal(\"node1 not in status list\")\n\t} else if host.State.IsUp() {\n\t\tt.Fatal(\"node1 is not down\")\n\t}\n\n\tNodeUp(\"node1\")\n\tstatus, err = Status()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif host, ok := status[\"node1\"]; !ok {\n\t\tt.Fatal(\"node1 not in status list\")\n\t} else if !host.State.IsUp() {\n\t\tt.Fatal(\"node1 is not 
up\")\n\t}\n}\n"
  },
  {
    "path": "internal/debug/debug_off.go",
    "content": "//go:build !gocql_debug\n// +build !gocql_debug\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage debug\n\nconst Enabled = false\n"
  },
  {
    "path": "internal/debug/debug_on.go",
    "content": "//go:build gocql_debug\n// +build gocql_debug\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage debug\n\nconst Enabled = true\n"
  },
  {
    "path": "internal/eventbus/README.md",
    "content": "# EventBus\n\nA generic, thread-safe event processing package for Go based on channels. EventBus allows multiple subscribers to receive events from a single input channel, with optional event filtering and configurable buffer sizes.\n\n## Features\n\n- **Generic Type Support**: Works with any type using Go generics\n- **Thread-Safe**: Safe for concurrent use by multiple goroutines\n- **Event Filtering**: Subscribers can filter events using custom filter functions\n- **Configurable Buffers**: Each subscriber can have its own channel buffer size\n- **Non-Blocking Distribution**: Slow subscribers don't block event distribution\n- **Context Support**: Automatic unsubscription when context is cancelled\n- **Zero External Dependencies**: Pure Go implementation\n\n## Installation\n\n```bash\ngo get github.com/gocql/gocql/eventbus\n```\n\n## Quick Start\n\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com/gocql/gocql/internal/eventbus\"\n)\n\nfunc main() {\n\t// Create a new EventBus for integer events\n\teb := eventbus.New[int](10) // input buffer size of 10\n\n\t// Start processing events\n\teb.Start()\n\tdefer eb.Stop()\n\n\t// Subscribe to all events\n\tallSub, _ := eb.Subscribe(\"subscriber1\", 10, nil)\n\n\t// Subscribe with a filter (only even numbers)\n\tevenFilter := func(n int) bool { return n%2 == 0 }\n\tevenSub, _ := eb.Subscribe(\"subscriber2\", 10, evenFilter)\n\n\t// Send events\n\tgo func() {\n\t\tfor i := 1; i <= 5; i++ {\n\t\t\teb.PublishEvent(i)\n\t\t}\n\t}()\n\n\t// Receive events\n\tfor event := range allSub.Events() {\n\t\tfmt.Println(\"All:\", event)\n\t\t// Process event...\n\t\tif event == 5 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Clean up\n\tallSub.Stop()\n\tevenSub.Stop()\n}\n```\n\n## API Overview\n\n### Creating an EventBus\n\n```go\neb := eventbus.New[T](inputChanSize int) *EventBus[T]\n```\n\nCreates a new EventBus for type `T` with specified input channel buffer size.\n\n### Starting and Stopping\n\n```go\nerr := 
eb.Start()  // Start processing events\nerr := eb.Stop()   // Stop processing and close all subscriber channels\n```\n\n### Subscribing\n\n```go\n// Subscribe without filter\nsub, err := eb.Subscribe(\"subscriber-name\", chanSize, nil)\n\n// Subscribe with filter\nfilter := func(event T) bool { return /* condition */ }\nsub, err := eb.Subscribe(\"subscriber-name\", chanSize, filter)\n\n// Subscribe with context (auto-unsubscribes when context is cancelled)\nsub, err := eb.SubscribeWithContext(ctx, \"subscriber-name\", chanSize, filter)\n\n// Access events from subscriber\nevents := sub.Events()  // Returns <-chan T\n```\n\n### Unsubscribing\n\n```go\n// Using the Subscriber instance (recommended)\nerr := sub.Stop()\n\n// Or using the EventBus directly\nerr := eb.Unsubscribe(\"subscriber-name\")\n```\n\n### Sending Events\n\n```go\neb.PublishEvent(event)\n```\n\n### Getting Information\n\n```go\ncount := eb.SubscriberCount()\nstr := eb.String() // Debug string representation\n```\n\n## Examples\n\n### Basic Usage\n\n```go\neb := eventbus.New[string](10)\neb.Start()\ndefer eb.Stop()\n\nsub, _ := eb.Subscribe(\"logger\", 10, nil)\ndefer sub.Stop()\n\ngo func() {\n    eb.PublishEvent(\"Hello\")\n    eb.PublishEvent(\"World\")\n}()\n\nfor msg := range sub.Events() {\n    fmt.Println(msg)\n}\n```\n\n### With Event Filtering\n\n```go\ntype LogEvent struct {\n    Level   string\n    Message string\n}\n\neb := eventbus.New[LogEvent](10)\neb.Start()\ndefer eb.Stop()\n\n// Subscribe to errors only\nerrorFilter := func(e LogEvent) bool { return e.Level == \"ERROR\" }\nerrorSub, _ := eb.Subscribe(\"error-handler\", 10, errorFilter)\ndefer errorSub.Stop()\n\n// Subscribe to all events\nallSub, _ := eb.Subscribe(\"all-handler\", 10, nil)\ndefer allSub.Stop()\n\n// Access events\nfor event := range errorSub.Events() {\n    // Handle error events only\n}\n```\n\n### With Context\n\n```go\nctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\ndefer cancel()\n\neb 
:= eventbus.New[int](10)\neb.Start()\ndefer eb.Stop()\n\n// Automatically unsubscribes after 5 seconds\nsub, _ := eb.SubscribeWithContext(ctx, \"temp\", 10, nil)\n\nfor {\n    select {\n    case event := <-sub.Events():\n        // Process event\n    case <-ctx.Done():\n        return\n    }\n}\n```\n\n## Design Decisions\n\n### Non-Blocking Distribution\n\nEventBus uses non-blocking sends to subscriber channels. If a subscriber's channel is full, the event is dropped for that subscriber only, ensuring that slow subscribers don't block the event bus or other subscribers.\n\n### Channel Closure\n\n- Subscriber channels are closed when `Subscriber.Stop()` or `EventBus.Unsubscribe()` is called\n- All subscriber channels are closed when `EventBus.Stop()` is called\n- When using `SubscribeWithContext()`, channels are closed when the context is cancelled\n\n### Subscriber API\n\nThe `Subscribe()` and `SubscribeWithContext()` methods return a `Subscriber` instance that provides:\n- `Events()` - Returns the receive-only channel for events\n- `Stop()` - Unsubscribes and closes the channel\n\n### Thread Safety\n\nAll public methods are thread-safe and can be called concurrently from multiple goroutines. The EventBus uses read-write mutexes to minimize contention during event distribution.\n\n## Performance Considerations\n\n- **Buffer Sizes**: Choose appropriate buffer sizes based on your event rate and processing speed\n- **Filter Functions**: Keep filter functions fast; they're called for every event-subscriber pair\n- **Slow Subscribers**: Slow subscribers with small buffers will drop events; increase buffer size or process events asynchronously\n\n## Testing\n\nRun unit tests:\n\n```bash\ngo test -tags unit -v ./eventbus\n```\n\nRun benchmarks:\n\n```bash\ngo test -tags unit -bench=. ./eventbus\n```\n\n## License\n\nLicensed under the Apache License, Version 2.0. See LICENSE file for details.\n"
  },
  {
    "path": "internal/eventbus/eventbus.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n// Package eventbus provides a generic event processing system based on channels.\n// It allows multiple subscribers to receive events from a single input channel,\n// with optional filtering and configurable buffer sizes.\npackage eventbus\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n)\n\nvar (\n\t// ErrAlreadyStarted is returned when Start is called on an already running EventBus\n\tErrAlreadyStarted = errors.New(\"eventbus: already started\")\n\t// ErrNotStarted is returned when operations are attempted on a non-started EventBus\n\tErrNotStarted = errors.New(\"eventbus: not started\")\n\t// ErrAlreadyStopped is returned when Stop is called on an already stopped EventBus\n\tErrAlreadyStopped = errors.New(\"eventbus: already stopped\")\n\t// ErrSubscriberNotFound is returned when unsubscribing a non-existent subscriber\n\tErrSubscriberNotFound = errors.New(\"eventbus: subscriber not found\")\n)\n\n// FilterFunc is a function type that filters events.\n// It returns true if the event should be sent to the subscriber, false otherwise.\ntype FilterFunc[T any] func(T) bool\n\n// subscriber represents a single subscriber 
to the event bus\ntype subscriber[T any] struct {\n\tch     chan T\n\tfilter FilterFunc[T]\n\tname   string\n\tid     int\n}\n\n// Subscriber provides access to events and control over the subscription\ntype Subscriber[T any] struct {\n\tch   <-chan T\n\teb   *EventBus[T]\n\tname string\n\tid   int\n}\n\n// Events returns the channel to receive events from\nfunc (s *Subscriber[T]) Events() <-chan T {\n\treturn s.ch\n}\n\n// Stop unsubscribes and closes the subscriber's channel\nfunc (s *Subscriber[T]) Stop() error {\n\treturn s.eb.remove(s)\n}\n\ntype status uint8\n\nconst (\n\tstatusInitialized status = iota\n\tstatusStarted\n\tstatusStopped\n)\n\n// EventBus manages event distribution to multiple subscribers\ntype EventBus[T any] struct {\n\tlogger       StdLogger\n\tinput        chan T\n\tclosedSignal chan struct{}\n\tsubscribers  []*subscriber[T]\n\twg           sync.WaitGroup\n\tcfg          EventBusConfig\n\tmu           sync.RWMutex\n\tstatus       status\n}\n\ntype StdLogger interface {\n\tPrint(v ...any)\n\tPrintf(format string, v ...any)\n\tPrintln(v ...any)\n}\n\ntype EventBusConfig struct {\n\t// Size of the input event queue; when the queue receives more events than it can hold, events are dropped.\n\tInputEventsQueueSize int\n}\n\n// New creates a new EventBus with the specified input channel buffer size.\n// The EventBus must be started with Start() before it begins processing events.\nfunc New[T any](cfg EventBusConfig, logger StdLogger) *EventBus[T] {\n\treturn &EventBus[T]{\n\t\tcfg:          cfg,\n\t\tlogger:       logger,\n\t\tinput:        make(chan T, cfg.InputEventsQueueSize),\n\t\tclosedSignal: make(chan struct{}, 1),\n\t\tstatus:       statusInitialized,\n\t}\n}\n\n// Start begins processing events from the input channel and distributing them to subscribers.\n// Returns ErrAlreadyStarted if the EventBus is already running.\nfunc (eb *EventBus[T]) Start() error {\n\teb.mu.Lock()\n\tdefer eb.mu.Unlock()\n\n\tswitch eb.status {\n\tcase 
statusStarted:\n\t\treturn ErrAlreadyStarted\n\tcase statusStopped:\n\t\treturn ErrAlreadyStopped\n\tdefault:\n\t}\n\n\teb.status = statusStarted\n\teb.wg.Add(1)\n\n\tgo eb.run()\n\n\treturn nil\n}\n\n// Stop halts event processing and closes all subscriber channels.\n// It waits for the processing goroutine to finish.\n// Returns ErrNotStarted if the EventBus was never started, or ErrAlreadyStopped if already stopped.\nfunc (eb *EventBus[T]) Stop() error {\n\teb.mu.Lock()\n\n\tdefer eb.mu.Unlock()\n\tswitch eb.status {\n\tcase statusStopped:\n\t\treturn ErrAlreadyStopped\n\tcase statusInitialized:\n\t\treturn ErrNotStarted\n\tdefault:\n\t\teb.status = statusStopped\n\t}\n\tclose(eb.closedSignal)\n\t// Wait for the run goroutine to finish\n\teb.mu.Unlock()\n\teb.wg.Wait()\n\teb.mu.Lock()\n\n\tfor _, sub := range eb.subscribers {\n\t\tclose(sub.ch)\n\t}\n\teb.subscribers = nil\n\treturn nil\n}\n\n// PublishEvent sends an event onto the bus. If the input buffer is full the\n// event is dropped to avoid blocking publishers.\nfunc (eb *EventBus[T]) PublishEvent(e T) bool {\n\tselect {\n\tcase eb.input <- e:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n// PublishEventBlocking sends an event onto the bus. 
If the input buffer is full it blocks until the event is published\nfunc (eb *EventBus[T]) PublishEventBlocking(e T) {\n\tselect {\n\tcase eb.input <- e:\n\t}\n}\n\n// Subscribe adds a new subscriber to the event bus.\n// name: unique identifier for the subscriber\n// queueSize: buffer size for the subscriber's channel (must be >= 0)\n// filter: optional filter function (can be nil to receive all events)\n//\n// Returns a Subscriber instance that provides access to events and a Stop method.\nfunc (eb *EventBus[T]) Subscribe(name string, queueSize int, filter FilterFunc[T]) *Subscriber[T] {\n\teb.mu.Lock()\n\tdefer eb.mu.Unlock()\n\n\tsub := &subscriber[T]{\n\t\tname:   name,\n\t\tch:     make(chan T, queueSize),\n\t\tfilter: filter,\n\t}\n\teb.subscribers = append(eb.subscribers, sub)\n\n\treturn &Subscriber[T]{\n\t\tch:   sub.ch,\n\t\tname: name,\n\t\teb:   eb,\n\t}\n}\n\n// remove removes a subscriber from the event bus and closes its channel.\n// Returns ErrSubscriberNotFound if the subscriber doesn't exist.\nfunc (eb *EventBus[T]) remove(s *Subscriber[T]) error {\n\teb.mu.Lock()\n\tdefer eb.mu.Unlock()\n\n\tsubID := -1\n\n\tfor id, sub := range eb.subscribers {\n\t\tif s.ch == sub.ch {\n\t\t\tsubID = id\n\t\t\tclose(sub.ch)\n\t\t}\n\t}\n\n\tif subID == -1 {\n\t\treturn ErrSubscriberNotFound\n\t}\n\n\teb.subscribers = append(eb.subscribers[0:subID], eb.subscribers[subID+1:]...)\n\treturn nil\n}\n\n// SubscriberCount returns the current number of active subscribers\nfunc (eb *EventBus[T]) SubscriberCount() int {\n\teb.mu.RLock()\n\tdefer eb.mu.RUnlock()\n\treturn len(eb.subscribers)\n}\n\n// run is the main event processing loop\nfunc (eb *EventBus[T]) run() {\n\tdefer eb.wg.Done()\n\n\tfor {\n\t\tselect {\n\t\tcase <-eb.closedSignal:\n\t\t\treturn\n\t\tcase event, ok := <-eb.input:\n\t\t\tif !ok {\n\t\t\t\tif eb.logger != nil {\n\t\t\t\t\teb.logger.Printf(\"eventbus channel has been closed, it should not have happened, report the bug 
please.\")\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\teb.distribute(event)\n\t\t}\n\t}\n}\n\n// distribute sends an event to all matching subscribers\nfunc (eb *EventBus[T]) distribute(event T) {\n\teb.mu.RLock()\n\tdefer eb.mu.RUnlock()\n\n\tfor _, sub := range eb.subscribers {\n\t\tif sub.filter != nil && !sub.filter(event) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Non-blocking send to avoid slow subscribers blocking the bus\n\t\tselect {\n\t\tcase sub.ch <- event:\n\t\tdefault:\n\t\t\tif eb.logger != nil {\n\t\t\t\teb.logger.Printf(\"eventbus: dropped event for subscriber %s, make sure it is running and update it's channel size\\n\", sub.name)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// SubscribeWithContext subscribes with a context that can cancel the subscription.\n// The subscriber will be automatically unsubscribed when the context is cancelled.\n// Returns a Subscriber instance and an error if subscription fails.\nfunc (eb *EventBus[T]) SubscribeWithContext(ctx context.Context, name string, chanSize int, filter FilterFunc[T]) *Subscriber[T] {\n\tsub := eb.Subscribe(name, chanSize, filter)\n\n\t// Start a goroutine to handle context cancellation\n\tgo func() {\n\t\t<-ctx.Done()\n\t\t_ = eb.remove(sub) // Ignore error if already unsubscribed\n\t}()\n\n\treturn sub\n}\n\n// String returns a string representation of the EventBus for debugging\nfunc (eb *EventBus[T]) String() string {\n\teb.mu.RLock()\n\tdefer eb.mu.RUnlock()\n\n\treturn fmt.Sprintf(\"EventBus{subscribers: %d, status: %v}\", len(eb.subscribers), eb.status)\n}\n"
  },
  {
    "path": "internal/eventbus/eventbus_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage eventbus\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestNew(t *testing.T) {\n\teb := New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 10,\n\t\t}, nil)\n\tif eb == nil {\n\t\tt.Fatal(\"New returned nil\")\n\t}\n\tif eb.input == nil {\n\t\tt.Error(\"input channel is nil\")\n\t}\n\tif len(eb.subscribers) != 0 {\n\t\tt.Error(\"subscribers list is not empty\")\n\t}\n\tif eb.status != statusInitialized {\n\t\tt.Error(\"EventBus should not be status initially\")\n\t}\n}\n\nfunc TestStartStop(t *testing.T) {\n\teb := New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 10,\n\t\t}, nil)\n\n\t// Test starting\n\terr := eb.Start()\n\tif err != nil {\n\t\tt.Fatalf(\"Start failed: %v\", err)\n\t}\n\n\t// Test double start\n\terr = eb.Start()\n\tif err != ErrAlreadyStarted {\n\t\tt.Errorf(\"Expected ErrAlreadyStarted, got: %v\", err)\n\t}\n\n\t// Test stopping\n\terr = eb.Stop()\n\tif err != nil {\n\t\tt.Fatalf(\"Stop failed: %v\", err)\n\t}\n\n\t// Test double stop\n\terr = eb.Stop()\n\tif err != ErrAlreadyStopped {\n\t\tt.Errorf(\"Expected 
ErrAlreadyStopped, got: %v\", err)\n\t}\n}\n\nfunc TestStopWithoutStart(t *testing.T) {\n\teb := New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 10,\n\t\t}, nil)\n\terr := eb.Stop()\n\tif err != ErrNotStarted {\n\t\tt.Errorf(\"Expected ErrNotStarted, got: %v\", err)\n\t}\n}\n\nfunc TestEventDistribution(t *testing.T) {\n\teb := New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 10,\n\t\t}, nil)\n\terr := eb.Start()\n\tif err != nil {\n\t\tt.Fatalf(\"Start failed: %v\", err)\n\t}\n\tdefer eb.Stop()\n\n\tsub1 := eb.Subscribe(\"sub1\", 10, nil)\n\tsub2 := eb.Subscribe(\"sub2\", 10, nil)\n\n\t// Send events\n\teb.PublishEvent(1)\n\teb.PublishEvent(2)\n\teb.PublishEvent(3)\n\n\t// Verify both subscribers receive all events\n\tfor i := 1; i <= 3; i++ {\n\t\tselect {\n\t\tcase val := <-sub1.Events():\n\t\t\tif val != i {\n\t\t\t\tt.Errorf(\"sub1: expected %d, got %d\", i, val)\n\t\t\t}\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tt.Fatal(\"sub1: timeout waiting for event\")\n\t\t}\n\n\t\tselect {\n\t\tcase val := <-sub2.Events():\n\t\t\tif val != i {\n\t\t\t\tt.Errorf(\"sub2: expected %d, got %d\", i, val)\n\t\t\t}\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tt.Fatal(\"sub2: timeout waiting for event\")\n\t\t}\n\t}\n}\n\nfunc TestEventFiltering(t *testing.T) {\n\teb := New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 10,\n\t\t}, nil)\n\terr := eb.Start()\n\tif err != nil {\n\t\tt.Fatalf(\"Start failed: %v\", err)\n\t}\n\tdefer eb.Stop()\n\n\t// Subscriber 1: no filter (receives all)\n\tsub1 := eb.Subscribe(\"all\", 10, nil)\n\n\t// Subscriber 2: only even numbers\n\tevenFilter := func(n int) bool { return n%2 == 0 }\n\tsub2 := eb.Subscribe(\"even\", 10, evenFilter)\n\n\t// Subscriber 3: only odd numbers\n\toddFilter := func(n int) bool { return n%2 != 0 }\n\tsub3 := eb.Subscribe(\"odd\", 10, oddFilter)\n\n\t// Send events\n\tfor i := 1; i <= 6; i++ {\n\t\teb.PublishEvent(i)\n\t}\n\n\t// Verify subscriber 1 gets all events\n\treceived1 
:= make([]int, 0, 6)\n\tfor i := 0; i < 6; i++ {\n\t\tselect {\n\t\tcase val := <-sub1.Events():\n\t\t\treceived1 = append(received1, val)\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tt.Fatal(\"timeout waiting for event on ch1\")\n\t\t}\n\t}\n\tif len(received1) != 6 {\n\t\tt.Errorf(\"ch1: expected 6 events, got %d\", len(received1))\n\t}\n\n\t// Verify subscriber 2 gets only even numbers\n\treceived2 := make([]int, 0, 3)\n\tfor i := 0; i < 3; i++ {\n\t\tselect {\n\t\tcase val := <-sub2.Events():\n\t\t\tif val%2 != 0 {\n\t\t\t\tt.Errorf(\"ch2: received odd number %d\", val)\n\t\t\t}\n\t\t\treceived2 = append(received2, val)\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tt.Fatal(\"timeout waiting for event on ch2\")\n\t\t}\n\t}\n\n\t// Verify subscriber 3 gets only odd numbers\n\treceived3 := make([]int, 0, 3)\n\tfor i := 0; i < 3; i++ {\n\t\tselect {\n\t\tcase val := <-sub3.Events():\n\t\t\tif val%2 == 0 {\n\t\t\t\tt.Errorf(\"ch3: received even number %d\", val)\n\t\t\t}\n\t\t\treceived3 = append(received3, val)\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tt.Fatal(\"timeout waiting for event on ch3\")\n\t\t}\n\t}\n}\n\nfunc TestSubscriberCount(t *testing.T) {\n\teb := New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 10,\n\t\t}, nil)\n\n\tif eb.SubscriberCount() != 0 {\n\t\tt.Errorf(\"Expected 0 subscribers, got %d\", eb.SubscriberCount())\n\t}\n\n\tsub1 := eb.Subscribe(\"sub1\", 5, nil)\n\tif eb.SubscriberCount() != 1 {\n\t\tt.Errorf(\"Expected 1 subscriber, got %d\", eb.SubscriberCount())\n\t}\n\n\teb.Subscribe(\"sub2\", 5, nil)\n\tif eb.SubscriberCount() != 2 {\n\t\tt.Errorf(\"Expected 2 subscribers, got %d\", eb.SubscriberCount())\n\t}\n\n\terr := sub1.Stop()\n\tif err != nil {\n\t\tt.Fatalf(\"Stop failed: %v\", err)\n\t}\n\tif eb.SubscriberCount() != 1 {\n\t\tt.Errorf(\"Expected 1 subscriber, got %d\", eb.SubscriberCount())\n\t}\n}\n\nfunc TestConcurrentSubscribers(t *testing.T) {\n\teb := 
New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 10,\n\t\t}, nil)\n\terr := eb.Start()\n\tif err != nil {\n\t\tt.Fatalf(\"Start failed: %v\", err)\n\t}\n\tdefer eb.Stop()\n\n\tnumSubscribers := 10\n\teventsPerSubscriber := 100\n\n\tvar wg sync.WaitGroup\n\twg.Add(numSubscribers)\n\n\t// Create multiple subscribers\n\tfor i := 0; i < numSubscribers; i++ {\n\t\tsub := eb.Subscribe(string(rune('A'+i)), 100, nil)\n\n\t\tgo func(sub *Subscriber[int], subName string) {\n\t\t\tdefer wg.Done()\n\t\t\tcount := 0\n\t\t\tfor range sub.Events() {\n\t\t\t\tcount++\n\t\t\t\tif count == eventsPerSubscriber {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(sub, string(rune('A'+i)))\n\t}\n\n\t// Send events\n\tgo func() {\n\t\tfor i := 0; i < eventsPerSubscriber; i++ {\n\t\t\teb.PublishEventBlocking(i)\n\t\t}\n\t}()\n\n\t// Wait for all subscribers to receive their events\n\tdone := make(chan struct{})\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\t// All subscribers received events\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"timeout waiting for subscribers\")\n\t}\n}\n\nfunc TestSubscribeWithContext(t *testing.T) {\n\teb := New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 10,\n\t\t}, nil)\n\terr := eb.Start()\n\tif err != nil {\n\t\tt.Fatalf(\"Start failed: %v\", err)\n\t}\n\tdefer eb.Stop()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tsub := eb.SubscribeWithContext(ctx, \"test\", 10, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"SubscribeWithContext failed: %v\", err)\n\t}\n\n\t// Send an event\n\teb.PublishEvent(42)\n\n\t// Verify event is received\n\tselect {\n\tcase val := <-sub.Events():\n\t\tif val != 42 {\n\t\t\tt.Errorf(\"Expected 42, got %d\", val)\n\t\t}\n\tcase <-time.After(1 * time.Second):\n\t\tt.Fatal(\"timeout waiting for event\")\n\t}\n\n\t// Cancel context\n\tcancel()\n\n\t// Give it time to be removed\n\ttime.Sleep(100 * time.Millisecond)\n\n\t// Verify subscriber was 
removed\n\tif eb.SubscriberCount() != 0 {\n\t\tt.Errorf(\"Expected 0 subscribers after context cancel, got %d\", eb.SubscriberCount())\n\t}\n}\n\nfunc TestChannelClosedOnStop(t *testing.T) {\n\teb := New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 10,\n\t\t}, nil)\n\terr := eb.Start()\n\tif err != nil {\n\t\tt.Fatalf(\"Start failed: %v\", err)\n\t}\n\n\tsub := eb.Subscribe(\"test\", 10, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"Subscribe failed: %v\", err)\n\t}\n\n\terr = eb.Stop()\n\tif err != nil {\n\t\tt.Fatalf(\"Stop failed: %v\", err)\n\t}\n\n\t// Verify channel is closed\n\tselect {\n\tcase _, ok := <-sub.Events():\n\t\tif ok {\n\t\t\tt.Error(\"Channel should be closed\")\n\t\t}\n\tcase <-time.After(1 * time.Second):\n\t\tt.Fatal(\"timeout waiting for channel close\")\n\t}\n}\n\nfunc TestSubscriberStopAfterEventBusStop(t *testing.T) {\n\teb := New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 10,\n\t\t}, nil)\n\terr := eb.Start()\n\tif err != nil {\n\t\tt.Fatalf(\"Start failed: %v\", err)\n\t}\n\n\tsub := eb.Subscribe(\"test\", 10, nil)\n\n\terr = eb.Stop()\n\tif err != nil {\n\t\tt.Fatalf(\"Stop failed: %v\", err)\n\t}\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tt.Fatalf(\"Stop should not panic after eventbus Stop: %v\", r)\n\t\t}\n\t}()\n\n\terr = sub.Stop()\n\tif err != ErrSubscriberNotFound {\n\t\tt.Fatalf(\"Expected ErrSubscriberNotFound, got: %v\", err)\n\t}\n}\n\nfunc TestChannelClosedOnUnsubscribe(t *testing.T) {\n\teb := New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 10,\n\t\t}, nil)\n\n\tsub := eb.Subscribe(\"test\", 10, nil)\n\n\terr := sub.Stop()\n\tif err != nil {\n\t\tt.Fatalf(\"Stop failed: %v\", err)\n\t}\n\n\t// Verify channel is closed\n\tselect {\n\tcase _, ok := <-sub.Events():\n\t\tif ok {\n\t\t\tt.Error(\"Channel should be closed\")\n\t\t}\n\tdefault:\n\t\t// Channel might not be immediately readable, try with timeout\n\t\ttime.Sleep(10 * time.Millisecond)\n\t\tselect {\n\t\tcase _, ok 
:= <-sub.Events():\n\t\t\tif ok {\n\t\t\t\tt.Error(\"Channel should be closed\")\n\t\t\t}\n\t\tdefault:\n\t\t\tt.Error(\"Channel should be closed but is not readable\")\n\t\t}\n\t}\n}\n\nfunc TestString(t *testing.T) {\n\teb := New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 10,\n\t\t}, nil)\n\tstr := eb.String()\n\tif str == \"\" {\n\t\tt.Error(\"String() returned empty string\")\n\t}\n\n\teb.Subscribe(\"test\", 5, nil)\n\tstr = eb.String()\n\tif str == \"\" {\n\t\tt.Error(\"String() returned empty string after subscription\")\n\t}\n}\n\nfunc TestSlowSubscriberDoesNotBlockBus(t *testing.T) {\n\teb := New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 10,\n\t\t}, nil)\n\terr := eb.Start()\n\tif err != nil {\n\t\tt.Fatalf(\"Start failed: %v\", err)\n\t}\n\tdefer eb.Stop()\n\n\t// Fast subscriber with buffer\n\tfastSub := eb.Subscribe(\"fast\", 100, nil)\n\n\t// Slow subscriber with small buffer\n\tslowSub := eb.Subscribe(\"slow\", 1, nil)\n\n\t// Send many events quickly\n\tfor i := 0; i < 50; i++ {\n\t\teb.PublishEventBlocking(i)\n\t}\n\n\t// Fast subscriber should receive most/all events\n\tfastCount := 0\n\ttimeout := time.After(1 * time.Second)\ndrainFast:\n\tfor {\n\t\tselect {\n\t\tcase <-fastSub.Events():\n\t\t\tfastCount++\n\t\t\tif fastCount == 50 {\n\t\t\t\tbreak drainFast\n\t\t\t}\n\t\tcase <-timeout:\n\t\t\tbreak drainFast\n\t\t}\n\t}\n\n\tif fastCount < 50 { // Should receive most events\n\t\tt.Errorf(\"Fast subscriber only received %d events, expected 50\", fastCount)\n\t}\n\n\t// Slow subscriber may have dropped events (buffer overflow)\n\tslowCount := 0\ndrainSlow:\n\tfor {\n\t\tselect {\n\t\tcase <-slowSub.Events():\n\t\t\tslowCount++\n\t\tdefault:\n\t\t\tbreak drainSlow\n\t\t}\n\t}\n\n\t// Slow subscriber should have received some events but likely not all\n\tt.Logf(\"Slow subscriber received %d events (some may have been dropped)\", slowCount)\n}\n\nfunc BenchmarkEventDistribution(b *testing.B) {\n\teb := 
New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 1000,\n\t\t}, nil)\n\teb.Start()\n\tdefer eb.Stop()\n\n\t// Create 10 subscribers\n\tfor i := 0; i < 10; i++ {\n\t\teb.Subscribe(string(rune('A'+i)), 1000, nil)\n\t}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\teb.PublishEvent(i)\n\t}\n}\n\nfunc BenchmarkEventDistributionWithFilter(b *testing.B) {\n\teb := New[int](\n\t\tEventBusConfig{\n\t\t\tInputEventsQueueSize: 1000,\n\t\t}, nil)\n\teb.Start()\n\tdefer eb.Stop()\n\n\tfilter := func(n int) bool { return n%2 == 0 }\n\n\t// Create 10 subscribers with filters\n\tfor i := 0; i < 10; i++ {\n\t\teb.Subscribe(string(rune('A'+i)), 1000, filter)\n\t}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\teb.PublishEvent(i)\n\t}\n}\n"
  },
  {
    "path": "internal/eventbus/example_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage eventbus_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/internal/eventbus\"\n)\n\n// Example demonstrates basic usage of EventBus\nfunc Example() {\n\t// Create a new EventBus for integer events with input buffer of 10\n\teb := eventbus.New[int](eventbus.EventBusConfig{\n\t\tInputEventsQueueSize: 10,\n\t}, nil)\n\n\t// Start the event bus\n\tif err := eb.Start(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer eb.Stop()\n\n\t// Subscribe to all events\n\tallSub := eb.Subscribe(\"all-subscriber\", 10, nil)\n\n\t// Subscribe to even numbers only\n\tevenFilter := func(n int) bool { return n%2 == 0 }\n\tevenSub := eb.Subscribe(\"even-subscriber\", 10, evenFilter)\n\n\t// Send some events\n\tgo func() {\n\t\tfor i := 1; i <= 5; i++ {\n\t\t\teb.PublishEvent(i)\n\t\t}\n\t}()\n\n\t// Receive events\n\ttime.Sleep(100 * time.Millisecond) // Give time for events to be distributed\n\n\t// Drain all events from allEvents\n\tfor {\n\t\tselect {\n\t\tcase val := <-allSub.Events():\n\t\t\tfmt.Printf(\"All subscriber received: %d\\n\", val)\n\t\tdefault:\n\t\t\tgoto evenLoop\n\t\t}\n\t}\n\nevenLoop:\n\t// Drain 
even events\n\tfor {\n\t\tselect {\n\t\tcase val := <-evenSub.Events():\n\t\t\tfmt.Printf(\"Even subscriber received: %d\\n\", val)\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Output:\n\t// All subscriber received: 1\n\t// All subscriber received: 2\n\t// All subscriber received: 3\n\t// All subscriber received: 4\n\t// All subscriber received: 5\n\t// Even subscriber received: 2\n\t// Even subscriber received: 4\n}\n\n// Example_withContext demonstrates using context-based subscriptions\nfunc Example_withContext() {\n\teb := eventbus.New[string](\n\t\teventbus.EventBusConfig{\n\t\t\tInputEventsQueueSize: 10,\n\t\t}, nil)\n\teb.Start()\n\tdefer eb.Stop()\n\n\t// Create a context that will be cancelled\n\tctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\tdefer cancel()\n\n\t// Subscribe with context - will auto-remove when context is cancelled\n\tsub := eb.SubscribeWithContext(ctx, \"temp-subscriber\", 10, nil)\n\n\t// Send some events\n\tgo func() {\n\t\tfor i := 0; i < 3; i++ {\n\t\t\teb.PublishEvent(fmt.Sprintf(\"event-%d\", i))\n\t\t\ttime.Sleep(30 * time.Millisecond)\n\t\t}\n\t}()\n\n\t// Receive events until context is cancelled\n\tfor {\n\t\tselect {\n\t\tcase event := <-sub.Events():\n\t\t\tfmt.Println(\"Received:\", event)\n\t\tcase <-ctx.Done():\n\t\t\tfmt.Println(\"Context cancelled, subscription ended\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Output:\n\t// Received: event-0\n\t// Received: event-1\n\t// Received: event-2\n\t// Context cancelled, subscription ended\n}\n\n// Example_multipleSubscribers demonstrates multiple subscribers with different filters\nfunc Example_multipleSubscribers() {\n\ttype LogEvent struct {\n\t\tLevel   string\n\t\tMessage string\n\t}\n\n\teb := eventbus.New[LogEvent](eventbus.EventBusConfig{\n\t\tInputEventsQueueSize: 10,\n\t}, nil)\n\teb.Start()\n\tdefer eb.Stop()\n\n\t// Subscribe to error logs only\n\terrorFilter := func(e LogEvent) bool { return e.Level == \"ERROR\" }\n\terrorSub := 
eb.Subscribe(\"error-logger\", 5, errorFilter)\n\n\t// Subscribe to all logs\n\tallSub := eb.Subscribe(\"all-logger\", 10, nil)\n\n\t// Send various log events\n\tgo func() {\n\t\tlogs := []LogEvent{\n\t\t\t{Level: \"INFO\", Message: \"Application status\"},\n\t\t\t{Level: \"ERROR\", Message: \"Connection failed\"},\n\t\t\t{Level: \"INFO\", Message: \"Retrying connection\"},\n\t\t\t{Level: \"ERROR\", Message: \"Max retries exceeded\"},\n\t\t}\n\t\tfor _, log := range logs {\n\t\t\teb.PublishEvent(log)\n\t\t}\n\t}()\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tfmt.Println(\"Error logger received:\")\n\tfor {\n\t\tselect {\n\t\tcase event := <-errorSub.Events():\n\t\t\tfmt.Printf(\"  [%s] %s\\n\", event.Level, event.Message)\n\t\tdefault:\n\t\t\tgoto allLogs\n\t\t}\n\t}\n\nallLogs:\n\tfmt.Println(\"All logger received:\")\n\tfor {\n\t\tselect {\n\t\tcase event := <-allSub.Events():\n\t\t\tfmt.Printf(\"  [%s] %s\\n\", event.Level, event.Message)\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Output:\n\t// Error logger received:\n\t//   [ERROR] Connection failed\n\t//   [ERROR] Max retries exceeded\n\t// All logger received:\n\t//   [INFO] Application status\n\t//   [ERROR] Connection failed\n\t//   [INFO] Retrying connection\n\t//   [ERROR] Max retries exceeded\n}\n\n// Example_unsubscribe demonstrates dynamic subscription management\nfunc Example_unsubscribe() {\n\teb := eventbus.New[int](eventbus.EventBusConfig{\n\t\tInputEventsQueueSize: 10,\n\t}, nil)\n\n\teb.Start()\n\tdefer eb.Stop()\n\n\t// Subscribe\n\tsub := eb.Subscribe(\"temporary\", 10, nil)\n\n\t// Send first batch of events\n\teb.PublishEvent(1)\n\teb.PublishEvent(2)\n\n\t// Receive first batch\n\tfmt.Println(\"Before remove:\")\n\tfor i := 0; i < 2; i++ {\n\t\tfmt.Println(\"Received:\", <-sub.Events())\n\t}\n\n\t// Unsubscribe using Stop method\n\terr := sub.Stop()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Send more events (won't be received)\n\teb.PublishEvent(3)\n\teb.PublishEvent(4)\n\n\t// 
Channel is now closed\n\tif _, ok := <-sub.Events(); !ok {\n\t\tfmt.Println(\"Channel closed after remove\")\n\t}\n\n\t// Output:\n\t// Before remove:\n\t// Received: 1\n\t// Received: 2\n\t// Channel closed after remove\n}\n"
  },
  {
    "path": "internal/frame/frames.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2012, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage frame\n\nimport (\n\t\"fmt\"\n\t\"net\"\n)\n\nconst (\n\tprotoDirectionMask = 0x80\n\tprotoVersionMask   = 0x7F\n\tprotoVersion1      = 0x01\n\tprotoVersion2      = 0x02\n\tprotoVersion3      = 0x03\n\tprotoVersion4      = 0x04\n\tprotoVersion5      = 0x05\n\n\tmaxFrameSize = 256 * 1024 * 1024\n)\n\nconst (\n\t// result kind\n\tResultKindVoid          = 1\n\tResultKindRows          = 2\n\tResultKindKeyspace      = 3\n\tResultKindPrepared      = 4\n\tResultKindSchemaChanged = 5\n\n\t// rows flags\n\tFlagGlobalTableSpec int = 0x01\n\tFlagHasMorePages    int = 0x02\n\tFlagNoMetaData      int = 0x04\n\n\t// query flags\n\tFlagValues                byte = 0x01\n\tFlagSkipMetaData          byte = 0x02\n\tFlagPageSize              byte = 0x04\n\tFlagWithPagingState       byte = 0x08\n\tFlagWithSerialConsistency byte = 0x10\n\tFlagDefaultTimestamp      byte = 0x20\n\tFlagWithNameValues        byte 
= 0x40\n\tFlagWithKeyspace          byte = 0x80\n\n\t// prepare flags\n\tFlagWithPreparedKeyspace uint32 = 0x01\n\n\t// header flags\n\tFlagCompress      byte = 0x01\n\tFlagTracing       byte = 0x02\n\tFlagCustomPayload byte = 0x04\n\tFlagWarning       byte = 0x08\n\tFlagBetaProtocol  byte = 0x10\n)\n\ntype ProtoVersion byte\n\nfunc (p ProtoVersion) Request() bool {\n\treturn p&protoDirectionMask == 0x00\n}\n\nfunc (p ProtoVersion) Response() bool {\n\treturn p&protoDirectionMask == 0x80\n}\n\nfunc (p ProtoVersion) Version() byte {\n\treturn byte(p) & protoVersionMask\n}\n\nfunc (p ProtoVersion) String() string {\n\tdir := \"REQ\"\n\tif p.Response() {\n\t\tdir = \"RESP\"\n\t}\n\n\treturn fmt.Sprintf(\"[version=%d direction=%s]\", p.Version(), dir)\n}\n\ntype Op byte\n\nconst (\n\t// header ops\n\tOpError         Op = 0x00\n\tOpStartup       Op = 0x01\n\tOpReady         Op = 0x02\n\tOpAuthenticate  Op = 0x03\n\tOpOptions       Op = 0x05\n\tOpSupported     Op = 0x06\n\tOpQuery         Op = 0x07\n\tOpResult        Op = 0x08\n\tOpPrepare       Op = 0x09\n\tOpExecute       Op = 0x0A\n\tOpRegister      Op = 0x0B\n\tOpEvent         Op = 0x0C\n\tOpBatch         Op = 0x0D\n\tOpAuthChallenge Op = 0x0E\n\tOpAuthResponse  Op = 0x0F\n\tOpAuthSuccess   Op = 0x10\n)\n\nfunc (f Op) String() string {\n\tswitch f {\n\tcase OpError:\n\t\treturn \"ERROR\"\n\tcase OpStartup:\n\t\treturn \"STARTUP\"\n\tcase OpReady:\n\t\treturn \"READY\"\n\tcase OpAuthenticate:\n\t\treturn \"AUTHENTICATE\"\n\tcase OpOptions:\n\t\treturn \"OPTIONS\"\n\tcase OpSupported:\n\t\treturn \"SUPPORTED\"\n\tcase OpQuery:\n\t\treturn \"QUERY\"\n\tcase OpResult:\n\t\treturn \"RESULT\"\n\tcase OpPrepare:\n\t\treturn \"PREPARE\"\n\tcase OpExecute:\n\t\treturn \"EXECUTE\"\n\tcase OpRegister:\n\t\treturn \"REGISTER\"\n\tcase OpEvent:\n\t\treturn \"EVENT\"\n\tcase OpBatch:\n\t\treturn \"BATCH\"\n\tcase OpAuthChallenge:\n\t\treturn \"AUTH_CHALLENGE\"\n\tcase OpAuthResponse:\n\t\treturn \"AUTH_RESPONSE\"\n\tcase 
OpAuthSuccess:\n\t\treturn \"AUTH_SUCCESS\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"UNKNOWN_OP_%d\", f)\n\t}\n}\n\ntype FrameHeader struct {\n\tWarnings []string\n\tStream   int\n\tLength   int\n\tVersion  ProtoVersion\n\tFlags    byte\n\tOp       Op\n}\n\nfunc (f FrameHeader) String() string {\n\treturn fmt.Sprintf(\"[header version=%s flags=0x%x stream=%d op=%s length=%d]\", f.Version, f.Flags, f.Stream, f.Op, f.Length)\n}\n\nfunc (f FrameHeader) Header() FrameHeader {\n\treturn f\n}\n\ntype Frame interface {\n\tHeader() FrameHeader\n}\n\ntype ReadyFrame struct {\n\tFrameHeader\n}\n\ntype SupportedFrame struct {\n\tSupported map[string][]string\n\tFrameHeader\n}\n\ntype SchemaChangeKeyspace struct {\n\tChange   string\n\tKeyspace string\n\tFrameHeader\n}\n\nfunc (f SchemaChangeKeyspace) String() string {\n\treturn fmt.Sprintf(\"[event schema_change_keyspace change=%q keyspace=%q]\", f.Change, f.Keyspace)\n}\n\ntype SchemaChangeTable struct {\n\tChange   string\n\tKeyspace string\n\tObject   string\n\tFrameHeader\n}\n\nfunc (f SchemaChangeTable) String() string {\n\treturn fmt.Sprintf(\"[event schema_change change=%q keyspace=%q object=%q]\", f.Change, f.Keyspace, f.Object)\n}\n\ntype SchemaChangeType struct {\n\tChange   string\n\tKeyspace string\n\tObject   string\n\tFrameHeader\n}\n\ntype SchemaChangeFunction struct {\n\tChange   string\n\tKeyspace string\n\tName     string\n\tArgs     []string\n\tFrameHeader\n}\n\ntype SchemaChangeAggregate struct {\n\tChange   string\n\tKeyspace string\n\tName     string\n\tArgs     []string\n\tFrameHeader\n}\n\ntype ClientRoutesChanged struct {\n\tChangeType    string\n\tConnectionIDs []string\n\tHostIDs       []string\n\tFrameHeader\n}\n\ntype AuthenticateFrame struct {\n\tClass string\n\tFrameHeader\n}\n\nfunc (a *AuthenticateFrame) String() string {\n\treturn fmt.Sprintf(\"[authenticate class=%q]\", a.Class)\n}\n\ntype AuthSuccessFrame struct {\n\tData []byte\n\tFrameHeader\n}\n\nfunc (a *AuthSuccessFrame) String() string 
{\n\treturn fmt.Sprintf(\"[auth_success data=%q]\", a.Data)\n}\n\ntype AuthChallengeFrame struct {\n\tData []byte\n\tFrameHeader\n}\n\nfunc (a *AuthChallengeFrame) String() string {\n\treturn fmt.Sprintf(\"[auth_challenge data=%q]\", a.Data)\n}\n\ntype StatusChangeEventFrame struct {\n\tChange string\n\tHost   net.IP\n\tFrameHeader\n\tPort int\n}\n\nfunc (t StatusChangeEventFrame) String() string {\n\treturn fmt.Sprintf(\"[status_change change=%s host=%v port=%v]\", t.Change, t.Host, t.Port)\n}\n\n// essentially the same as statusChange\ntype TopologyChangeEventFrame struct {\n\tChange string\n\tHost   net.IP\n\tFrameHeader\n\tPort int\n}\n\nfunc (t TopologyChangeEventFrame) String() string {\n\treturn fmt.Sprintf(\"[topology_change change=%s host=%v port=%v]\", t.Change, t.Host, t.Port)\n}\n\ntype ErrorFrame struct {\n\tMessage string\n\tFrameHeader\n\tCode int\n}\n\nfunc (e ErrorFrame) GetCode() int {\n\treturn e.Code\n}\n\nfunc (e ErrorFrame) GetMessage() string {\n\treturn e.Message\n}\n\nfunc (e ErrorFrame) Error() string {\n\treturn e.GetMessage()\n}\n\nfunc (e ErrorFrame) String() string {\n\treturn fmt.Sprintf(\"[error code=%x message=%q]\", e.Code, e.Message)\n}\n"
  },
  {
    "path": "internal/lru/lru.go",
    "content": "/*\nCopyright 2015 To gocql authors\nCopyright 2013 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package lru implements an LRU cache.\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage lru\n\nimport \"container/list\"\n\n// Cache is a generic LRU cache. 
It is not safe for concurrent access.\n//\n// This cache has been forked from github.com/golang/groupcache/lru and\n// generalized with a comparable type parameter to avoid the allocations\n// caused by wrapping keys in any.\ntype Cache[K comparable] struct {\n\t// OnEvicted optionally specifies a callback function to be\n\t// executed when an entry is purged from the cache.\n\tOnEvicted func(key K, value any)\n\tll        *list.List\n\tcache     map[K]*list.Element\n\t// MaxEntries is the maximum number of cache entries before\n\t// an item is evicted. Zero means no limit.\n\tMaxEntries int\n}\n\ntype entry[K comparable] struct {\n\tvalue any\n\tkey   K\n}\n\n// New creates a new Cache.\n// If maxEntries is zero, the cache has no limit and it's assumed\n// that eviction is done by the caller.\nfunc New[K comparable](maxEntries int) *Cache[K] {\n\treturn &Cache[K]{\n\t\tMaxEntries: maxEntries,\n\t\tll:         list.New(),\n\t\tcache:      make(map[K]*list.Element),\n\t}\n}\n\n// Add adds a value to the cache.\nfunc (c *Cache[K]) Add(key K, value any) {\n\tif c.cache == nil {\n\t\tc.cache = make(map[K]*list.Element)\n\t\tc.ll = list.New()\n\t}\n\tif ee, ok := c.cache[key]; ok {\n\t\tc.ll.MoveToFront(ee)\n\t\tee.Value.(*entry[K]).value = value\n\t\treturn\n\t}\n\tele := c.ll.PushFront(&entry[K]{key: key, value: value})\n\tc.cache[key] = ele\n\tif c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {\n\t\tc.RemoveOldest()\n\t}\n}\n\n// Get looks up a key's value from the cache.\nfunc (c *Cache[K]) Get(key K) (value any, ok bool) {\n\tif c.cache == nil {\n\t\treturn\n\t}\n\tif ele, hit := c.cache[key]; hit {\n\t\tc.ll.MoveToFront(ele)\n\t\treturn ele.Value.(*entry[K]).value, true\n\t}\n\treturn\n}\n\n// Remove removes the provided key from the cache.\nfunc (c *Cache[K]) Remove(key K) bool {\n\tif c.cache == nil {\n\t\treturn false\n\t}\n\n\tif ele, hit := c.cache[key]; hit {\n\t\tc.removeElement(ele)\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n// RemoveOldest removes the 
oldest item from the cache.\nfunc (c *Cache[K]) RemoveOldest() {\n\tif c.cache == nil {\n\t\treturn\n\t}\n\tele := c.ll.Back()\n\tif ele != nil {\n\t\tc.removeElement(ele)\n\t}\n}\n\nfunc (c *Cache[K]) removeElement(e *list.Element) {\n\tc.ll.Remove(e)\n\tkv := e.Value.(*entry[K])\n\tdelete(c.cache, kv.key)\n\tif c.OnEvicted != nil {\n\t\tc.OnEvicted(kv.key, kv.value)\n\t}\n}\n\n// Len returns the number of items in the cache.\nfunc (c *Cache[K]) Len() int {\n\tif c.cache == nil {\n\t\treturn 0\n\t}\n\treturn c.ll.Len()\n}\n"
  },
  {
    "path": "internal/lru/lru_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\nCopyright 2015 To gocql authors\nCopyright 2013 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  
You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage lru\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nvar getTests = []struct {\n\tname       string\n\tkeyToAdd   string\n\tkeyToGet   string\n\texpectedOk bool\n}{\n\t{\"string_hit\", \"mystring\", \"mystring\", true},\n\t{\"string_miss\", \"mystring\", \"nonsense\", false},\n\t{\"simple_struct_hit\", \"two\", \"two\", true},\n\t{\"simple_struct_miss\", \"two\", \"noway\", false},\n}\n\nfunc TestGet(t *testing.T) {\n\tt.Parallel()\n\n\tfor _, tt := range getTests {\n\t\tlru := New[string](0)\n\t\tlru.Add(tt.keyToAdd, 1234)\n\t\tval, ok := lru.Get(tt.keyToGet)\n\t\tif ok != tt.expectedOk {\n\t\t\tt.Fatalf(\"%s: cache hit = %v; want %v\", tt.name, ok, !ok)\n\t\t} else if ok && val != 1234 {\n\t\t\tt.Fatalf(\"%s expected get to return 1234 but got %v\", tt.name, val)\n\t\t}\n\t}\n}\n\nfunc TestRemove(t *testing.T) {\n\tt.Parallel()\n\n\tlru := New[string](0)\n\tlru.Add(\"mystring\", 1234)\n\tif val, ok := lru.Get(\"mystring\"); !ok {\n\t\tt.Fatal(\"TestRemove returned no match\")\n\t} else if val != 1234 {\n\t\tt.Fatalf(\"TestRemove failed.  
Expected %d, got %v\", 1234, val)\n\t}\n\n\tlru.Remove(\"mystring\")\n\tif _, ok := lru.Get(\"mystring\"); ok {\n\t\tt.Fatal(\"TestRemove returned a removed entry\")\n\t}\n}\n\n// TestStructKey verifies that struct keys work correctly with the generic cache.\nfunc TestStructKey(t *testing.T) {\n\tt.Parallel()\n\n\ttype compositeKey struct {\n\t\tA string\n\t\tB string\n\t}\n\n\tc := New[compositeKey](0)\n\tk1 := compositeKey{A: \"ab\", B: \"cd\"}\n\tk2 := compositeKey{A: \"a\", B: \"bcd\"}\n\n\tc.Add(k1, \"value1\")\n\tc.Add(k2, \"value2\")\n\n\tif val, ok := c.Get(k1); !ok || val != \"value1\" {\n\t\tt.Fatalf(\"expected value1 for k1, got %v (ok=%v)\", val, ok)\n\t}\n\tif val, ok := c.Get(k2); !ok || val != \"value2\" {\n\t\tt.Fatalf(\"expected value2 for k2, got %v (ok=%v)\", val, ok)\n\t}\n\n\t// Verify that keys with same concatenation but different field boundaries\n\t// are distinct (this was a bug with string concatenation keys).\n\tif c.Len() != 2 {\n\t\tt.Fatalf(\"expected 2 entries, got %d\", c.Len())\n\t}\n}\n\ntype stmtKey struct {\n\thostID    string\n\tkeyspace  string\n\tstatement string\n}\n\n// BenchmarkStructKeyLookup benchmarks the hot path: looking up a struct key\n// in a populated cache.\nfunc BenchmarkStructKeyLookup(b *testing.B) {\n\tc := New[stmtKey](1000)\n\tkey := stmtKey{\n\t\thostID:    \"550e8400-e29b-41d4-a716-446655440000\",\n\t\tkeyspace:  \"my_keyspace\",\n\t\tstatement: \"SELECT id, name, email FROM users WHERE id = ?\",\n\t}\n\tc.Add(key, \"prepared-id\")\n\n\tb.ReportAllocs()\n\tfor i := 0; i < b.N; i++ {\n\t\tc.Get(key)\n\t}\n}\n\n// BenchmarkStringKeyLookup benchmarks the old approach: looking up a\n// concatenated string key in a populated cache.\nfunc BenchmarkStringKeyLookup(b *testing.B) {\n\tc := New[string](1000)\n\tkey := \"550e8400-e29b-41d4-a716-446655440000\" + \"my_keyspace\" + \"SELECT id, name, email FROM users WHERE id = ?\"\n\tc.Add(key, \"prepared-id\")\n\n\tb.ReportAllocs()\n\tfor i := 0; i < b.N; i++ 
{\n\t\tc.Get(key)\n\t}\n}\n\n// BenchmarkStructKeyInsert benchmarks inserting entries with struct keys,\n// including eviction when the cache is full.\nfunc BenchmarkStructKeyInsert(b *testing.B) {\n\tc := New[stmtKey](1000)\n\n\tb.ReportAllocs()\n\tfor i := 0; i < b.N; i++ {\n\t\tk := stmtKey{\n\t\t\thostID:    \"550e8400-e29b-41d4-a716-446655440000\",\n\t\t\tkeyspace:  \"my_keyspace\",\n\t\t\tstatement: fmt.Sprintf(\"SELECT id FROM users WHERE id = %d\", i),\n\t\t}\n\t\tc.Add(k, \"prepared-id\")\n\t}\n}\n\n// BenchmarkStringKeyInsert benchmarks inserting entries with concatenated\n// string keys, including the per-query allocation cost of key construction.\nfunc BenchmarkStringKeyInsert(b *testing.B) {\n\tc := New[string](1000)\n\n\tb.ReportAllocs()\n\tfor i := 0; i < b.N; i++ {\n\t\tk := fmt.Sprintf(\"%s%s%s\", \"550e8400-e29b-41d4-a716-446655440000\", \"my_keyspace\", fmt.Sprintf(\"SELECT id FROM users WHERE id = %d\", i))\n\t\tc.Add(k, \"prepared-id\")\n\t}\n}\n"
  },
  {
    "path": "internal/murmur/murmur.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage murmur\n\nconst (\n\tc1    int64 = -8663945395140668459 // 0x87c37b91114253d5\n\tc2    int64 = 5545529020109919103  // 0x4cf5ad432745937f\n\tfmix1 int64 = -49064778989728563   // 0xff51afd7ed558ccd\n\tfmix2 int64 = -4265267296055464877 // 0xc4ceb9fe1a85ec53\n)\n\nfunc fmix(n int64) int64 {\n\t// cast to unsigned for logical right bitshift (to match C* MM3 implementation)\n\tn ^= int64(uint64(n) >> 33)\n\tn *= fmix1\n\tn ^= int64(uint64(n) >> 33)\n\tn *= fmix2\n\tn ^= int64(uint64(n) >> 33)\n\n\treturn n\n}\n\nfunc block(p byte) int64 {\n\treturn int64(int8(p))\n}\n\nfunc rotl(x int64, r uint8) int64 {\n\t// cast to unsigned for logical right bitshift (to match C* MM3 implementation)\n\treturn (x << r) | (int64)((uint64(x) >> (64 - r)))\n}\n\nfunc Murmur3H1(data []byte) int64 {\n\tlength := len(data)\n\n\tvar h1, h2, k1, k2 int64\n\n\t// body\n\tnBlocks := length / 16\n\tfor i := 0; i < 
nBlocks; i++ {\n\t\tk1, k2 = getBlock(data, i)\n\n\t\tk1 *= c1\n\t\tk1 = rotl(k1, 31)\n\t\tk1 *= c2\n\t\th1 ^= k1\n\n\t\th1 = rotl(h1, 27)\n\t\th1 += h2\n\t\th1 = h1*5 + 0x52dce729\n\n\t\tk2 *= c2\n\t\tk2 = rotl(k2, 33)\n\t\tk2 *= c1\n\t\th2 ^= k2\n\n\t\th2 = rotl(h2, 31)\n\t\th2 += h1\n\t\th2 = h2*5 + 0x38495ab5\n\t}\n\n\t// tail\n\ttail := data[nBlocks*16:]\n\tk1 = 0\n\tk2 = 0\n\tswitch length & 15 {\n\tcase 15:\n\t\tk2 ^= block(tail[14]) << 48\n\t\tfallthrough\n\tcase 14:\n\t\tk2 ^= block(tail[13]) << 40\n\t\tfallthrough\n\tcase 13:\n\t\tk2 ^= block(tail[12]) << 32\n\t\tfallthrough\n\tcase 12:\n\t\tk2 ^= block(tail[11]) << 24\n\t\tfallthrough\n\tcase 11:\n\t\tk2 ^= block(tail[10]) << 16\n\t\tfallthrough\n\tcase 10:\n\t\tk2 ^= block(tail[9]) << 8\n\t\tfallthrough\n\tcase 9:\n\t\tk2 ^= block(tail[8])\n\n\t\tk2 *= c2\n\t\tk2 = rotl(k2, 33)\n\t\tk2 *= c1\n\t\th2 ^= k2\n\n\t\tfallthrough\n\tcase 8:\n\t\tk1 ^= block(tail[7]) << 56\n\t\tfallthrough\n\tcase 7:\n\t\tk1 ^= block(tail[6]) << 48\n\t\tfallthrough\n\tcase 6:\n\t\tk1 ^= block(tail[5]) << 40\n\t\tfallthrough\n\tcase 5:\n\t\tk1 ^= block(tail[4]) << 32\n\t\tfallthrough\n\tcase 4:\n\t\tk1 ^= block(tail[3]) << 24\n\t\tfallthrough\n\tcase 3:\n\t\tk1 ^= block(tail[2]) << 16\n\t\tfallthrough\n\tcase 2:\n\t\tk1 ^= block(tail[1]) << 8\n\t\tfallthrough\n\tcase 1:\n\t\tk1 ^= block(tail[0])\n\n\t\tk1 *= c1\n\t\tk1 = rotl(k1, 31)\n\t\tk1 *= c2\n\t\th1 ^= k1\n\t}\n\n\th1 ^= int64(length)\n\th2 ^= int64(length)\n\n\th1 += h2\n\th2 += h1\n\n\th1 = fmix(h1)\n\th2 = fmix(h2)\n\n\th1 += h2\n\t// the following is extraneous since h2 is discarded\n\t// h2 += h1\n\n\treturn h1\n}\n"
  },
  {
    "path": "internal/murmur/murmur_appengine.go",
    "content": "//go:build appengine || s390x\n// +build appengine s390x\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage murmur\n\nimport \"encoding/binary\"\n\nfunc getBlock(data []byte, n int) (int64, int64) {\n\tk1 := int64(binary.LittleEndian.Uint64(data[n*16:]))\n\tk2 := int64(binary.LittleEndian.Uint64(data[(n*16)+8:]))\n\treturn k1, k2\n}\n"
  },
  {
    "path": "internal/murmur/murmur_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage murmur\n\nimport (\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"testing\"\n)\n\nfunc TestRotl(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tin, rotate, exp int64\n\t}{\n\t\t{123456789, 33, 1060485742448345088},\n\t\t{-123456789, 33, -1060485733858410497},\n\t\t{-12345678987654, 33, 1756681988166642059},\n\n\t\t{7210216203459776512, 31, -4287945813905642825},\n\t\t{2453826951392495049, 27, -2013042863942636044},\n\t\t{270400184080946339, 33, -3553153987756601583},\n\t\t{2060965185473694757, 31, 6290866853133484661},\n\t\t{3075794793055692309, 33, -3158909918919076318},\n\t\t{-6486402271863858009, 31, 405973038345868736},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(fmt.Sprintf(\"%d >> %d\", test.in, test.rotate), func(t *testing.T) {\n\t\t\tif v := rotl(test.in, uint8(test.rotate)); v != test.exp {\n\t\t\t\tt.Fatalf(\"expected %d 
got %d\", test.exp, v)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestFmix(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tin, exp int64\n\t}{\n\t\t{123456789, -8107560010088384378},\n\t\t{-123456789, -5252787026298255965},\n\t\t{-12345678987654, -1122383578793231303},\n\t\t{-1241537367799374202, 3388197556095096266},\n\t\t{-7566534940689533355, 4729783097411765989},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(strconv.Itoa(int(test.in)), func(t *testing.T) {\n\t\t\tif v := fmix(test.in); v != test.exp {\n\t\t\t\tt.Fatalf(\"expected %d got %d\", test.exp, v)\n\t\t\t}\n\t\t})\n\t}\n\n}\n\nfunc TestMurmur3H1_CassandraSign(t *testing.T) {\n\tt.Parallel()\n\n\tkey, err := hex.DecodeString(\"00104327529fb645dd00b883ec39ae448bb800000400066a6b00\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\th := Murmur3H1(key)\n\tconst exp int64 = -9223371632693506265\n\n\tif h != exp {\n\t\tt.Fatalf(\"expected %d got %d\", exp, h)\n\t}\n}\n\n// Test the implementation of murmur3\nfunc TestMurmur3H1(t *testing.T) {\n\tt.Parallel()\n\n\t// these examples are based on adding a index number to a sample string in\n\t// a loop. The expected values were generated by the java datastax murmur3\n\t// implementation. 
The number of examples here of increasing lengths ensure\n\t// test coverage of all tail-length branches in the murmur3 algorithm\n\tseriesExpected := [...]uint64{\n\t\t0x0000000000000000, // \"\"\n\t\t0x2ac9debed546a380, // \"0\"\n\t\t0x649e4eaa7fc1708e, // \"01\"\n\t\t0xce68f60d7c353bdb, // \"012\"\n\t\t0x0f95757ce7f38254, // \"0123\"\n\t\t0x0f04e459497f3fc1, // \"01234\"\n\t\t0x88c0a92586be0a27, // \"012345\"\n\t\t0x13eb9fb82606f7a6, // \"0123456\"\n\t\t0x8236039b7387354d, // \"01234567\"\n\t\t0x4c1e87519fe738ba, // \"012345678\"\n\t\t0x3f9652ac3effeb24, // \"0123456789\"\n\t\t0x3f33760ded9006c6, // \"01234567890\"\n\t\t0xaed70a6631854cb1, // \"012345678901\"\n\t\t0x8a299a8f8e0e2da7, // \"0123456789012\"\n\t\t0x624b675c779249a6, // \"01234567890123\"\n\t\t0xa4b203bb1d90b9a3, // \"012345678901234\"\n\t\t0xa3293ad698ecb99a, // \"0123456789012345\"\n\t\t0xbc740023dbd50048, // \"01234567890123456\"\n\t\t0x3fe5ab9837d25cdd, // \"012345678901234567\"\n\t\t0x2d0338c1ca87d132, // \"0123456789012345678\"\n\t}\n\tsample := \"\"\n\tfor i, expected := range seriesExpected {\n\t\tassertMurmur3H1(t, []byte(sample), expected)\n\n\t\tsample = sample + strconv.Itoa(i%10)\n\t}\n\n\t// Here are some test examples from other driver implementations\n\tassertMurmur3H1(t, []byte(\"hello\"), 0xcbd8a7b341bd9b02)\n\tassertMurmur3H1(t, []byte(\"hello, world\"), 0x342fac623a5ebc8e)\n\tassertMurmur3H1(t, []byte(\"19 Jan 2038 at 3:14:07 AM\"), 0xb89e5988b737affc)\n\tassertMurmur3H1(t, []byte(\"The quick brown fox jumps over the lazy dog.\"), 0xcd99481f9ee902c9)\n}\n\n// helper function for testing the murmur3 implementation\nfunc assertMurmur3H1(t *testing.T, data []byte, expected uint64) {\n\tactual := Murmur3H1(data)\n\tif actual != int64(expected) {\n\t\tt.Errorf(\"Expected h1 = %x for data = %x, but was %x\", int64(expected), data, actual)\n\t}\n}\n\n// Benchmark of the performance of the murmur3 implementation\nfunc BenchmarkMurmur3H1(b *testing.B) {\n\tdata := make([]byte, 1024)\n\tfor 
i := 0; i < 1024; i++ {\n\t\tdata[i] = byte(i)\n\t}\n\n\tb.ResetTimer()\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\th1 := Murmur3H1(data)\n\t\t\tif h1 != int64(7627370222079200297) {\n\t\t\t\tb.Fatalf(\"expected %d got %d\", int64(7627370222079200297), h1)\n\t\t\t}\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "internal/murmur/murmur_unsafe.go",
    "content": "//go:build !appengine && !s390x\n// +build !appengine,!s390x\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage murmur\n\nimport (\n\t\"unsafe\"\n)\n\nfunc getBlock(data []byte, n int) (int64, int64) {\n\tblock := (*[2]int64)(unsafe.Pointer(&data[n*16]))\n\n\tk1 := block[0]\n\tk2 := block[1]\n\treturn k1, k2\n}\n"
  },
  {
    "path": "internal/streams/streams.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage streams\n\nimport (\n\t\"math\"\n\t\"strconv\"\n\t\"sync/atomic\"\n)\n\nconst bucketBits = 64\n\n// IDGenerator tracks and allocates streams which are in use.\ntype IDGenerator struct {\n\t// streams is a bitset where each bit represents a stream, a 1 implies in use\n\tstreams      []uint64\n\tNumStreams   int\n\tinuseStreams int32\n\tnumBuckets   uint32\n\toffset       uint32\n}\n\nfunc New() *IDGenerator {\n\treturn NewLimited(32768)\n}\n\nfunc NewLimited(maxStreams int) *IDGenerator {\n\t// Round up maxStreams to a nearest\n\t// multiple of 64\n\tmaxStreams = ((maxStreams + 63) / 64) * 64\n\n\tbuckets := maxStreams / 64\n\t// reserve stream 0\n\tstreams := make([]uint64, buckets)\n\tstreams[0] = 1 << 63\n\n\treturn &IDGenerator{\n\t\tNumStreams: maxStreams,\n\t\tstreams:    streams,\n\t\tnumBuckets: uint32(buckets),\n\t\toffset:     uint32(buckets) - 1,\n\t}\n}\n\nfunc 
streamFromBucket(bucket, streamInBucket int) int {\n\treturn (bucket * bucketBits) + streamInBucket\n}\n\nfunc (s *IDGenerator) GetStream() (int, bool) {\n\t// Reduce collisions by offsetting the starting point\n\toffset := atomic.AddUint32(&s.offset, 1)\n\n\tfor i := uint32(0); i < s.numBuckets; i++ {\n\t\tpos := int((i + offset) % s.numBuckets)\n\n\t\tbucket := atomic.LoadUint64(&s.streams[pos])\n\t\tif bucket == math.MaxUint64 {\n\t\t\t// all streams in use\n\t\t\tcontinue\n\t\t}\n\n\t\tfor j := 0; j < bucketBits; j++ {\n\t\t\tmask := uint64(1 << streamOffset(j))\n\t\t\tfor bucket&mask == 0 {\n\t\t\t\tif atomic.CompareAndSwapUint64(&s.streams[pos], bucket, bucket|mask) {\n\t\t\t\t\tatomic.AddInt32(&s.inuseStreams, 1)\n\t\t\t\t\treturn streamFromBucket(int(pos), j), true\n\t\t\t\t}\n\t\t\t\tbucket = atomic.LoadUint64(&s.streams[pos])\n\t\t\t}\n\t\t}\n\t}\n\n\treturn 0, false\n}\n\nfunc bitfmt(b uint64) string {\n\treturn strconv.FormatUint(b, 16)\n}\n\n// returns the bucket offset of a given stream\nfunc bucketOffset(i int) int {\n\treturn i / bucketBits\n}\n\nfunc streamOffset(stream int) uint64 {\n\treturn bucketBits - uint64(stream%bucketBits) - 1\n}\n\nfunc isSet(bits uint64, stream int) bool {\n\treturn bits>>streamOffset(stream)&1 == 1\n}\n\nfunc (s *IDGenerator) isSet(stream int) bool {\n\tbits := atomic.LoadUint64(&s.streams[bucketOffset(stream)])\n\treturn isSet(bits, stream)\n}\n\nfunc (s *IDGenerator) String() string {\n\tsize := s.numBuckets * (bucketBits + 1)\n\tbuf := make([]byte, 0, size)\n\tfor i := 0; i < int(s.numBuckets); i++ {\n\t\tbits := atomic.LoadUint64(&s.streams[i])\n\t\tbuf = append(buf, bitfmt(bits)...)\n\t\tbuf = append(buf, ' ')\n\t}\n\treturn string(buf[: size-1 : size-1])\n}\n\nfunc (s *IDGenerator) Clear(stream int) (inuse bool) {\n\toffset := bucketOffset(stream)\n\tbucket := atomic.LoadUint64(&s.streams[offset])\n\n\tmask := uint64(1) << streamOffset(stream)\n\tif bucket&mask != mask {\n\t\t// already cleared\n\t\treturn 
false\n\t}\n\n\tfor !atomic.CompareAndSwapUint64(&s.streams[offset], bucket, bucket & ^mask) {\n\t\tbucket = atomic.LoadUint64(&s.streams[offset])\n\t\tif bucket&mask != mask {\n\t\t\t// already cleared\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// TODO: make this account for 0 stream being reserved\n\tif atomic.AddInt32(&s.inuseStreams, -1) < 0 {\n\t\t// TODO(zariel): remove this\n\t\tpanic(\"negative streams inuse\")\n\t}\n\n\treturn true\n}\n\nfunc (s *IDGenerator) Available() int {\n\treturn s.NumStreams - int(atomic.LoadInt32(&s.inuseStreams)) - 1\n}\n\nfunc (s *IDGenerator) InUse() int {\n\treturn int(atomic.LoadInt32(&s.inuseStreams))\n}\n"
  },
  {
    "path": "internal/streams/streams_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage streams\n\nimport (\n\t\"math\"\n\t\"strconv\"\n\t\"sync/atomic\"\n\t\"testing\"\n)\n\nfunc TestUsesAllStreams(t *testing.T) {\n\tt.Parallel()\n\n\tstreams := New()\n\n\tgot := make(map[int]struct{})\n\n\tfor i := 1; i < streams.NumStreams; i++ {\n\t\tstream, ok := streams.GetStream()\n\t\tif !ok {\n\t\t\tt.Fatalf(\"unable to get stream %d\", i)\n\t\t}\n\n\t\tif _, ok = got[stream]; ok {\n\t\t\tt.Fatalf(\"got an already allocated stream: %d\", stream)\n\t\t}\n\t\tgot[stream] = struct{}{}\n\n\t\tif !streams.isSet(stream) {\n\t\t\tbucket := atomic.LoadUint64(&streams.streams[bucketOffset(stream)])\n\t\t\tt.Logf(\"bucket=%d: %s\\n\", bucket, strconv.FormatUint(bucket, 2))\n\t\t\tt.Fatalf(\"stream not set: %d\", stream)\n\t\t}\n\t}\n\n\tfor i := 1; i < streams.NumStreams; i++ {\n\t\tif _, ok := got[i]; !ok {\n\t\t\tt.Errorf(\"did not use stream %d\", 
i)\n\t\t}\n\t}\n\tif _, ok := got[0]; ok {\n\t\tt.Fatal(\"expected to not use stream 0\")\n\t}\n\n\tfor i, bucket := range streams.streams {\n\t\tif bucket != math.MaxUint64 {\n\t\t\tt.Errorf(\"did not use all streams in offset=%d bucket=%s\", i, bitfmt(bucket))\n\t\t}\n\t}\n}\n\nfunc TestFullStreams(t *testing.T) {\n\tt.Parallel()\n\n\tstreams := New()\n\tfor i := range streams.streams {\n\t\tstreams.streams[i] = math.MaxUint64\n\t}\n\n\tstream, ok := streams.GetStream()\n\tif ok {\n\t\tt.Fatalf(\"should not get stream when all in use: stream=%d\", stream)\n\t}\n}\n\nfunc TestClearStreams(t *testing.T) {\n\tt.Parallel()\n\n\tstreams := New()\n\tfor i := range streams.streams {\n\t\tstreams.streams[i] = math.MaxUint64\n\t}\n\tstreams.inuseStreams = int32(streams.NumStreams)\n\n\tfor i := 0; i < streams.NumStreams; i++ {\n\t\tstreams.Clear(i)\n\t}\n\n\tfor i, bucket := range streams.streams {\n\t\tif bucket != 0 {\n\t\t\tt.Errorf(\"did not clear streams in offset=%d bucket=%s\", i, bitfmt(bucket))\n\t\t}\n\t}\n}\n\nfunc TestDoubleClear(t *testing.T) {\n\tt.Parallel()\n\n\tstreams := New()\n\tstream, ok := streams.GetStream()\n\tif !ok {\n\t\tt.Fatal(\"did not get stream\")\n\t}\n\n\tif !streams.Clear(stream) {\n\t\tt.Fatalf(\"stream not indicated as in use: %d\", stream)\n\t}\n\tif streams.Clear(stream) {\n\t\tt.Fatalf(\"stream not as in use after clear: %d\", stream)\n\t}\n}\n\nfunc BenchmarkConcurrentUse(b *testing.B) {\n\tstreams := New()\n\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tstream, ok := streams.GetStream()\n\t\t\tif !ok {\n\t\t\t\tb.Error(\"unable to get stream\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !streams.Clear(stream) {\n\t\t\t\tb.Errorf(\"stream was already cleared: %d\", stream)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc TestStreamOffset(t *testing.T) {\n\tt.Parallel()\n\n\ttests := [...]struct {\n\t\tn   int\n\t\toff uint64\n\t}{\n\t\t{0, 63},\n\t\t{1, 62},\n\t\t{2, 61},\n\t\t{3, 60},\n\t\t{63, 0},\n\t\t{64, 
63},\n\n\t\t{128, 63},\n\t}\n\n\tfor _, test := range tests {\n\t\tif off := streamOffset(test.n); off != test.off {\n\t\t\tt.Errorf(\"n=%d expected %d got %d\", test.n, test.off, off)\n\t\t}\n\t}\n}\n\nfunc TestIsSet(t *testing.T) {\n\tt.Parallel()\n\n\ttests := [...]struct {\n\t\tstream int\n\t\tbucket uint64\n\t\tset    bool\n\t}{\n\t\t{0, 0, false},\n\t\t{0, 1 << 63, true},\n\t\t{1, 0, false},\n\t\t{1, 1 << 62, true},\n\t\t{63, 1, true},\n\t\t{64, 1 << 63, true},\n\t\t{0, 0x8000000000000000, true},\n\t}\n\n\tfor i, test := range tests {\n\t\tif set := isSet(test.bucket, test.stream); set != test.set {\n\t\t\tt.Errorf(\"[%d] stream=%d expected %v got %v\", i, test.stream, test.set, set)\n\t\t}\n\t}\n\n\tfor i := 0; i < bucketBits; i++ {\n\t\tif !isSet(math.MaxUint64, i) {\n\t\t\tvar shift uint64 = math.MaxUint64 >> streamOffset(i)\n\t\t\tt.Errorf(\"expected isSet for all i=%d got=%d\", i, shift)\n\t\t}\n\t}\n}\n\nfunc TestBucketOffset(t *testing.T) {\n\tt.Parallel()\n\n\ttests := [...]struct {\n\t\tn      int\n\t\tbucket int\n\t}{\n\t\t{0, 0},\n\t\t{1, 0},\n\t\t{63, 0},\n\t\t{64, 1},\n\t}\n\n\tfor _, test := range tests {\n\t\tif bucket := bucketOffset(test.n); bucket != test.bucket {\n\t\t\tt.Errorf(\"n=%d expected %v got %v\", test.n, test.bucket, bucket)\n\t\t}\n\t}\n}\n\nfunc TestStreamFromBucket(t *testing.T) {\n\tt.Parallel()\n\n\ttests := [...]struct {\n\t\tbucket int\n\t\tpos    int\n\t\tstream int\n\t}{\n\t\t{0, 0, 0},\n\t\t{0, 1, 1},\n\t\t{0, 2, 2},\n\t\t{0, 63, 63},\n\t\t{1, 0, 64},\n\t\t{1, 1, 65},\n\t}\n\n\tfor _, test := range tests {\n\t\tif stream := streamFromBucket(test.bucket, test.pos); stream != test.stream {\n\t\t\tt.Errorf(\"bucket=%d pos=%d expected %v got %v\", test.bucket, test.pos, test.stream, stream)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "internal/tests/common.go",
    "content": "package tests\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com/google/uuid\"\n)\n\nfunc AssertTrue(t *testing.T, description string, value bool) {\n\tt.Helper()\n\tif !value {\n\t\tt.Fatalf(\"expected %s to be true\", description)\n\t}\n}\n\nfunc AssertEqual(t *testing.T, description string, expected, actual any) {\n\tt.Helper()\n\tif expected != actual {\n\t\tt.Fatalf(\"expected %s to be (%+v) but was (%+v) instead\", description, expected, actual)\n\t}\n}\n\nfunc AssertDeepEqual(t *testing.T, description string, expected, actual any) {\n\tt.Helper()\n\tif !reflect.DeepEqual(expected, actual) {\n\t\tt.Fatalf(\"expected %s to be (%+v) but was (%+v) instead\", description, expected, actual)\n\t}\n}\n\nfunc AssertNil(t *testing.T, description string, actual any) {\n\tt.Helper()\n\tif actual != nil {\n\t\tt.Fatalf(\"expected %s to be (nil) but was (%+v) instead\", description, actual)\n\t}\n}\n\nfunc RandomUUID() string {\n\tval, err := uuid.NewRandom()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to generate UUID: %s\", err.Error()))\n\t}\n\treturn val.String()\n}\n\n// GenerateHostNames generates a slice of host names with the format \"host0\", \"host1\", ..., \"hostN-1\",\n// where N is the specified hostCount.\n//\n// Parameters:\n//\n//\thostCount - the number of host names to generate.\n//\n// Returns:\n//\n//\tA slice of strings containing host names.\nfunc GenerateHostNames(hostCount int) []string {\n\thosts := make([]string, hostCount)\n\tfor i := 0; i < hostCount; i++ {\n\t\thosts[i] = \"host\" + strconv.Itoa(i)\n\t}\n\treturn hosts\n}\n"
  },
  {
    "path": "internal/tests/err_equal.go",
    "content": "package tests\n\nfunc ErrEqual(err1, err2 error) bool {\n\tif err1 != nil && err2 != nil {\n\t\treturn err1.Error() == err2.Error()\n\t}\n\treturn err1 == nil && err2 == nil\n}\n"
  },
  {
    "path": "internal/tests/mock/mock_framer.go",
    "content": "package mock\n\ntype MockFramer struct {\n\tData [][]byte\n\tpos  int\n}\n\nfunc (m *MockFramer) ReadBytesInternal() ([]byte, error) {\n\tif m.pos < len(m.Data) {\n\t\tm.pos = m.pos + 1\n\t\treturn m.Data[m.pos-1], nil\n\t}\n\treturn []byte{}, nil\n}\n\nfunc (*MockFramer) GetCustomPayload() map[string][]byte { return map[string][]byte{} }\nfunc (*MockFramer) GetHeaderWarnings() []string         { return []string{} }\nfunc (*MockFramer) Release()                            {}\n"
  },
  {
    "path": "internal/tests/rand.go",
    "content": "package tests\n\nimport (\n\t\"math/rand\"\n\t\"sync\"\n\t\"time\"\n)\n\n// RandInterface defines the thread-safe random number generator interface.\n// It abstracts all methods provided by ThreadSafeRand.\ntype RandInterface interface {\n\tUint64() uint64\n\tUint32() uint32\n\tInt() int\n\tIntn(n int) int\n\tInt63() int64\n\tInt63n(n int64) int64\n\tInt31() int32\n\tInt31n(n int32) int32\n\tFloat64() float64\n\tFloat32() float32\n\tExpFloat64() float64\n\tNormFloat64() float64\n\tShuffle(n int, swap func(i, j int))\n\tRead(p []byte) (n int, err error)\n}\n\n// RandomTokens generates a slice of n random int64 tokens using a thread-safe random number generator.\n//\n// Parameters:\n//\n//\tn - the number of random tokens to generate.\n//\n// Returns:\n//\n//\tA slice of n randomly generated int64 tokens.\nfunc RandomTokens(rnd RandInterface, n int) []int64 {\n\tvar tokens []int64\n\tfor i := 0; i < n; i++ {\n\t\ttokens = append(tokens, rnd.Int63())\n\t}\n\treturn tokens\n}\n\n// ShuffledIndexes returns a slice containing integers from 0 to n-1 in random order.\n//\n// It uses a thread-safe random number generator to perform an in-place shuffle.\n//\n// Parameters:\n//\n//\tn - the number of elements to include in the shuffled list.\n//\n// Returns:\n//\n//\tA randomly shuffled slice of integers from 0 to n-1.\nfunc ShuffledIndexes(rnd RandInterface, n int) []int {\n\tindexes := make([]int, n)\n\tfor i := range indexes {\n\t\tindexes[i] = i\n\t}\n\trnd.Shuffle(n, func(i, j int) {\n\t\tindexes[i], indexes[j] = indexes[j], indexes[i]\n\t})\n\treturn indexes\n}\n\n// ThreadSafeRand provides a concurrency-safe wrapper around math/rand.Rand.\n// It allows safe usage of random number generation methods from multiple goroutines.\n// All access to the underlying rand.Rand is synchronized via a mutex.\ntype ThreadSafeRand struct {\n\tr   *rand.Rand\n\tmux sync.Mutex\n}\n\n// NewThreadSafeRand creates and returns a new instance of ThreadSafeRand,\n// 
initialized with the given seed. The resulting generator is safe for concurrent use.\nfunc NewThreadSafeRand(seed int64) *ThreadSafeRand {\n\treturn &ThreadSafeRand{\n\t\tr: rand.New(rand.NewSource(seed)),\n\t}\n}\n\nfunc (r *ThreadSafeRand) Uint64() uint64 {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\treturn r.r.Uint64()\n}\n\nfunc (r *ThreadSafeRand) Uint32() uint32 {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\treturn r.r.Uint32()\n}\n\nfunc (r *ThreadSafeRand) Int() int {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\treturn r.r.Int()\n}\n\nfunc (r *ThreadSafeRand) Intn(n int) int {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\treturn r.r.Intn(n)\n}\n\nfunc (r *ThreadSafeRand) Int63() int64 {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\treturn r.r.Int63()\n}\n\nfunc (r *ThreadSafeRand) Int63n(n int64) int64 {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\treturn r.r.Int63n(n)\n}\n\nfunc (r *ThreadSafeRand) Int31() int32 {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\treturn r.r.Int31()\n}\n\nfunc (r *ThreadSafeRand) Int31n(n int32) int32 {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\treturn r.r.Int31n(n)\n}\n\nfunc (r *ThreadSafeRand) Float64() float64 {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\treturn r.r.Float64()\n}\n\nfunc (r *ThreadSafeRand) Float32() float32 {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\treturn r.r.Float32()\n}\n\nfunc (r *ThreadSafeRand) ExpFloat64() float64 {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\treturn r.r.ExpFloat64()\n}\n\nfunc (r *ThreadSafeRand) NormFloat64() float64 {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\treturn r.r.NormFloat64()\n}\n\nfunc (r *ThreadSafeRand) Shuffle(n int, swap func(i, j int)) {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\tr.r.Shuffle(n, swap)\n}\n\nfunc (r *ThreadSafeRand) Read(p []byte) (n int, err error) {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\treturn r.r.Read(p)\n}\n\nvar seededRand *rand.Rand = rand.New(rand.NewSource(time.Now().UnixNano()))\n\nconst randCharset = 
\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\nfunc RandomText(size int) string {\n\tresult := make([]byte, size)\n\tfor i := range result {\n\t\tresult[i] = randCharset[rand.Intn(len(randCharset))]\n\t}\n\treturn string(result)\n}\n"
  },
  {
    "path": "internal/tests/serialization/mod/all.go",
    "content": "package mod\n\nvar All = []Mod{CustomType, Reference, CustomTypeRef}\n\n// Mod - value modifiers.\ntype Mod func(vals ...any) []any\n\ntype Values []any\n\nfunc (v Values) AddVariants(mods ...Mod) Values {\n\tout := append(make([]any, 0), v...)\n\tfor _, mod := range mods {\n\t\tout = append(out, mod(v...)...)\n\t}\n\treturn out\n}\n"
  },
  {
    "path": "internal/tests/serialization/mod/custom.go",
    "content": "package mod\n\ntype (\n\tBool bool\n\n\tInt8  int8\n\tInt16 int16\n\tInt32 int32\n\tInt64 int64\n\tInt   int\n\n\tUint8  uint8\n\tUint16 uint16\n\tUint32 uint32\n\tUint64 uint64\n\tUint   uint\n\n\tFloat32 float32\n\tFloat64 float64\n\n\tString string\n\n\tBytes   []byte\n\tBytes3  [3]byte\n\tBytes4  [4]byte\n\tBytes5  [5]byte\n\tBytes15 [15]byte\n\tBytes16 [16]byte\n\tBytes17 [17]byte\n\n\tSliceInt16   []int16\n\tSliceInt16R  []*int16\n\tSliceInt16C  []Int16\n\tSliceInt16CR []*Int16\n\n\tSliceInt32   []int32\n\tSliceInt32R  []*int32\n\tSliceInt32C  []Int32\n\tSliceInt32CR []*Int32\n\n\tSliceAny []any\n\n\tArr1Int16   [1]int16\n\tArr1Int16R  [1]*int16\n\tArr1Int16C  [1]Int16\n\tArr1Int16CR [1]*Int16\n\n\tArr1Int32   [1]int32\n\tArr1Int32R  [1]*int32\n\tArr1Int32C  [1]Int32\n\tArr1Int32CR [1]*Int32\n\n\tArrAny [1]any\n\n\tMapInt16   map[int16]int16\n\tMapInt16R  map[int16]*int16\n\tMapInt16C  map[Int16]Int16\n\tMapInt16CR map[Int16]*Int16\n\n\tMapInt32   map[int32]int32\n\tMapInt32R  map[int32]*int32\n\tMapInt32C  map[Int32]Int32\n\tMapInt32CR map[Int32]*Int32\n\n\tMapUDT map[string]any\n)\n\nvar CustomType Mod = func(vals ...any) []any {\n\tout := make([]any, 0)\n\tfor i := range vals {\n\t\tif vals[i] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tct := customType(vals[i])\n\t\tif ct != nil {\n\t\t\tout = append(out, ct)\n\t\t}\n\t}\n\treturn out\n}\n\nfunc customType(i any) any {\n\tswitch v := i.(type) {\n\tcase bool:\n\t\treturn Bool(v)\n\tcase int8:\n\t\treturn Int8(v)\n\tcase int16:\n\t\treturn Int16(v)\n\tcase int32:\n\t\treturn Int32(v)\n\tcase int64:\n\t\treturn Int64(v)\n\tcase int:\n\t\treturn Int(v)\n\tcase uint:\n\t\treturn Uint(v)\n\tcase uint8:\n\t\treturn Uint8(v)\n\tcase uint16:\n\t\treturn Uint16(v)\n\tcase uint32:\n\t\treturn Uint32(v)\n\tcase uint64:\n\t\treturn Uint64(v)\n\tcase float32:\n\t\treturn Float32(v)\n\tcase float64:\n\t\treturn Float64(v)\n\tcase string:\n\t\treturn String(v)\n\tcase []byte:\n\t\treturn Bytes(v)\n\tcase 
[3]byte:\n\t\treturn Bytes3(v)\n\tcase [4]byte:\n\t\treturn Bytes4(v)\n\tcase [5]byte:\n\t\treturn Bytes5(v)\n\tcase [15]byte:\n\t\treturn Bytes15(v)\n\tcase [16]byte:\n\t\treturn Bytes16(v)\n\tcase [17]byte:\n\t\treturn Bytes17(v)\n\tcase []int16:\n\t\treturn SliceInt16(v)\n\tcase []*int16:\n\t\treturn SliceInt16R(v)\n\tcase []Int16:\n\t\treturn SliceInt16C(v)\n\tcase []*Int16:\n\t\treturn SliceInt16CR(v)\n\tcase []int32:\n\t\treturn SliceInt32(v)\n\tcase []*int32:\n\t\treturn SliceInt32R(v)\n\tcase []Int32:\n\t\treturn SliceInt32C(v)\n\tcase []*Int32:\n\t\treturn SliceInt32CR(v)\n\tcase [1]int16:\n\t\treturn Arr1Int16(v)\n\tcase [1]*int16:\n\t\treturn Arr1Int16R(v)\n\tcase [1]Int16:\n\t\treturn Arr1Int16C(v)\n\tcase [1]*Int16:\n\t\treturn Arr1Int16CR(v)\n\tcase [1]int32:\n\t\treturn Arr1Int32(v)\n\tcase [1]*int32:\n\t\treturn Arr1Int32R(v)\n\tcase [1]Int32:\n\t\treturn Arr1Int32C(v)\n\tcase [1]*Int32:\n\t\treturn Arr1Int32CR(v)\n\tcase map[int16]int16:\n\t\treturn MapInt16(v)\n\tcase map[int16]*int16:\n\t\treturn MapInt16R(v)\n\tcase map[Int16]Int16:\n\t\treturn MapInt16C(v)\n\tcase map[Int16]*Int16:\n\t\treturn MapInt16CR(v)\n\tcase map[int32]int32:\n\t\treturn MapInt32(v)\n\tcase map[int32]*int32:\n\t\treturn MapInt32R(v)\n\tcase map[Int32]Int32:\n\t\treturn MapInt32C(v)\n\tcase map[Int32]*Int32:\n\t\treturn MapInt32CR(v)\n\tcase map[string]any:\n\t\treturn MapUDT(v)\n\tcase []any:\n\t\treturn SliceAny(v)\n\tcase [1]any:\n\t\treturn ArrAny(v)\n\tdefault:\n\t\treturn intoCustomR(i)\n\t}\n}\n\nfunc intoCustomR(i any) any {\n\tswitch v := i.(type) {\n\tcase *bool:\n\t\treturn (*Bool)(v)\n\tcase *int8:\n\t\treturn (*Int8)(v)\n\tcase *int16:\n\t\treturn (*Int16)(v)\n\tcase *int32:\n\t\treturn (*Int32)(v)\n\tcase *int64:\n\t\treturn (*Int64)(v)\n\tcase *int:\n\t\treturn (*Int)(v)\n\tcase *uint:\n\t\treturn (*Uint)(v)\n\tcase *uint8:\n\t\treturn (*Uint8)(v)\n\tcase *uint16:\n\t\treturn (*Uint16)(v)\n\tcase *uint32:\n\t\treturn (*Uint32)(v)\n\tcase *uint64:\n\t\treturn 
(*Uint64)(v)\n\tcase *float32:\n\t\treturn (*Float32)(v)\n\tcase *float64:\n\t\treturn (*Float64)(v)\n\tcase *string:\n\t\treturn (*String)(v)\n\tcase *[]byte:\n\t\treturn (*Bytes)(v)\n\tcase *[4]byte:\n\t\treturn (*Bytes4)(v)\n\tcase *[16]byte:\n\t\treturn (*Bytes16)(v)\n\tcase *[]int16:\n\t\treturn (*SliceInt16)(v)\n\tcase *[]*int16:\n\t\treturn (*SliceInt16R)(v)\n\tcase *[]Int16:\n\t\treturn (*SliceInt16C)(v)\n\tcase *[]*Int16:\n\t\treturn (*SliceInt16CR)(v)\n\tcase *[]int32:\n\t\treturn (*SliceInt32)(v)\n\tcase *[]*int32:\n\t\treturn (*SliceInt32R)(v)\n\tcase *[]Int32:\n\t\treturn (*SliceInt32C)(v)\n\tcase *[]*Int32:\n\t\treturn (*SliceInt32CR)(v)\n\tcase *[1]int16:\n\t\treturn (*Arr1Int16)(v)\n\tcase *[1]*int16:\n\t\treturn (*Arr1Int16R)(v)\n\tcase *[1]Int16:\n\t\treturn (*Arr1Int16C)(v)\n\tcase *[1]*Int16:\n\t\treturn (*Arr1Int16CR)(v)\n\tcase *[1]int32:\n\t\treturn (*Arr1Int32)(v)\n\tcase *[1]*int32:\n\t\treturn (*Arr1Int32R)(v)\n\tcase *[1]Int32:\n\t\treturn (*Arr1Int32C)(v)\n\tcase *[1]*Int32:\n\t\treturn (*Arr1Int32CR)(v)\n\tcase *map[int16]int16:\n\t\treturn (*MapInt16)(v)\n\tcase *map[int16]*int16:\n\t\treturn (*MapInt16R)(v)\n\tcase *map[Int16]Int16:\n\t\treturn (*MapInt16C)(v)\n\tcase *map[Int16]*Int16:\n\t\treturn (*MapInt16CR)(v)\n\tcase *map[int32]int32:\n\t\treturn (*MapInt32)(v)\n\tcase *map[int32]*int32:\n\t\treturn (*MapInt32R)(v)\n\tcase *map[Int32]Int32:\n\t\treturn (*MapInt32C)(v)\n\tcase *map[Int32]*Int32:\n\t\treturn (*MapInt32CR)(v)\n\tcase *map[string]any:\n\t\treturn (*MapUDT)(v)\n\tcase *[]any:\n\t\treturn (*SliceAny)(v)\n\tcase *[1]any:\n\t\treturn (*ArrAny)(v)\n\tdefault:\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "internal/tests/serialization/mod/custom_refs.go",
    "content": "package mod\n\nvar CustomTypeRef Mod = func(vals ...any) []any {\n\treturn Reference(CustomType(vals...)...)\n}\n"
  },
  {
    "path": "internal/tests/serialization/mod/refs.go",
    "content": "package mod\n\nimport \"reflect\"\n\nvar Reference Mod = func(vals ...any) []any {\n\tout := make([]any, 0)\n\tfor i := range vals {\n\t\tif vals[i] != nil {\n\t\t\tout = append(out, reference(vals[i]))\n\t\t}\n\t}\n\treturn out\n}\n\nfunc reference(val any) any {\n\tinV := reflect.ValueOf(val)\n\tout := reflect.New(reflect.TypeOf(val))\n\tout.Elem().Set(inV)\n\treturn out.Interface()\n}\n"
  },
  {
    "path": "internal/tests/serialization/pointers.go",
    "content": "package serialization\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n)\n\n// errFirstPtrChanged this error indicates that a double or single reference was passed to the Unmarshal function\n// (example (**int)(**0) or (*int)(*0)) and Unmarshal overwritten first reference.\nvar errFirstPtrChanged = errors.New(\"unmarshal function rewrote first pointer\")\n\n// errSecondPtrNotChanged this error indicates that a double reference was passed to the Unmarshal function\n// (example (**int)(**0)) and the function did not overwrite the second reference.\n// Of course, it's not friendly to the garbage collector, overwriting references to values all the time,\n// but this is the current implementation `gocql` and changing it can lead to unexpected results in some cases.\nvar errSecondPtrNotChanged = errors.New(\"unmarshal function did not rewrite second pointer\")\n\nfunc getPointers(i any) *pointer {\n\trv := reflect.ValueOf(i)\n\tif rv.Kind() != reflect.Ptr {\n\t\treturn nil\n\t}\n\tout := pointer{\n\t\tFist: rv.Pointer(),\n\t}\n\trt := rv.Type()\n\tif rt.Elem().Kind() == reflect.Ptr && !rv.Elem().IsNil() {\n\t\tout.Second = rv.Elem().Pointer()\n\t}\n\treturn &out\n}\n\ntype pointer struct {\n\tFist   uintptr\n\tSecond uintptr\n}\n\nfunc (p *pointer) NotNil() bool {\n\treturn p != nil\n}\n\n// Valid validates if pointers has been manipulated by unmarshal functions in an expected manner:\n// Fist pointer should not be overwritten,\n// Second pointer, if applicable, should be overwritten.\nfunc (p *pointer) Valid(v any) error {\n\tp2 := getPointers(v)\n\tif p.Fist != p2.Fist {\n\t\treturn errFirstPtrChanged\n\t}\n\tif p.Second != 0 && p2.Second != 0 && p2.Second == p.Second {\n\t\treturn errSecondPtrNotChanged\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "internal/tests/serialization/pointers_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization\n\nimport \"testing\"\n\nfunc Test1Pointers(t *testing.T) {\n\tt.Parallel()\n\n\tval1 := new(int16)\n\t*val1 = int16(0)\n\ttestPtr := getPointers(val1)\n\n\t// the first pointer has not been changed - it must be not error.\n\tif err := testPtr.Valid(val1); err != nil {\n\t\tt.Error(\"valid function not should return error\")\n\t}\n\n\tval2 := new(int16)\n\t// the first pointer has been changed - it must be an error.\n\tif err := testPtr.Valid(val2); err == nil {\n\t\tt.Error(\"valid function should return error\")\n\t}\n}\n\nfunc Test2Pointers(t *testing.T) {\n\tt.Parallel()\n\n\tval1 := new(*int16)\n\t*val1 = new(int16)\n\ttestPtr := getPointers(val1)\n\t// the first pointer has not been changed - it must be not error,\n\t// but the second pointer has not been changed too - it must be an error.\n\tif err := testPtr.Valid(val1); err == nil {\n\t\tt.Error(\"valid function should return error\")\n\t}\n\n\t*val1 = new(int16)\n\t// the first pointer has not been changed - it must be not error,\n\t// the second pointer has been changed - it must be not error.\n\tif err := testPtr.Valid(val1); err != nil {\n\t\tt.Error(\"valid function not should return error\")\n\t}\n}\n"
  },
  {
    "path": "internal/tests/serialization/set_negative_marshal.go",
    "content": "package serialization\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"runtime/debug\"\n\t\"testing\"\n)\n\n// NegativeMarshalSet is a tool for marshal funcs testing for cases when the function should return an error.\ntype NegativeMarshalSet struct {\n\tValues      []any\n\tBrokenTypes []reflect.Type\n}\n\nfunc (s NegativeMarshalSet) Run(name string, t *testing.T, marshal func(any) ([]byte, error)) {\n\tif name == \"\" {\n\t\tt.Fatal(\"name should be provided\")\n\t}\n\tif marshal == nil {\n\t\tt.Fatal(\"marshal function should be provided\")\n\t}\n\tt.Run(name, func(t *testing.T) {\n\t\tfor m := range s.Values {\n\t\t\tval := s.Values[m]\n\n\t\t\tt.Run(stringValue(val), func(t *testing.T) {\n\t\t\t\t_, err := func() (d []byte, err error) {\n\t\t\t\t\tdefer func() {\n\t\t\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t\t\terr = panicErr{err: r.(error), stack: debug.Stack()}\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\t\t\t\t\treturn marshal(val)\n\t\t\t\t}()\n\n\t\t\t\ttestFailed := false\n\t\t\t\twasPanic := errors.As(err, &panicErr{})\n\t\t\t\tif err == nil || wasPanic {\n\t\t\t\t\ttestFailed = true\n\t\t\t\t}\n\n\t\t\t\tif isTypeOf(val, s.BrokenTypes) {\n\t\t\t\t\tif testFailed {\n\t\t\t\t\t\tt.Skipf(\"skipped because there is an unsolved problem\")\n\t\t\t\t\t}\n\t\t\t\t\tt.Fatalf(\"expected to panic or no error for (%T), but got an error\", val)\n\t\t\t\t}\n\n\t\t\t\tif testFailed {\n\t\t\t\t\tif wasPanic {\n\t\t\t\t\t\tt.Fatalf(\"was panic %s\", err)\n\t\t\t\t\t}\n\t\t\t\t\tt.Errorf(\"expected an error for (%T), but got no error\", val)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "internal/tests/serialization/set_negative_unmarshal.go",
    "content": "package serialization\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"runtime/debug\"\n\t\"testing\"\n)\n\n// NegativeUnmarshalSet is a tool for unmarshal funcs testing for cases when the function should return an error.\ntype NegativeUnmarshalSet struct {\n\tData        []byte\n\tValues      []any\n\tBrokenTypes []reflect.Type\n}\n\nfunc (s NegativeUnmarshalSet) Run(name string, t *testing.T, unmarshal func([]byte, any) error) {\n\tif name == \"\" {\n\t\tt.Fatal(\"name should be provided\")\n\t}\n\tif unmarshal == nil {\n\t\tt.Fatal(\"unmarshal function should be provided\")\n\t}\n\tt.Run(name, func(t *testing.T) {\n\t\tfor m := range s.Values {\n\t\t\tval := s.Values[m]\n\n\t\t\tif rt := reflect.TypeOf(val); rt.Kind() != reflect.Ptr {\n\t\t\t\tunmarshalIn := newRef(val)\n\t\t\t\ts.run(fmt.Sprintf(\"%T\", val), t, unmarshal, val, unmarshalIn)\n\t\t\t} else {\n\t\t\t\t// Test unmarshal to (*type)(nil)\n\t\t\t\tunmarshalIn := newRef(val)\n\t\t\t\ts.run(fmt.Sprintf(\"%T**nil\", val), t, unmarshal, val, unmarshalIn)\n\n\t\t\t\t// Test unmarshal to &type{}\n\t\t\t\tunmarshalInZero := newRefToZero(val)\n\t\t\t\ts.run(fmt.Sprintf(\"%T**zero\", val), t, unmarshal, val, unmarshalInZero)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc (s NegativeUnmarshalSet) run(name string, t *testing.T, f func([]byte, any) error, val, unmarshalIn any) {\n\tt.Run(name, func(t *testing.T) {\n\t\terr := func() (err error) {\n\t\t\tdefer func() {\n\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\terr = panicErr{err: r.(error), stack: debug.Stack()}\n\t\t\t\t}\n\t\t\t}()\n\t\t\treturn f(bytes.Clone(s.Data), unmarshalIn)\n\t\t}()\n\n\t\ttestFailed := false\n\t\twasPanic := errors.As(err, &panicErr{})\n\t\tif err == nil || wasPanic {\n\t\t\ttestFailed = true\n\t\t}\n\n\t\tif isTypeOf(val, s.BrokenTypes) {\n\t\t\tif testFailed {\n\t\t\t\tt.Skipf(\"skipped because there is an unsolved problem\")\n\t\t\t}\n\t\t\tt.Fatalf(\"expected to panic or no error for (%T), but got an error\", unmarshalIn)\n\t\t}\n\n\t\tif testFailed {\n\t\t\tif wasPanic {\n\t\t\t\tt.Fatalf(\"was panic %s\", err)\n\t\t\t}\n\t\t\tt.Errorf(\"expected an error for (%T), but got no error\", unmarshalIn)\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "internal/tests/serialization/set_positive.go",
    "content": "package serialization\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"runtime/debug\"\n\t\"testing\"\n)\n\n// PositiveSet is a tool for marshal and unmarshall funcs testing for cases when the function should no error,\n// on marshal - marshaled data from PositiveSet.Values should be equal with PositiveSet.Data,\n// on unmarshall - unmarshalled value from PositiveSet.Data should be equal with PositiveSet.Values.\ntype PositiveSet struct {\n\tData   []byte\n\tValues []any\n\n\tBrokenMarshalTypes   []reflect.Type\n\tBrokenUnmarshalTypes []reflect.Type\n}\n\nfunc (s PositiveSet) Run(name string, t *testing.T, marshal func(any) ([]byte, error), unmarshal func([]byte, any) error) {\n\tif name == \"\" {\n\t\tt.Fatal(\"name should be provided\")\n\t}\n\n\tt.Run(name, func(t *testing.T) {\n\t\tfor i := range s.Values {\n\t\t\tval := s.Values[i]\n\n\t\t\tt.Run(fmt.Sprintf(\"%T\", val), func(t *testing.T) {\n\t\t\t\tif marshal != nil {\n\t\t\t\t\ts.runMarshalTest(t, marshal, val)\n\t\t\t\t}\n\n\t\t\t\tif unmarshal != nil {\n\t\t\t\t\tif rt := reflect.TypeOf(val); rt.Kind() != reflect.Ptr {\n\t\t\t\t\t\tunmarshalIn := newRef(val)\n\t\t\t\t\t\ts.runUnmarshalTest(\"unmarshal\", t, unmarshal, val, unmarshalIn)\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Test unmarshal to (*type)(nil)\n\t\t\t\t\t\tunmarshalIn := newRef(val)\n\t\t\t\t\t\ts.runUnmarshalTest(\"unmarshal**nil\", t, unmarshal, val, unmarshalIn)\n\n\t\t\t\t\t\t// Test unmarshal to &type{}\n\t\t\t\t\t\tunmarshalInZero := newRefToZero(val)\n\t\t\t\t\t\ts.runUnmarshalTest(\"unmarshal**zero\", t, unmarshal, val, unmarshalInZero)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc (s PositiveSet) runMarshalTest(t *testing.T, f func(any) ([]byte, error), val any) {\n\tt.Run(\"marshal\", func(t *testing.T) {\n\n\t\tresult, err := func() (d []byte, err error) {\n\t\t\tdefer func() {\n\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\terr = panicErr{err: r.(error), stack: 
debug.Stack()}\n\t\t\t\t}\n\t\t\t}()\n\t\t\treturn f(val)\n\t\t}()\n\n\t\texpected := bytes.Clone(s.Data)\n\t\tif err != nil {\n\t\t\tif !errors.As(err, &panicErr{}) {\n\t\t\t\terr = errors.Join(marshalErr, err)\n\t\t\t}\n\t\t} else if !equalData(expected, result) {\n\t\t\terr = unequalError{Expected: stringData(s.Data), Got: stringData(result)}\n\t\t}\n\n\t\tif isTypeOf(val, s.BrokenMarshalTypes) {\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"expected to fail for (%T), but did not fail\", val)\n\t\t\t}\n\t\t\tt.Skipf(\"skipped because there is an unsolved problem\")\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t})\n}\n\nfunc (s PositiveSet) runUnmarshalTest(name string, t *testing.T, f func([]byte, any) error, expected, result any) {\n\tt.Run(name, func(t *testing.T) {\n\n\t\texpectedPtr := getPointers(result)\n\n\t\terr := func() (err error) {\n\t\t\tdefer func() {\n\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\terr = panicErr{err: fmt.Errorf(\"%s\", r), stack: debug.Stack()}\n\t\t\t\t}\n\t\t\t}()\n\t\t\treturn f(bytes.Clone(s.Data), result)\n\t\t}()\n\n\t\tif err != nil {\n\t\t\tif !errors.As(err, &panicErr{}) {\n\t\t\t\terr = errors.Join(unmarshalErr, err)\n\t\t\t}\n\t\t} else if !equalVals(expected, deReference(result)) {\n\t\t\terr = unequalError{Expected: stringValue(expected), Got: stringValue(deReference(result))}\n\t\t} else {\n\t\t\terr = expectedPtr.Valid(result)\n\t\t}\n\n\t\tif isTypeOf(expected, s.BrokenUnmarshalTypes) {\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"expected to fail for (%T), but did not fail\", expected)\n\t\t\t}\n\t\t\tt.Skipf(\"skipped because there is an unsolved problem\")\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "internal/tests/serialization/utils.go",
    "content": "package serialization\n\nimport (\n\t\"reflect\"\n)\n\nfunc GetTypes(values ...any) []reflect.Type {\n\ttypes := make([]reflect.Type, len(values))\n\tfor i, value := range values {\n\t\ttypes[i] = reflect.TypeOf(value)\n\t}\n\treturn types\n}\n\nfunc isTypeOf(value any, types []reflect.Type) bool {\n\tvalueType := reflect.TypeOf(value)\n\tfor i := range types {\n\t\tif types[i] == valueType {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc deReference(in any) any {\n\treturn reflect.Indirect(reflect.ValueOf(in)).Interface()\n}\n"
  },
  {
    "path": "internal/tests/serialization/utils_equal.go",
    "content": "package serialization\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"unsafe\"\n\n\t\"gopkg.in/inf.v0\"\n\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n)\n\nfunc equalData(in1, in2 []byte) bool {\n\tif in1 == nil || in2 == nil {\n\t\treturn in1 == nil && in2 == nil\n\t}\n\treturn bytes.Equal(in1, in2)\n}\n\nfunc equalVals(in1, in2 any) bool {\n\trin1 := reflect.ValueOf(in1)\n\trin2 := reflect.ValueOf(in2)\n\tif rin1.Kind() != rin2.Kind() {\n\t\treturn false\n\t}\n\tif rin1.Kind() == reflect.Ptr && (rin1.IsNil() || rin2.IsNil()) {\n\t\treturn rin1.IsNil() && rin2.IsNil()\n\t}\n\n\tswitch vin1 := in1.(type) {\n\tcase float32:\n\t\tvin2 := in2.(float32)\n\t\treturn *(*[4]byte)(unsafe.Pointer(&vin1)) == *(*[4]byte)(unsafe.Pointer(&vin2))\n\tcase *float32:\n\t\tvin2 := in2.(*float32)\n\t\treturn *(*[4]byte)(unsafe.Pointer(vin1)) == *(*[4]byte)(unsafe.Pointer(vin2))\n\tcase *mod.Float32:\n\t\tvin2 := in2.(*mod.Float32)\n\t\treturn *(*[4]byte)(unsafe.Pointer(vin1)) == *(*[4]byte)(unsafe.Pointer(vin2))\n\tcase mod.Float32:\n\t\tvin2 := in2.(mod.Float32)\n\t\treturn *(*[4]byte)(unsafe.Pointer(&vin1)) == *(*[4]byte)(unsafe.Pointer(&vin2))\n\tcase float64:\n\t\tvin2 := in2.(float64)\n\t\treturn *(*[8]byte)(unsafe.Pointer(&vin1)) == *(*[8]byte)(unsafe.Pointer(&vin2))\n\tcase *float64:\n\t\tvin2 := in2.(*float64)\n\t\treturn *(*[8]byte)(unsafe.Pointer(vin1)) == *(*[8]byte)(unsafe.Pointer(vin2))\n\tcase *mod.Float64:\n\t\tvin2 := in2.(*mod.Float64)\n\t\treturn *(*[8]byte)(unsafe.Pointer(vin1)) == *(*[8]byte)(unsafe.Pointer(vin2))\n\tcase mod.Float64:\n\t\tvin2 := in2.(mod.Float64)\n\t\treturn *(*[8]byte)(unsafe.Pointer(&vin1)) == *(*[8]byte)(unsafe.Pointer(&vin2))\n\tcase big.Int:\n\t\tvin2 := in2.(big.Int)\n\t\treturn vin1.Cmp(&vin2) == 0\n\tcase *big.Int:\n\t\tvin2 := in2.(*big.Int)\n\t\treturn vin1.Cmp(vin2) == 0\n\tcase inf.Dec:\n\t\tvin2 := in2.(inf.Dec)\n\t\tif vin1.Scale() != vin2.Scale() {\n\t\t\treturn 
false\n\t\t}\n\t\treturn vin1.UnscaledBig().Cmp(vin2.UnscaledBig()) == 0\n\tcase *inf.Dec:\n\t\tvin2 := in2.(*inf.Dec)\n\t\tif vin1.Scale() != vin2.Scale() {\n\t\t\treturn false\n\t\t}\n\t\treturn vin1.UnscaledBig().Cmp(vin2.UnscaledBig()) == 0\n\tcase fmt.Stringer:\n\t\tvin2 := in2.(fmt.Stringer)\n\t\treturn vin1.String() == vin2.String()\n\tdefault:\n\t\treturn reflect.DeepEqual(in1, in2)\n\t}\n}\n"
  },
  {
    "path": "internal/tests/serialization/utils_error.go",
    "content": "package serialization\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\nvar unmarshalErr = errors.New(\"unmarshal unexpectedly failed with error\")\nvar marshalErr = errors.New(\"marshal unexpectedly failed with error\")\n\ntype unequalError struct {\n\tExpected string\n\tGot      string\n}\n\nfunc (e unequalError) Error() string {\n\treturn fmt.Sprintf(\"expect %s but got %s\", e.Expected, e.Got)\n}\n\ntype panicErr struct {\n\terr   error\n\tstack []byte\n}\n\nfunc (e panicErr) Error() string {\n\treturn fmt.Sprintf(\"%v\\n%s\", e.err, e.stack)\n}\n"
  },
  {
    "path": "internal/tests/serialization/utils_new.go",
    "content": "package serialization\n\nimport (\n\t\"reflect\"\n)\n\nfunc newRef(in any) any {\n\tout := reflect.New(reflect.TypeOf(in)).Interface()\n\treturn out\n}\n\nfunc newRefToZero(in any) any {\n\trv := reflect.ValueOf(in)\n\tnw := reflect.New(rv.Type().Elem())\n\tout := reflect.New(rv.Type())\n\tout.Elem().Set(nw)\n\treturn out.Interface()\n}\n"
  },
  {
    "path": "internal/tests/serialization/utils_str.go",
    "content": "package serialization\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"net\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"gopkg.in/inf.v0\"\n)\n\nconst printLimit = 100\n\n// stringValue returns (value_type)(value) in the human-readable format.\nfunc stringValue(in any) string {\n\tvalStr := stringVal(in)\n\tif len(valStr) > printLimit {\n\t\treturn fmt.Sprintf(\"(%T)\", in)\n\t}\n\treturn fmt.Sprintf(\"(%T)(%s)\", in, valStr)\n}\n\nfunc stringData(p []byte) string {\n\tif len(p) > printLimit {\n\t\tp = p[:printLimit]\n\t}\n\tif p == nil {\n\t\treturn \"[nil]\"\n\t}\n\treturn fmt.Sprintf(\"[%x]\", p)\n}\n\nfunc stringVal(in any) string {\n\tswitch i := in.(type) {\n\tcase string:\n\t\treturn i\n\tcase inf.Dec:\n\t\treturn fmt.Sprintf(\"%v\", i.String())\n\tcase big.Int:\n\t\treturn fmt.Sprintf(\"%v\", i.String())\n\tcase net.IP:\n\t\treturn fmt.Sprintf(\"%v\", []byte(i))\n\tcase time.Time:\n\t\treturn fmt.Sprintf(\"%v\", i.UnixMilli())\n\tcase nil:\n\t\treturn \"nil\"\n\t}\n\n\trv := reflect.ValueOf(in)\n\tswitch rv.Kind() {\n\tcase reflect.Ptr:\n\t\tif rv.IsNil() {\n\t\t\treturn \"*nil\"\n\t\t}\n\t\treturn fmt.Sprintf(\"*%s\", stringVal(rv.Elem().Interface()))\n\tcase reflect.Slice:\n\t\tif rv.IsNil() {\n\t\t\treturn \"[nil]\"\n\t\t}\n\t\treturn fmt.Sprintf(\"%v\", rv.Interface())\n\tdefault:\n\t\treturn fmt.Sprintf(\"%v\", in)\n\t}\n}\n"
  },
  {
    "path": "internal/tests/serialization/valcases/get.go",
    "content": "package valcases\n\nimport (\n\t\"reflect\"\n)\n\ntype SimpleTypes []SimpleTypeCases\n\ntype SimpleTypeCases struct {\n\tCQLName string\n\tCases   []SimpleTypeCase\n\tCQLType int\n}\n\ntype SimpleTypeCase struct {\n\tName      string\n\tData      []byte\n\tLangCases []LangCase\n}\n\ntype LangCase struct {\n\tValue     any\n\tLangType  string\n\tErrInsert bool\n\tErrSelect bool\n}\n\nvar nilBytes = ([]byte)(nil)\n\nfunc GetSimple() SimpleTypes {\n\treturn simpleTypesCases\n}\n\nfunc nilRef(in any) any {\n\tout := reflect.NewAt(reflect.TypeOf(in), nil).Interface()\n\treturn out\n}\n"
  },
  {
    "path": "internal/tests/serialization/valcases/simple.go",
    "content": "package valcases\n\nimport (\n\t\"math\"\n\t\"math/big\"\n\t\"net\"\n\t\"time\"\n\n\t\"gopkg.in/inf.v0\"\n\n\t\"github.com/gocql/gocql/serialization/duration\"\n)\n\nvar simpleTypesCases = SimpleTypes{\n\t{\n\t\tCQLName: \"boolean\",\n\t\tCQLType: 0x0004,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"max\",\n\t\t\t\tData: []byte{1},\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"bool\", Value: true},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"min\",\n\t\t\t\tData: []byte{0},\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"bool\", Value: false},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"bool\", Value: nilRef(false)},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"tinyint\",\n\t\tCQLType: 0x0014,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"max\",\n\t\t\t\tData: []byte(\"\\x7f\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int8\", Value: int8(math.MaxInt8)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(math.MaxInt8)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"min\",\n\t\t\t\tData: []byte(\"\\x80\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int8\", Value: int8(math.MinInt8)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(math.MinInt8)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"+1\",\n\t\t\t\tData: []byte(\"\\x01\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int8\", Value: int8(1)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(1)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"-1\",\n\t\t\t\tData: []byte(\"\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int8\", Value: int8(-1)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(-1)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: []byte(\"\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int8\", Value: 
int8(0)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(0)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int8\", Value: nilRef(int8(0))},\n\t\t\t\t\t{LangType: \"big.Int\", Value: nilRef(big.Int{})},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"smallint\",\n\t\tCQLType: 0x0013,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"max\",\n\t\t\t\tData: []byte(\"\\x7f\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int16\", Value: int16(math.MaxInt16)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(math.MaxInt16)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"min\",\n\t\t\t\tData: []byte(\"\\x80\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int16\", Value: int16(math.MinInt16)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(math.MinInt16)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"+1\",\n\t\t\t\tData: []byte(\"\\x00\\x01\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int16\", Value: int16(1)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(1)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"-1\",\n\t\t\t\tData: []byte(\"\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int16\", Value: int16(-1)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(-1)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: []byte(\"\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int16\", Value: int16(0)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(0)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int16\", Value: nilRef(int16(0))},\n\t\t\t\t\t{LangType: \"big.Int\", Value: nilRef(big.Int{})},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"int\",\n\t\tCQLType: 0x0009,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: 
\"max\",\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int32\", Value: int32(math.MaxInt32)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(math.MaxInt32)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"min\",\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int32\", Value: int32(math.MinInt32)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(math.MinInt32)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"+1\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int32\", Value: int32(1)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(1)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"-1\",\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int32\", Value: int32(-1)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(-1)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int32\", Value: int32(0)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(0)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int32\", Value: nilRef(int32(0))},\n\t\t\t\t\t{LangType: \"big.Int\", Value: nilRef(big.Int{})},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"bigint\",\n\t\tCQLType: 0x0002,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"max\",\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(math.MaxInt64)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(math.MaxInt64)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"min\",\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: 
[]LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(math.MinInt64)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(math.MinInt64)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"+1\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(1)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(1)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"-1\",\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(-1)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(-1)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(0)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(0)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: nilRef(int64(0))},\n\t\t\t\t\t{LangType: \"big.Int\", Value: nilRef(big.Int{})},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"varint\",\n\t\tCQLType: 0x000E,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"max\",\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(math.MaxInt64)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(math.MaxInt64)},\n\t\t\t\t\t{LangType: \"string\", Value: \"9223372036854775807\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"min\",\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(math.MinInt64)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(math.MinInt64)},\n\t\t\t\t\t{LangType: \"string\", Value: 
\"-9223372036854775808\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"+1\",\n\t\t\t\tData: []byte(\"\\x01\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(1)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(1)},\n\t\t\t\t\t{LangType: \"string\", Value: \"1\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"-1\",\n\t\t\t\tData: []byte(\"\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(-1)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(-1)},\n\t\t\t\t\t{LangType: \"string\", Value: \"-1\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: []byte(\"\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(0)},\n\t\t\t\t\t{LangType: \"big.Int\", Value: big.NewInt(0)},\n\t\t\t\t\t{LangType: \"string\", Value: \"0\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: nilRef(int64(0))},\n\t\t\t\t\t{LangType: \"big.Int\", Value: nilRef(big.Int{})},\n\t\t\t\t\t{LangType: \"string\", Value: \"\"},\n\t\t\t\t\t{LangType: \"string_ref\", Value: nilRef(\"\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"float\",\n\t\tCQLType: 0x0008,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"max\",\n\t\t\t\tData: []byte(\"\\x7f\\x7f\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"float32\", Value: float32(math.MaxFloat32)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"min\",\n\t\t\t\tData: []byte(\"\\xff\\x7f\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"float32\", Value: float32(-math.MaxFloat32)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"smallest_pos\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"float32\", Value: float32(math.SmallestNonzeroFloat32)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: 
\"smallest_neg\",\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x01\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"float32\", Value: float32(-math.SmallestNonzeroFloat32)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"float32\", Value: float32(0)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"float32\", Value: nilRef(float32(0))},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"double\",\n\t\tCQLType: 0x0007,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"max\",\n\t\t\t\tData: []byte(\"\\x7f\\xef\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"float64\", Value: math.MaxFloat64},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"min\",\n\t\t\t\tData: []byte(\"\\xff\\xef\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"float64\", Value: -math.MaxFloat64},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"smallest_pos\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"float64\", Value: math.SmallestNonzeroFloat64},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"smallest_neg\",\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x01\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"float64\", Value: -math.SmallestNonzeroFloat64},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"float64\", Value: float64(0)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"float64\", Value: nilRef(float64(0))},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: 
\"decimal\",\n\t\tCQLType: 0x0006,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"max\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x7f\\xff\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"inf.Dec\", Value: *inf.NewDec(math.MaxInt64, math.MaxInt16)},\n\t\t\t\t\t{LangType: \"string\", Value: \"32767;9223372036854775807\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"min\",\n\t\t\t\tData: []byte(\"\\xff\\xff\\x80\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"inf.Dec\", Value: *inf.NewDec(math.MinInt64, math.MinInt16)},\n\t\t\t\t\t{LangType: \"string\", Value: \"-32768;-9223372036854775808\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"inf.Dec\", Value: *inf.NewDec(0, 0)},\n\t\t\t\t\t{LangType: \"string\", Value: \"0;0\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"inf.Dec\", Value: nilRef(inf.Dec{})},\n\t\t\t\t\t{LangType: \"string\", Value: \"\"},\n\t\t\t\t\t{LangType: \"string_ref\", Value: nilRef(\"\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"varchar\",\n\t\tCQLType: 0x000D,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"val\",\n\t\t\t\tData: []byte(\"test string\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"test string\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: nilRef(\"\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"text\",\n\t\tCQLType: 0x000A,\n\t\tCases: 
[]SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"val\",\n\t\t\t\tData: []byte(\"test string\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"test string\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: nilRef(\"\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"blob\",\n\t\tCQLType: 0x0003,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"val\",\n\t\t\t\tData: []byte(\"test string\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"test string\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: nilRef(\"\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"ascii\",\n\t\tCQLType: 0x0001,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"val\",\n\t\t\t\tData: []byte(\"test string\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"test string\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: nilRef(\"\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"uuid\",\n\t\tCQLType: 0x000C,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"max\",\n\t\t\t\tData: 
[]byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"ffffffff-ffff-ffff-ffff-ffffffffffff\"},\n\t\t\t\t\t{LangType: \"[16]byte\", Value: [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"val\",\n\t\t\t\tData: []byte(\"\\xe9\\x39\\xf5\\x2a\\xd6\\x90\\x11\\xef\\x9c\\xd2\\x02\\x42\\xac\\x12\\x00\\x02\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"e939f52a-d690-11ef-9cd2-0242ac120002\"},\n\t\t\t\t\t{LangType: \"[16]byte\", Value: [16]byte{233, 57, 245, 42, 214, 144, 17, 239, 156, 210, 2, 66, 172, 18, 0, 2}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: make([]byte, 16),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"00000000-0000-0000-0000-000000000000\"},\n\t\t\t\t\t{LangType: \"[16]byte\", Value: [16]byte{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"\"},\n\t\t\t\t\t{LangType: \"string_ref\", Value: nilRef(\"\")},\n\t\t\t\t\t{LangType: \"[16]byte\", Value: nilRef([16]byte{})},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"timeuuid\",\n\t\tCQLType: 0x000F,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"max\",\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\x1f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"ffffffff-ffff-1fff-ffff-ffffffffffff\"},\n\t\t\t\t\t{LangType: \"[16]byte\", Value: [16]byte{255, 255, 255, 255, 255, 255, 31, 255, 255, 255, 255, 255, 255, 255, 255, 255}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"val\",\n\t\t\t\tData: []byte(\"\\xe9\\x39\\xf5\\x2a\\xd6\\x90\\x11\\xef\\x9c\\xd2\\x02\\x42\\xac\\x12\\x00\\x02\"),\n\t\t\t\tLangCases: 
[]LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"e939f52a-d690-11ef-9cd2-0242ac120002\"},\n\t\t\t\t\t{LangType: \"[16]byte\", Value: [16]byte{233, 57, 245, 42, 214, 144, 17, 239, 156, 210, 2, 66, 172, 18, 0, 2}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: []byte{0, 0, 0, 0, 0, 0, 1 << 4, 0, 0, 0, 0, 0, 0, 0, 0, 0},\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"00000000-0000-1000-0000-000000000000\"},\n\t\t\t\t\t{LangType: \"[16]byte\", Value: [16]byte{0, 0, 0, 0, 0, 0, 1 << 4, 0, 0, 0, 0, 0, 0, 0, 0, 0}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"\"},\n\t\t\t\t\t{LangType: \"string_ref\", Value: nilRef(\"\")},\n\t\t\t\t\t{LangType: \"[16]byte\", Value: nilRef([16]byte{})},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"inet\",\n\t\tCQLType: 0x0010,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"v6max\",\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff\"},\n\t\t\t\t\t{LangType: \"net.IP\", Value: net.IP(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\")},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"v4max\",\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"255.255.255.255\"},\n\t\t\t\t\t{LangType: \"net.IP\", Value: net.IP(\"\\xff\\xff\\xff\\xff\")},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"v6zeros\",\n\t\t\t\tData: make([]byte, 16),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"::\"},\n\t\t\t\t\t{LangType: \"net.IP\", Value: make(net.IP, 16)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"v4zeros\",\n\t\t\t\tData: make([]byte, 4),\n\t\t\t\tLangCases: 
[]LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"0.0.0.0\"},\n\t\t\t\t\t{LangType: \"net.IP\", Value: make(net.IP, 4)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"string\", Value: \"\"},\n\t\t\t\t\t{LangType: \"string_ref\", Value: nilRef(\"\")},\n\t\t\t\t\t{LangType: \"net.IP\", Value: (net.IP)(nil)},\n\t\t\t\t\t{LangType: \"net.IP_ref\", Value: nilRef(net.IP{})},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"time\",\n\t\tCQLType: 0x0012,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"max\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x4e\\x94\\x91\\x4e\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(86399999999999)},\n\t\t\t\t\t{LangType: \"time.Duration\", Value: time.Duration(86399999999999)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(0)},\n\t\t\t\t\t{LangType: \"time.Duration\", Value: time.Duration(0)},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: nilRef(int64(0))},\n\t\t\t\t\t{LangType: \"time.Duration\", Value: nilRef(time.Duration(0))},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"timestamp\",\n\t\tCQLType: 0x000B,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"max\",\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(math.MaxInt64)},\n\t\t\t\t\t{LangType: \"time.Time\", Value: time.UnixMilli(math.MaxInt64).UTC()},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"min\",\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: 
int64(math.MinInt64)},\n\t\t\t\t\t{LangType: \"time.Time\", Value: time.UnixMilli(math.MinInt64).UTC()},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"+1\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(1)},\n\t\t\t\t\t{LangType: \"time.Time\", Value: time.UnixMilli(1).UTC()},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"-1\",\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(-1)},\n\t\t\t\t\t{LangType: \"time.Time\", Value: time.UnixMilli(-1).UTC()},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: int64(0)},\n\t\t\t\t\t{LangType: \"time.Time\", Value: time.UnixMilli(0).UTC()},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int64\", Value: nilRef(int64(0))},\n\t\t\t\t\t{LangType: \"time.Time\", Value: nilRef(time.Time{})},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"date\",\n\t\tCQLType: 0x0011,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"max\",\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"uint32\", Value: uint32(math.MaxUint32)},\n\t\t\t\t\t{LangType: \"int32\", Value: int32(-1)},\n\t\t\t\t\t{LangType: \"time.Time\", Value: time.Date(5881580, 07, 11, 0, 0, 0, 0, time.UTC)},\n\t\t\t\t\t{LangType: \"string\", Value: \"5881580-07-11\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"mid\",\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"uint32\", Value: uint32(1 << 31)},\n\t\t\t\t\t{LangType: \"int32\", Value: int32(math.MinInt32)},\n\t\t\t\t\t{LangType: \"time.Time\", Value: time.Date(1970, 
01, 01, 0, 0, 0, 0, time.UTC)},\n\t\t\t\t\t{LangType: \"string\", Value: \"1970-01-01\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"1\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"uint32\", Value: uint32(1)},\n\t\t\t\t\t{LangType: \"int32\", Value: int32(1)},\n\t\t\t\t\t{LangType: \"time.Time\", Value: time.Date(-5877641, 06, 24, 0, 0, 0, 0, time.UTC)},\n\t\t\t\t\t{LangType: \"string\", Value: \"-5877641-06-24\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"uint32\", Value: uint32(0)},\n\t\t\t\t\t{LangType: \"int32\", Value: int32(0)},\n\t\t\t\t\t{LangType: \"time.Time\", Value: time.Date(-5877641, 06, 23, 0, 0, 0, 0, time.UTC)},\n\t\t\t\t\t{LangType: \"string\", Value: \"-5877641-06-23\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"int32\", Value: nilRef(int32(0))},\n\t\t\t\t\t{LangType: \"time.Time\", Value: nilRef(time.Time{})},\n\t\t\t\t\t{LangType: \"string\", Value: \"\"},\n\t\t\t\t\t{LangType: \"string_ref\", Value: nilRef(\"\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tCQLName: \"duration\",\n\t\tCQLType: 0x0015,\n\t\tCases: []SimpleTypeCase{\n\t\t\t{\n\t\t\t\tName: \"max\",\n\t\t\t\tData: []byte(\"\\xf0\\xff\\xff\\xff\\xfe\\xf0\\xff\\xff\\xff\\xfe\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfe\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"duration\", Value: duration.Duration{Days: math.MaxInt32, Months: math.MaxInt32, Nanoseconds: math.MaxInt64}},\n\t\t\t\t\t{LangType: \"string\", Value: \"178956970y7mo306783378w1d2562047h47m16.854775807s\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"min\",\n\t\t\t\tData: []byte(\"\\xf0\\xff\\xff\\xff\\xff\\xf0\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tLangCases: 
[]LangCase{\n\t\t\t\t\t{LangType: \"duration\", Value: duration.Duration{Days: math.MinInt32, Months: math.MinInt32, Nanoseconds: math.MinInt64}},\n\t\t\t\t\t{LangType: \"string\", Value: \"-178956970y8mo306783378w2d2562047h47m16.854775808s\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"+1\",\n\t\t\t\tData: []byte(\"\\x02\\x02\\x02\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"duration\", Value: duration.Duration{Days: 1, Months: 1, Nanoseconds: 1}},\n\t\t\t\t\t{LangType: \"string\", Value: \"1mo1d1ns\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"-1\",\n\t\t\t\tData: []byte(\"\\x01\\x01\\x01\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"duration\", Value: duration.Duration{Days: -1, Months: -1, Nanoseconds: -1}},\n\t\t\t\t\t{LangType: \"string\", Value: \"-1mo1d1ns\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"maxNanos\",\n\t\t\t\tData: []byte(\"\\x00\\xc3\\x41\\xfe\\xfc\\x9b\\xc5\\xc4\\x9d\\xff\\xfe\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"duration\", Value: duration.Duration{Days: 106751, Months: 0, Nanoseconds: 85636854775807}},\n\t\t\t\t\t{LangType: \"int64\", Value: int64(math.MaxInt64)},\n\t\t\t\t\t{LangType: \"time.Duration\", Value: time.Duration(math.MaxInt64)},\n\t\t\t\t\t{LangType: \"string\", Value: \"15250w1d23h47m16.854775807s\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"minNanos\",\n\t\t\t\tData: []byte(\"\\x00\\xc3\\x41\\xfd\\xfc\\x9b\\xc5\\xc4\\x9d\\xff\\xff\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"duration\", Value: duration.Duration{Days: -106751, Months: 0, Nanoseconds: -85636854775808}},\n\t\t\t\t\t{LangType: \"int64\", Value: int64(math.MinInt64)},\n\t\t\t\t\t{LangType: \"time.Duration\", Value: time.Duration(math.MinInt64)},\n\t\t\t\t\t{LangType: \"string\", Value: \"-15250w1d23h47m16.854775808s\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"zeros\",\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\"),\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: 
\"duration\", Value: duration.Duration{}},\n\t\t\t\t\t{LangType: \"int64\", Value: int64(0)},\n\t\t\t\t\t{LangType: \"time.Duration\", Value: time.Duration(0)},\n\t\t\t\t\t{LangType: \"string\", Value: \"0s\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"nil\",\n\t\t\t\tData: nilBytes,\n\t\t\t\tLangCases: []LangCase{\n\t\t\t\t\t{LangType: \"duration\", Value: nilRef(duration.Duration{})},\n\t\t\t\t\t{LangType: \"int64\", Value: nilRef(int64(0))},\n\t\t\t\t\t{LangType: \"time.Duration\", Value: nilRef(time.Duration(0))},\n\t\t\t\t\t{LangType: \"string\", Value: \"\"},\n\t\t\t\t\t{LangType: \"string_ref\", Value: nilRef(\"\")},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n"
  },
  {
    "path": "keyspace_table_test.go",
    "content": "//go:build integration\n// +build integration\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql/internal/tests\"\n)\n\n// Keyspace_table checks if Query.Keyspace() is updated based on prepared statement\nfunc TestKeyspaceTable(t *testing.T) {\n\tt.Parallel()\n\n\tcluster := createCluster()\n\n\tfallback := RoundRobinHostPolicy()\n\tcluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(fallback)\n\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatal(\"createSession:\", err)\n\t}\n\n\tcluster.Keyspace = \"wrong_keyspace\"\n\n\tkeyspace := testKeyspaceName(t)\n\ttable := testTableName(t)\n\n\terr = createTable(session, `DROP KEYSPACE IF EXISTS `+keyspace)\n\tif err != nil {\n\t\tt.Fatal(\"unable to drop keyspace:\", err)\n\t}\n\n\terr = createTable(session, fmt.Sprintf(`CREATE KEYSPACE %s\n\tWITH replication = 
{\n\t\t'class' : 'NetworkTopologyStrategy',\n\t\t'replication_factor' : 1\n\t}`, keyspace))\n\n\tif err != nil {\n\t\tt.Fatal(\"unable to create keyspace:\", err)\n\t}\n\n\tif err := session.control.awaitSchemaAgreement(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = createTable(session, fmt.Sprintf(`CREATE TABLE %s.%s (pk int, ck int, v int, PRIMARY KEY (pk, ck));\n\t`, keyspace, table))\n\n\tif err != nil {\n\t\tt.Fatal(\"unable to create table:\", err)\n\t}\n\n\tif err := session.control.awaitSchemaAgreement(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tctx := context.Background()\n\n\t// insert a row\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s.%s(pk, ck, v) VALUES (?, ?, ?)`, keyspace, table),\n\t\t1, 2, 3).WithContext(ctx).Consistency(One).Exec(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar pk int\n\n\t/* Search for a specific set of records whose 'pk' column matches\n\t * the value of inserted row. */\n\tqry := session.Query(fmt.Sprintf(`SELECT pk FROM %s.%s WHERE pk = ? LIMIT 1`, keyspace, table),\n\t\t1).WithContext(ctx).Consistency(One)\n\tif err := qry.Scan(&pk); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// cluster.Keyspace was set to \"wrong_keyspace\", but during statement preparation\n\t// Query.Keyspace() should be updated to the actual keyspace and Query.Table() to the actual table\n\ttests.AssertEqual(t, \"qry.Keyspace()\", keyspace, qry.Keyspace())\n\ttests.AssertEqual(t, \"qry.Table()\", table, qry.Table())\n}\n"
  },
  {
    "path": "logger.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n)\n\ntype StdLogger interface {\n\tPrint(v ...any)\n\tPrintf(format string, v ...any)\n\tPrintln(v ...any)\n}\n\ntype nopLogger struct{}\n\nfunc (n nopLogger) Print(_ ...any) {}\n\nfunc (n nopLogger) Printf(_ string, _ ...any) {}\n\nfunc (n nopLogger) Println(_ ...any) {}\n\ntype testLogger struct {\n\tcapture bytes.Buffer\n}\n\nfunc (l *testLogger) Print(v ...any)                 { fmt.Fprint(&l.capture, v...) }\nfunc (l *testLogger) Printf(format string, v ...any) { fmt.Fprintf(&l.capture, format, v...) }\nfunc (l *testLogger) Println(v ...any)               { fmt.Fprintln(&l.capture, v...) }\nfunc (l *testLogger) String() string                 { return l.capture.String() }\n\ntype defaultLogger struct{}\n\nfunc (l *defaultLogger) Print(v ...any)                 { log.Print(v...) 
}\nfunc (l *defaultLogger) Printf(format string, v ...any) { log.Printf(format, v...) }\nfunc (l *defaultLogger) Println(v ...any)               { log.Println(v...) }\n"
  },
  {
    "path": "lz4/go.mod",
    "content": "//\n// Licensed to the Apache Software Foundation (ASF) under one\n// or more contributor license agreements.  See the NOTICE file\n// distributed with this work for additional information\n// regarding copyright ownership.  The ASF licenses this file\n// to you under the Apache License, Version 2.0 (the\n// \"License\"); you may not use this file except in compliance\n// with the License.  You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n//\nmodule github.com/gocql/gocql/lz4\n\ngo 1.25.0\n\nrequire (\n\tgithub.com/pierrec/lz4/v4 v4.1.26\n\tgithub.com/stretchr/testify v1.11.1\n)\n\nrequire (\n\tgithub.com/davecgh/go-spew v1.1.1 // indirect\n\tgithub.com/pmezard/go-difflib v1.0.0 // indirect\n\tgopkg.in/yaml.v3 v3.0.1 // indirect\n)\n"
  },
  {
    "path": "lz4/go.sum",
    "content": "github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/pierrec/lz4/v4 v4.1.26 h1:GrpZw1gZttORinvzBdXPUXATeqlJjqUG/D87TKMnhjY=\ngithub.com/pierrec/lz4/v4 v4.1.26/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=\ngithub.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\n"
  },
  {
    "path": "lz4/lz4.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage lz4\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\n\t\"github.com/pierrec/lz4/v4\"\n)\n\n// LZ4Compressor implements the gocql.Compressor interface and can be used to\n// compress incoming and outgoing frames. According to the Cassandra docs the\n// LZ4 protocol should be preferred over snappy. (For details refer to\n// https://cassandra.apache.org/doc/latest/operating/compression.html)\n//\n// Implementation note: Cassandra prefixes each compressed block with 4 bytes\n// of the uncompressed block length, written in big endian order. 
But the LZ4\n// compression library github.com/pierrec/lz4/v4 does not expect the length\n// field, so it needs to be added to compressed blocks sent to Cassandra, and\n// removed from ones received from Cassandra before decompression.\ntype LZ4Compressor struct{}\n\nfunc (s LZ4Compressor) Name() string {\n\treturn \"lz4\"\n}\n\nfunc (s LZ4Compressor) Encode(data []byte) ([]byte, error) {\n\tdataLen := len(data)\n\tbuf := make([]byte, lz4.CompressBlockBound(dataLen)+4)\n\tn, err := lz4.CompressBlock(data, buf[4:], nil)\n\t// According to lz4.CompressBlock doc, it doesn't fail as long as the dst\n\t// buffer length is at least lz4.CompressBlockBound(len(data)) bytes, but\n\t// we check for error anyway just to be thorough.\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbinary.BigEndian.PutUint32(buf, uint32(dataLen))\n\treturn buf[:n+4], nil\n}\n\nfunc (s LZ4Compressor) Decode(data []byte) ([]byte, error) {\n\tif len(data) < 4 {\n\t\treturn nil, fmt.Errorf(\"cassandra lz4 block size should be >4, got=%d\", len(data))\n\t}\n\tuncompressedLength := binary.BigEndian.Uint32(data)\n\tif uncompressedLength == 0 {\n\t\treturn nil, nil\n\t}\n\tbuf := make([]byte, uncompressedLength)\n\tn, err := lz4.UncompressBlock(data[4:], buf)\n\treturn buf[:n], err\n}\n"
  },
  {
    "path": "lz4/lz4_bench_test.go",
    "content": "//go:build bench\n// +build bench\n\npackage lz4\n\nimport (\n\t\"testing\"\n)\n\nfunc BenchmarkLZ4Compressor(b *testing.B) {\n\toriginal := []byte(\"My Test String\")\n\tvar c LZ4Compressor\n\n\tb.Run(\"Encode\", func(b *testing.B) {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\t_, err := c.Encode(original)\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "lz4/lz4_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage lz4\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestLZ4Compressor(t *testing.T) {\n\tt.Parallel()\n\n\tvar c LZ4Compressor\n\trequire.Equal(t, \"lz4\", c.Name())\n\n\t_, err := c.Decode([]byte{0, 1, 2})\n\trequire.EqualError(t, err, \"cassandra lz4 block size should be >4, got=3\")\n\n\t_, err = c.Decode([]byte{0, 1, 2, 4, 5})\n\trequire.EqualError(t, err, \"lz4: invalid source or destination buffer too short\")\n\n\t// If uncompressed size is zero then nothing is decoded even if present.\n\tdecoded, err := c.Decode([]byte{0, 0, 0, 0, 5, 7, 8})\n\trequire.NoError(t, err)\n\trequire.Nil(t, decoded)\n\n\toriginal := []byte(\"My Test String\")\n\tencoded, err := c.Encode(original)\n\trequire.NoError(t, err)\n\tdecoded, err = c.Decode(encoded)\n\trequire.NoError(t, err)\n\trequire.Equal(t, original, decoded)\n}\n"
  },
  {
    "path": "marshal.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2012, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport 
(\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"math/big\"\n\t\"math/bits\"\n\t\"reflect\"\n\t\"time\"\n\t\"unsafe\"\n\n\t\"gopkg.in/inf.v0\"\n\n\t\"github.com/gocql/gocql/serialization/ascii\"\n\t\"github.com/gocql/gocql/serialization/bigint\"\n\t\"github.com/gocql/gocql/serialization/blob\"\n\t\"github.com/gocql/gocql/serialization/boolean\"\n\t\"github.com/gocql/gocql/serialization/counter\"\n\t\"github.com/gocql/gocql/serialization/cqlint\"\n\t\"github.com/gocql/gocql/serialization/cqltime\"\n\t\"github.com/gocql/gocql/serialization/date\"\n\t\"github.com/gocql/gocql/serialization/decimal\"\n\t\"github.com/gocql/gocql/serialization/double\"\n\t\"github.com/gocql/gocql/serialization/duration\"\n\t\"github.com/gocql/gocql/serialization/float\"\n\t\"github.com/gocql/gocql/serialization/inet\"\n\t\"github.com/gocql/gocql/serialization/smallint\"\n\t\"github.com/gocql/gocql/serialization/text\"\n\t\"github.com/gocql/gocql/serialization/timestamp\"\n\t\"github.com/gocql/gocql/serialization/timeuuid\"\n\t\"github.com/gocql/gocql/serialization/tinyint\"\n\t\"github.com/gocql/gocql/serialization/uuid\"\n\t\"github.com/gocql/gocql/serialization/varchar\"\n\t\"github.com/gocql/gocql/serialization/varint\"\n)\n\nvar (\n\temptyValue reflect.Value\n)\n\nvar (\n\tErrorUDTUnavailable = errors.New(\"UDT are not available on protocols less than 3, please update config\")\n)\n\n// Marshaler is an interface for custom marshaler.\n// Each value of the 'CQL binary protocol' consist of <value_len> and <value_data>.\n// <value_len> can be 'unset'(-2), 'nil'(-1), 'zero'(0) or any value up to 2147483647.\n// When <value_len> is 'unset', 'nil' or 'zero', <value_data> is not present.\n// 'unset' is applicable only to columns, with some exceptions.\n// As you can see from API MarshalCQL only returns <value_data>, but there is a way for it to control <value_len>:\n//  1. If MarshalCQL returns (gocql.UnsetValue, nil), gocql writes 'unset' to <value_len>\n//  2. 
If MarshalCQL returns ([]byte(nil), nil), gocql writes 'nil' to <value_len>\n//  3. If MarshalCQL returns ([]byte{}, nil), gocql writes 'zero' to <value_len>\n//\n// Some CQL databases have proprietary value coding features, which you may want to consider.\n// CQL binary protocol info:https://github.com/apache/cassandra/tree/trunk/doc\ntype Marshaler interface {\n\tMarshalCQL(info TypeInfo) ([]byte, error)\n}\n\ntype DirectMarshal []byte\n\nfunc (m DirectMarshal) MarshalCQL(_ TypeInfo) ([]byte, error) {\n\treturn m, nil\n}\n\n// Unmarshaler is an interface for custom unmarshaler.\n// Each value of the 'CQL binary protocol' consist of <value_len> and <value_data>.\n// <value_len> can be 'unset'(-2), 'nil'(-1), 'zero'(0) or any value up to 2147483647.\n// When <value_len> is 'unset', 'nil' or 'zero', <value_data> is not present.\n// As you can see from an API UnmarshalCQL receives only 'info TypeInfo' and\n// 'data []byte', but gocql has the following way to signal about <value_len>:\n//  1. When <value_len> is 'nil' gocql feeds nil to 'data []byte'\n//  2. When <value_len> is 'zero' gocql feeds []byte{} to 'data []byte'\n//\n// The data []byte slice passed to UnmarshalCQL is only valid for the duration\n// of the call. The backing memory may be reused after the call returns.\n// Implementations that need to retain data must copy it (e.g. 
using\n// bytes.Clone or append([]byte(nil), data...)).\n//\n// Some CQL databases have proprietary value coding features, which you may want to consider.\n// CQL binary protocol info:https://github.com/apache/cassandra/tree/trunk/doc\ntype Unmarshaler interface {\n\tUnmarshalCQL(info TypeInfo, data []byte) error\n}\n\ntype DirectUnmarshal []byte\n\nfunc (d *DirectUnmarshal) UnmarshalCQL(_ TypeInfo, data []byte) error {\n\t*d = bytes.Clone(data)\n\treturn nil\n}\n\n// Marshal returns the CQL encoding of the value for the Cassandra\n// internal type described by the info parameter.\n//\n// nil is serialized as CQL null.\n// If value implements Marshaler, its MarshalCQL method is called to marshal the data.\n// If value is a pointer, the pointed-to value is marshaled.\n//\n// Supported conversions are as follows, other type combinations may be added in the future:\n//\n//\tCQL type                    | Go type (value)    | Note\n//\tvarchar, ascii, blob, text  | string, []byte     |\n//\tboolean                     | bool               |\n//\ttinyint, smallint, int      | integer types      |\n//\ttinyint, smallint, int      | string             | formatted as base 10 number\n//\tbigint, counter             | integer types      |\n//\tbigint, counter             | big.Int            | value limited as int64\n//\tbigint, counter             | string             | formatted as base 10 number\n//\tfloat                       | float32            |\n//\tdouble                      | float64            |\n//\tdecimal                     | inf.Dec            |\n//\ttime                        | int64              | nanoseconds since start of day\n//\ttime                        | time.Duration      | duration since start of day\n//\ttimestamp                   | int64              | milliseconds since Unix epoch\n//\ttimestamp                   | time.Time          |\n//\tlist, set                   | slice, array       |\n//\tlist, set                   | map[X]struct{}   
  |\n//\tmap                         | map[X]Y            |\n//\tuuid, timeuuid              | gocql.UUID         |\n//\tuuid, timeuuid              | [16]byte           | raw UUID bytes\n//\tuuid, timeuuid              | []byte             | raw UUID bytes, length must be 16 bytes\n//\tuuid, timeuuid              | string             | hex representation, see ParseUUID\n//\tvarint                      | integer types      |\n//\tvarint                      | big.Int            |\n//\tvarint                      | string             | value of number in decimal notation\n//\tinet                        | net.IP             |\n//\tinet                        | string             | IPv4 or IPv6 address string\n//\ttuple                       | slice, array       |\n//\ttuple                       | struct             | fields are marshaled in order of declaration\n//\tuser-defined type           | gocql.UDTMarshaler | MarshalUDT is called\n//\tuser-defined type           | map[string]any         |\n//\tuser-defined type           | struct             | struct fields' cql tags are used for column names\n//\tdate                        | int64              | milliseconds since Unix epoch to start of day (in UTC)\n//\tdate                        | time.Time          | start of day (in UTC)\n//\tdate                        | string             | parsed using \"2006-01-02\" format\n//\tduration                    | int64              | duration in nanoseconds\n//\tduration                    | time.Duration      |\n//\tduration                    | gocql.Duration     |\n//\tduration                    | string             | parsed with time.ParseDuration\n//\n// The marshal/unmarshal error provides a list of supported types when an unsupported type is attempted.\n\nfunc Marshal(info TypeInfo, value any) ([]byte, error) {\n\tif info.Version() < protoVersion1 {\n\t\tpanic(\"protocol version not set\")\n\t}\n\n\tif valueRef := reflect.ValueOf(value); valueRef.Kind() == 
reflect.Ptr {\n\t\tif valueRef.IsNil() {\n\t\t\treturn nil, nil\n\t\t} else if v, ok := value.(Marshaler); ok {\n\t\t\treturn v.MarshalCQL(info)\n\t\t} else {\n\t\t\treturn Marshal(info, valueRef.Elem().Interface())\n\t\t}\n\t}\n\n\tif v, ok := value.(Marshaler); ok {\n\t\treturn v.MarshalCQL(info)\n\t}\n\n\tswitch info.Type() {\n\tcase TypeVarchar:\n\t\treturn marshalVarchar(value)\n\tcase TypeText:\n\t\treturn marshalText(value)\n\tcase TypeBlob:\n\t\treturn marshalBlob(value)\n\tcase TypeAscii:\n\t\treturn marshalAscii(value)\n\tcase TypeBoolean:\n\t\treturn marshalBool(value)\n\tcase TypeTinyInt:\n\t\treturn marshalTinyInt(value)\n\tcase TypeSmallInt:\n\t\treturn marshalSmallInt(value)\n\tcase TypeInt:\n\t\treturn marshalInt(value)\n\tcase TypeBigInt:\n\t\treturn marshalBigInt(value)\n\tcase TypeCounter:\n\t\treturn marshalCounter(value)\n\tcase TypeFloat:\n\t\treturn marshalFloat(value)\n\tcase TypeDouble:\n\t\treturn marshalDouble(value)\n\tcase TypeDecimal:\n\t\treturn marshalDecimal(value)\n\tcase TypeTime:\n\t\treturn marshalTime(value)\n\tcase TypeTimestamp:\n\t\treturn marshalTimestamp(value)\n\tcase TypeList, TypeSet:\n\t\treturn marshalList(info, value)\n\tcase TypeMap:\n\t\treturn marshalMap(info, value)\n\tcase TypeUUID:\n\t\treturn marshalUUID(value)\n\tcase TypeTimeUUID:\n\t\treturn marshalTimeUUID(value)\n\tcase TypeVarint:\n\t\treturn marshalVarint(value)\n\tcase TypeInet:\n\t\treturn marshalInet(value)\n\tcase TypeTuple:\n\t\treturn marshalTuple(info, value)\n\tcase TypeUDT:\n\t\treturn marshalUDT(info, value)\n\tcase TypeDate:\n\t\treturn marshalDate(value)\n\tcase TypeDuration:\n\t\treturn marshalDuration(value)\n\tcase TypeCustom:\n\t\tif vector, ok := info.(VectorType); ok {\n\t\t\treturn marshalVector(vector, value)\n\t\t}\n\t}\n\n\t// TODO(tux21b): add the remaining types\n\treturn nil, fmt.Errorf(\"can not marshal %T into %s\", value, info)\n}\n\n// Unmarshal parses the CQL encoded data based on the info parameter that\n// describes the 
Cassandra internal data type and stores the result in the\n// value pointed by value.\n//\n// If value implements Unmarshaler, its UnmarshalCQL method is called to\n// unmarshal the data.\n// If value is a pointer to pointer, it is set to nil if the CQL value is\n// null. Otherwise, nulls are unmarshalled as zero value.\n//\n// Supported conversions are as follows, other type combinations may be added in the future:\n//\n//\tCQL type                                | Go type (value)         | Note\n//\tvarchar, ascii, blob, text              | *string                 |\n//\tvarchar, ascii, blob, text              | *[]byte                 | non-nil buffer is reused\n//\tbool                                    | *bool                   |\n//\ttinyint, smallint, int, bigint, counter | *integer types          |\n//\ttinyint, smallint, int, bigint, counter | *big.Int                |\n//\ttinyint, smallint, int, bigint, counter | *string                 | formatted as base 10 number\n//\tfloat                                   | *float32                |\n//\tdouble                                  | *float64                |\n//\tdecimal                                 | *inf.Dec                |\n//\ttime                                    | *int64                  | nanoseconds since start of day\n//\ttime                                    | *time.Duration          |\n//\ttimestamp                               | *int64                  | milliseconds since Unix epoch\n//\ttimestamp                               | *time.Time              |\n//\tlist, set                               | *slice, *array          |\n//\tmap                                     | *map[X]Y                |\n//\tuuid, timeuuid                          | *string                 | see UUID.String\n//\tuuid, timeuuid                          | *[]byte                 | raw UUID bytes\n//\tuuid, timeuuid                          | *gocql.UUID             |\n//\ttimeuuid                         
       | *time.Time              | timestamp of the UUID\n//\tinet                                    | *net.IP                 |\n//\tinet                                    | *string                 | IPv4 or IPv6 address string\n//\ttuple                                   | *slice, *array          |\n//\ttuple                                   | *struct                 | struct fields are set in order of declaration\n//\tuser-defined types                      | gocql.UDTUnmarshaler    | UnmarshalUDT is called\n//\tuser-defined types                      | *map[string]any         |\n//\tuser-defined types                      | *struct                 | cql tag is used to determine field name\n//\tdate                                    | *time.Time              | time of beginning of the day (in UTC)\n//\tdate                                    | *string                 | formatted with 2006-01-02 format\n//\tduration                                | *gocql.Duration         |\nfunc Unmarshal(info TypeInfo, data []byte, value any) error {\n\tif v, ok := value.(Unmarshaler); ok {\n\t\treturn v.UnmarshalCQL(info, data)\n\t}\n\n\tif isNullableValue(value) {\n\t\treturn unmarshalNullable(info, data, value)\n\t}\n\n\tswitch info.Type() {\n\tcase TypeVarchar:\n\t\treturn unmarshalVarchar(data, value)\n\tcase TypeText:\n\t\treturn unmarshalText(data, value)\n\tcase TypeBlob:\n\t\treturn unmarshalBlob(data, value)\n\tcase TypeAscii:\n\t\treturn unmarshalAscii(data, value)\n\tcase TypeBoolean:\n\t\treturn unmarshalBool(data, value)\n\tcase TypeInt:\n\t\treturn unmarshalInt(data, value)\n\tcase TypeBigInt:\n\t\treturn unmarshalBigInt(data, value)\n\tcase TypeCounter:\n\t\treturn unmarshalCounter(data, value)\n\tcase TypeVarint:\n\t\treturn unmarshalVarint(data, value)\n\tcase TypeSmallInt:\n\t\treturn unmarshalSmallInt(data, value)\n\tcase TypeTinyInt:\n\t\treturn unmarshalTinyInt(data, value)\n\tcase TypeFloat:\n\t\treturn unmarshalFloat(data, value)\n\tcase 
TypeDouble:\n\t\treturn unmarshalDouble(data, value)\n\tcase TypeDecimal:\n\t\treturn unmarshalDecimal(data, value)\n\tcase TypeTime:\n\t\treturn unmarshalTime(data, value)\n\tcase TypeTimestamp:\n\t\treturn unmarshalTimestamp(data, value)\n\tcase TypeList, TypeSet:\n\t\treturn unmarshalList(info, data, value)\n\tcase TypeMap:\n\t\treturn unmarshalMap(info, data, value)\n\tcase TypeTimeUUID:\n\t\treturn unmarshalTimeUUID(data, value)\n\tcase TypeUUID:\n\t\treturn unmarshalUUID(data, value)\n\tcase TypeInet:\n\t\treturn unmarshalInet(data, value)\n\tcase TypeTuple:\n\t\treturn unmarshalTuple(info, data, value)\n\tcase TypeUDT:\n\t\treturn unmarshalUDT(info, data, value)\n\tcase TypeDate:\n\t\treturn unmarshalDate(data, value)\n\tcase TypeDuration:\n\t\treturn unmarshalDuration(data, value)\n\tcase TypeCustom:\n\t\tif vector, ok := info.(VectorType); ok {\n\t\t\treturn unmarshalVector(vector, data, value)\n\t\t}\n\t}\n\n\t// TODO(tux21b): add the remaining types\n\treturn fmt.Errorf(\"can not unmarshal %s into %T\", info, value)\n}\n\nfunc isNullableValue(value any) bool {\n\tv := reflect.ValueOf(value)\n\treturn v.Kind() == reflect.Ptr && v.Type().Elem().Kind() == reflect.Ptr\n}\n\nfunc isNullData(info TypeInfo, data []byte) bool {\n\treturn data == nil\n}\n\nfunc unmarshalNullable(info TypeInfo, data []byte, value any) error {\n\tvalueRef := reflect.ValueOf(value)\n\n\tif isNullData(info, data) {\n\t\tnilValue := reflect.Zero(valueRef.Type().Elem())\n\t\tvalueRef.Elem().Set(nilValue)\n\t\treturn nil\n\t}\n\n\tnewValue := reflect.New(valueRef.Type().Elem().Elem())\n\tvalueRef.Elem().Set(newValue)\n\treturn Unmarshal(info, data, newValue.Interface())\n}\n\nfunc marshalVarchar(value any) ([]byte, error) {\n\tdata, err := varchar.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc marshalText(value any) ([]byte, error) {\n\tdata, err := text.Marshal(value)\n\tif err != nil {\n\t\treturn nil, 
wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc marshalBlob(value any) ([]byte, error) {\n\tdata, err := blob.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc marshalAscii(value any) ([]byte, error) {\n\tdata, err := ascii.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc unmarshalVarchar(data []byte, value any) error {\n\terr := varchar.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc unmarshalText(data []byte, value any) error {\n\terr := text.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc unmarshalBlob(data []byte, value any) error {\n\terr := blob.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc unmarshalAscii(data []byte, value any) error {\n\terr := ascii.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc marshalSmallInt(value any) ([]byte, error) {\n\tdata, err := smallint.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc marshalTinyInt(value any) ([]byte, error) {\n\tdata, err := tinyint.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc marshalInt(value any) ([]byte, error) {\n\tdata, err := cqlint.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc marshalBigInt(value any) ([]byte, error) {\n\tdata, err := bigint.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, 
nil\n}\n\nfunc marshalCounter(value any) ([]byte, error) {\n\tdata, err := counter.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc unmarshalCounter(data []byte, value any) error {\n\terr := counter.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc unmarshalInt(data []byte, value any) error {\n\terr := cqlint.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc unmarshalBigInt(data []byte, value any) error {\n\terr := bigint.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc unmarshalSmallInt(data []byte, value any) error {\n\terr := smallint.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc unmarshalTinyInt(data []byte, value any) error {\n\tif err := tinyint.Unmarshal(data, value); err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc unmarshalVarint(data []byte, value any) error {\n\tif err := varint.Unmarshal(data, value); err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc marshalVarint(value any) ([]byte, error) {\n\tdata, err := varint.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc marshalBool(value any) ([]byte, error) {\n\tdata, err := boolean.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc unmarshalBool(data []byte, value any) error {\n\tif err := boolean.Unmarshal(data, value); err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc marshalFloat(value any) ([]byte, error) 
{\n\tdata, err := float.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc unmarshalFloat(data []byte, value any) error {\n\tif err := float.Unmarshal(data, value); err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc marshalDouble(value any) ([]byte, error) {\n\tdata, err := double.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc unmarshalDouble(data []byte, value any) error {\n\terr := double.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc marshalDecimal(value any) ([]byte, error) {\n\tdata, err := decimal.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc unmarshalDecimal(data []byte, value any) error {\n\tif err := decimal.Unmarshal(data, value); err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc marshalTime(value any) ([]byte, error) {\n\tdata, err := cqltime.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc unmarshalTime(data []byte, value any) error {\n\terr := cqltime.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc marshalTimestamp(value any) ([]byte, error) {\n\tdata, err := timestamp.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc unmarshalTimestamp(data []byte, value any) error {\n\terr := timestamp.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc marshalDate(value any) ([]byte, error) {\n\tdata, err := date.Marshal(value)\n\tif err != nil 
{\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc unmarshalDate(data []byte, value any) error {\n\terr := date.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc marshalDuration(value any) ([]byte, error) {\n\tswitch uv := value.(type) {\n\tcase Duration:\n\t\tvalue = duration.Duration(uv)\n\tcase *Duration:\n\t\tvalue = (*duration.Duration)(uv)\n\t}\n\tdata, err := duration.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc unmarshalDuration(data []byte, value any) error {\n\tswitch uv := value.(type) {\n\tcase *Duration:\n\t\tvalue = (*duration.Duration)(uv)\n\tcase **Duration:\n\t\tif uv == nil {\n\t\t\tvalue = (**duration.Duration)(nil)\n\t\t} else {\n\t\t\tvalue = (**duration.Duration)(unsafe.Pointer(uv))\n\t\t}\n\t}\n\terr := duration.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc writeCollectionSize(info CollectionType, n int, buf *bytes.Buffer) error {\n\tif n > math.MaxInt32 {\n\t\treturn marshalErrorf(\"marshal: collection too large\")\n\t}\n\n\tbuf.WriteByte(byte(n >> 24))\n\tbuf.WriteByte(byte(n >> 16))\n\tbuf.WriteByte(byte(n >> 8))\n\tbuf.WriteByte(byte(n))\n\n\treturn nil\n}\n\nfunc marshalList(info TypeInfo, value any) ([]byte, error) {\n\tlistInfo, ok := info.(CollectionType)\n\tif !ok {\n\t\treturn nil, marshalErrorf(\"marshal: can not marshal non collection type into list\")\n\t}\n\n\tif value == nil {\n\t\treturn nil, nil\n\t} else if _, ok := value.(unsetColumn); ok {\n\t\treturn nil, nil\n\t}\n\n\trv := reflect.ValueOf(value)\n\tt := rv.Type()\n\tk := t.Kind()\n\tif k == reflect.Slice && rv.IsNil() {\n\t\treturn nil, nil\n\t}\n\n\tswitch k {\n\tcase reflect.Slice, reflect.Array:\n\t\tbuf := &bytes.Buffer{}\n\t\tn := rv.Len()\n\n\t\tif err := 
writeCollectionSize(listInfo, n, buf); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor i := 0; i < n; i++ {\n\t\t\titem, err := Marshal(listInfo.Elem, rv.Index(i).Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\titemLen := len(item)\n\t\t\t// Set the value to null for supported protocols\n\t\t\tif item == nil {\n\t\t\t\titemLen = -1\n\t\t\t}\n\t\t\tif err := writeCollectionSize(listInfo, itemLen, buf); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbuf.Write(item)\n\t\t}\n\t\treturn buf.Bytes(), nil\n\tcase reflect.Map:\n\t\telem := t.Elem()\n\t\tif elem.Kind() == reflect.Struct && elem.NumField() == 0 {\n\t\t\trkeys := rv.MapKeys()\n\t\t\tkeys := make([]any, len(rkeys))\n\t\t\tfor i := 0; i < len(keys); i++ {\n\t\t\t\tkeys[i] = rkeys[i].Interface()\n\t\t\t}\n\t\t\treturn marshalList(listInfo, keys)\n\t\t}\n\t}\n\treturn nil, marshalErrorf(\"can not marshal %T into %s\", value, info)\n}\n\nfunc readCollectionSize(info CollectionType, data []byte) (size, read int, err error) {\n\tif len(data) < 4 {\n\t\treturn 0, 0, unmarshalErrorf(\"unmarshal list: unexpected eof\")\n\t}\n\tsize = int(int32(data[0])<<24 | int32(data[1])<<16 | int32(data[2])<<8 | int32(data[3]))\n\tread = 4\n\treturn\n}\n\nfunc unmarshalList(info TypeInfo, data []byte, value any) error {\n\tlistInfo, ok := info.(CollectionType)\n\tif !ok {\n\t\treturn unmarshalErrorf(\"unmarshal: can not unmarshal none collection type into list\")\n\t}\n\n\trv := reflect.ValueOf(value)\n\tif rv.Kind() != reflect.Ptr {\n\t\treturn unmarshalErrorf(\"can not unmarshal into non-pointer %T\", value)\n\t}\n\trv = rv.Elem()\n\tt := rv.Type()\n\tk := t.Kind()\n\n\t// Handle *any destination\n\tif k == reflect.Interface {\n\t\tif t.NumMethod() != 0 {\n\t\t\treturn unmarshalErrorf(\"can not unmarshal into non-empty interface %T\", value)\n\t\t}\n\t\t// Create a properly typed slice based on the element type\n\t\telemGoType, err := goType(listInfo.Elem)\n\t\tif err != nil 
{\n\t\t\treturn unmarshalErrorf(\"unmarshal list: cannot determine element type: %v\", err)\n\t\t}\n\t\tt = reflect.SliceOf(elemGoType)\n\t\tk = reflect.Slice\n\t}\n\n\tswitch k {\n\tcase reflect.Slice, reflect.Array:\n\t\tif data == nil {\n\t\t\tif k == reflect.Array {\n\t\t\t\treturn unmarshalErrorf(\"unmarshal list: can not store nil in array value\")\n\t\t\t}\n\t\t\tif rv.IsNil() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\trv.Set(reflect.Zero(t))\n\t\t\treturn nil\n\t\t}\n\t\tn, p, err := readCollectionSize(listInfo, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata = data[p:]\n\t\tif k == reflect.Array {\n\t\t\tif rv.Len() != n {\n\t\t\t\treturn unmarshalErrorf(\"unmarshal list: array with wrong size\")\n\t\t\t}\n\t\t} else {\n\t\t\trv.Set(reflect.MakeSlice(t, n, n))\n\t\t\t// If rv was an interface, get the underlying slice\n\t\t\tif rv.Kind() == reflect.Interface {\n\t\t\t\trv = rv.Elem()\n\t\t\t}\n\t\t}\n\t\tfor i := 0; i < n; i++ {\n\t\t\tm, p, err := readCollectionSize(listInfo, data)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdata = data[p:]\n\t\t\t// In case m < 0, the value is null, and unmarshalData should be nil.\n\t\t\tvar unmarshalData []byte\n\t\t\tif m >= 0 {\n\t\t\t\tif len(data) < m {\n\t\t\t\t\treturn unmarshalErrorf(\"unmarshal list: unexpected eof\")\n\t\t\t\t}\n\t\t\t\tunmarshalData = data[:m]\n\t\t\t\tdata = data[m:]\n\t\t\t}\n\t\t\tif err := Unmarshal(listInfo.Elem, unmarshalData, rv.Index(i).Addr().Interface()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\treturn unmarshalErrorf(\"can not unmarshal %s into %T. 
Accepted types: *slice, *array, *any.\", info, value)\n}\n\nfunc marshalVector(info VectorType, value any) ([]byte, error) {\n\tif value == nil {\n\t\treturn nil, nil\n\t} else if _, ok := value.(unsetColumn); ok {\n\t\treturn nil, nil\n\t}\n\n\trv := reflect.ValueOf(value)\n\tt := rv.Type()\n\tk := t.Kind()\n\tif k == reflect.Slice && rv.IsNil() {\n\t\treturn nil, nil\n\t}\n\n\tswitch k {\n\tcase reflect.Slice, reflect.Array:\n\t\tn := rv.Len()\n\t\tif n != info.Dimensions {\n\t\t\treturn nil, marshalErrorf(\"expected vector with %d dimensions, received %d\", info.Dimensions, n)\n\t\t}\n\n\t\tisLengthType := isVectorVariableLengthType(info.SubType)\n\t\tbuf := &bytes.Buffer{}\n\t\tif !isLengthType {\n\t\t\tif elemSize := vectorFixedElemSize(info.SubType); elemSize > 0 {\n\t\t\t\tif needed := int64(n) * int64(elemSize); needed > 0 && needed <= math.MaxInt32 {\n\t\t\t\t\tbuf.Grow(int(needed))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor i := 0; i < n; i++ {\n\t\t\titem, err := Marshal(info.SubType, rv.Index(i).Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif isLengthType {\n\t\t\t\twriteUnsignedVInt(buf, uint64(len(item)))\n\t\t\t}\n\t\t\tbuf.Write(item)\n\t\t}\n\t\treturn buf.Bytes(), nil\n\t}\n\treturn nil, marshalErrorf(\"can not marshal %T into %s. 
Accepted types: slice, array.\", value, info)\n}\n\nfunc unmarshalVector(info VectorType, data []byte, value any) error {\n\trv := reflect.ValueOf(value)\n\tif rv.Kind() != reflect.Ptr {\n\t\treturn unmarshalErrorf(\"can not unmarshal into non-pointer %T\", value)\n\t}\n\trv = rv.Elem()\n\tt := rv.Type()\n\tif t.Kind() == reflect.Interface {\n\t\tif t.NumMethod() != 0 {\n\t\t\treturn unmarshalErrorf(\"can not unmarshal into non-empty interface %T\", value)\n\t\t}\n\t\tt = reflect.TypeOf(info.Zero())\n\t}\n\n\tk := t.Kind()\n\tswitch k {\n\tcase reflect.Slice, reflect.Array:\n\t\tif data == nil {\n\t\t\tif k == reflect.Array {\n\t\t\t\treturn unmarshalErrorf(\"unmarshal vector: can not store nil in array value\")\n\t\t\t}\n\t\t\tif rv.IsNil() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\trv.Set(reflect.Zero(t))\n\t\t\treturn nil\n\t\t}\n\t\tif info.Dimensions == 0 {\n\t\t\tif len(data) > 0 {\n\t\t\t\treturn unmarshalErrorf(\"unmarshal vector: %d bytes of data for 0-dimension vector\", len(data))\n\t\t\t}\n\t\t\tif k == reflect.Array {\n\t\t\t\tif rv.Len() != 0 {\n\t\t\t\t\treturn unmarshalErrorf(\"unmarshal vector: array of size %d cannot store vector of 0 dimensions\", rv.Len())\n\t\t\t\t}\n\t\t\t} else if k == reflect.Slice {\n\t\t\t\trv.Set(reflect.MakeSlice(t, 0, 0))\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tif k == reflect.Array {\n\t\t\tif rv.Len() != info.Dimensions {\n\t\t\t\treturn unmarshalErrorf(\"unmarshal vector: array of size %d cannot store vector of %d dimensions\", rv.Len(), info.Dimensions)\n\t\t\t}\n\t\t} else {\n\t\t\trv.Set(reflect.MakeSlice(t, info.Dimensions, info.Dimensions))\n\t\t\tif rv.Kind() == reflect.Interface {\n\t\t\t\trv = rv.Elem()\n\t\t\t}\n\t\t}\n\t\telemSize := len(data) / info.Dimensions\n\t\tisLengthType := isVectorVariableLengthType(info.SubType)\n\t\tfor i := 0; i < info.Dimensions; i++ {\n\t\t\toffset := 0\n\t\t\tif isLengthType {\n\t\t\t\tm, p, err := readUnsignedVInt(data)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\t\t\t\telemSize = int(m)\n\t\t\t\toffset = p\n\t\t\t}\n\t\t\tif offset > 0 {\n\t\t\t\tdata = data[offset:]\n\t\t\t}\n\t\t\tvar unmarshalData []byte\n\t\t\tif elemSize >= 0 {\n\t\t\t\tif len(data) < elemSize {\n\t\t\t\t\treturn unmarshalErrorf(\"unmarshal vector: unexpected eof\")\n\t\t\t\t}\n\t\t\t\tunmarshalData = data[:elemSize]\n\t\t\t\tdata = data[elemSize:]\n\t\t\t}\n\t\t\terr := Unmarshal(info.SubType, unmarshalData, rv.Index(i).Addr().Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn unmarshalErrorf(\"failed to unmarshal %s into %T: %s\", info.SubType, unmarshalData, err.Error())\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\treturn unmarshalErrorf(\"can not unmarshal %s into %T. Accepted types: *slice, *array, *any.\", info, value)\n}\n\nfunc vectorFixedElemSize(elemType TypeInfo) int {\n\tswitch elemType.Type() {\n\tcase TypeBoolean:\n\t\treturn 1\n\tcase TypeInt, TypeFloat:\n\t\treturn 4\n\tcase TypeBigInt, TypeDouble, TypeTimestamp:\n\t\treturn 8\n\tcase TypeUUID, TypeTimeUUID:\n\t\treturn 16\n\t}\n\treturn 0\n}\n\n// isVectorVariableLengthType determines if a type requires explicit length serialization within a vector.\n// Variable-length types need their length encoded (as a vint prefix) before the actual data.\n// Fixed-length types don't require this prefix.\n//\n// This classification must match Cassandra's VectorType behavior. 
Cassandra's VectorType constructor\n// selects FixedLengthSerializer vs VariableLengthSerializer based on elementType.isValueLengthFixed(),\n// which checks whether the type overrides valueLengthIfFixed() to return something other than -1.\n//\n// Several types that are conceptually fixed-size do NOT override valueLengthIfFixed() in Cassandra\n// and are therefore treated as variable-length inside vectors on the wire:\n//   - CounterColumnType  (counter)  — no valueLengthIfFixed() override\n//   - ShortType          (smallint) — no valueLengthIfFixed() override\n//   - ByteType           (tinyint)  — no valueLengthIfFixed() override\n//   - TimeType           (time)     — no valueLengthIfFixed() override\n//   - SimpleDateType     (date)     — no valueLengthIfFixed() override\n//\n// gocql must match this to produce wire-compatible encoding, even though these types always\n// serialize to a known number of bytes.\n//\n// Reference: https://github.com/apache/cassandra/blob/trunk/src/java/org/apache/cassandra/db/marshal/VectorType.java\nfunc isVectorVariableLengthType(elemType TypeInfo) bool {\n\tswitch elemType.Type() {\n\tcase TypeVarchar, TypeAscii, TypeBlob, TypeText,\n\t\tTypeCounter,\n\t\tTypeDuration, TypeDate, TypeTime,\n\t\tTypeDecimal, TypeSmallInt, TypeTinyInt, TypeVarint,\n\t\tTypeInet,\n\t\tTypeList, TypeSet, TypeMap, TypeUDT, TypeTuple:\n\t\treturn true\n\tcase TypeCustom:\n\t\tif vecType, ok := elemType.(VectorType); ok {\n\t\t\treturn isVectorVariableLengthType(vecType.SubType)\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc writeUnsignedVInt(buf *bytes.Buffer, v uint64) {\n\tnumBytes := computeUnsignedVIntSize(v)\n\tif numBytes <= 1 {\n\t\tbuf.WriteByte(byte(v))\n\t\treturn\n\t}\n\n\textraBytes := numBytes - 1\n\tvar tmp = make([]byte, numBytes)\n\tfor i := extraBytes; i >= 0; i-- {\n\t\ttmp[i] = byte(v)\n\t\tv >>= 8\n\t}\n\ttmp[0] |= byte(^(0xff >> uint(extraBytes)))\n\tbuf.Write(tmp)\n}\n\nfunc readUnsignedVInt(data []byte) (uint64, int, 
error) {\n\tif len(data) <= 0 {\n\t\treturn 0, 0, errors.New(\"unexpected eof\")\n\t}\n\tfirstByte := data[0]\n\tif firstByte&0x80 == 0 {\n\t\treturn uint64(firstByte), 1, nil\n\t}\n\tnumBytes := bits.LeadingZeros32(uint32(^firstByte)) - 24\n\tret := uint64(firstByte & (0xff >> uint(numBytes)))\n\tif len(data) < numBytes+1 {\n\t\treturn 0, 0, fmt.Errorf(\"data expect to have %d bytes, but it has only %d\", numBytes+1, len(data))\n\t}\n\tfor i := 0; i < numBytes; i++ {\n\t\tret <<= 8\n\t\tret |= uint64(data[i+1] & 0xff)\n\t}\n\treturn ret, numBytes + 1, nil\n}\n\nfunc computeUnsignedVIntSize(v uint64) int {\n\tlead0 := bits.LeadingZeros64(v)\n\treturn (639 - lead0*9) >> 6\n}\n\nfunc marshalMap(info TypeInfo, value any) ([]byte, error) {\n\tmapInfo, ok := info.(CollectionType)\n\tif !ok {\n\t\treturn nil, marshalErrorf(\"marshal: can not marshal none collection type into map\")\n\t}\n\n\tif value == nil {\n\t\treturn nil, nil\n\t} else if _, ok := value.(unsetColumn); ok {\n\t\treturn nil, nil\n\t}\n\n\trv := reflect.ValueOf(value)\n\n\tt := rv.Type()\n\tif t.Kind() != reflect.Map {\n\t\treturn nil, marshalErrorf(\"can not marshal %T into %s\", value, info)\n\t}\n\n\tif rv.IsNil() {\n\t\treturn nil, nil\n\t}\n\n\tbuf := &bytes.Buffer{}\n\tn := rv.Len()\n\n\tif err := writeCollectionSize(mapInfo, n, buf); err != nil {\n\t\treturn nil, err\n\t}\n\n\tkeys := rv.MapKeys()\n\tfor _, key := range keys {\n\t\titem, err := Marshal(mapInfo.Key, key.Interface())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\titemLen := len(item)\n\t\t// Set the key to null for supported protocols\n\t\tif item == nil {\n\t\t\titemLen = -1\n\t\t}\n\t\tif err := writeCollectionSize(mapInfo, itemLen, buf); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf.Write(item)\n\n\t\titem, err = Marshal(mapInfo.Elem, rv.MapIndex(key).Interface())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\titemLen = len(item)\n\t\t// Set the value to null for supported protocols\n\t\tif item == nil 
{\n\t\t\titemLen = -1\n\t\t}\n\t\tif err := writeCollectionSize(mapInfo, itemLen, buf); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf.Write(item)\n\t}\n\treturn buf.Bytes(), nil\n}\n\nfunc unmarshalMap(info TypeInfo, data []byte, value any) error {\n\tmapInfo, ok := info.(CollectionType)\n\tif !ok {\n\t\treturn unmarshalErrorf(\"unmarshal: can not unmarshal none collection type into map\")\n\t}\n\n\trv := reflect.ValueOf(value)\n\tif rv.Kind() != reflect.Ptr {\n\t\treturn unmarshalErrorf(\"can not unmarshal into non-pointer %T\", value)\n\t}\n\trv = rv.Elem()\n\tt := rv.Type()\n\n\t// Handle *any destination\n\tif t.Kind() == reflect.Interface {\n\t\tif t.NumMethod() != 0 {\n\t\t\treturn unmarshalErrorf(\"can not unmarshal into non-empty interface %T\", value)\n\t\t}\n\t\t// Create a properly typed map based on the key and element types\n\t\tkeyGoType, err := goType(mapInfo.Key)\n\t\tif err != nil {\n\t\t\treturn unmarshalErrorf(\"unmarshal map: cannot determine key type: %v\", err)\n\t\t}\n\t\telemGoType, err := goType(mapInfo.Elem)\n\t\tif err != nil {\n\t\t\treturn unmarshalErrorf(\"unmarshal map: cannot determine element type: %v\", err)\n\t\t}\n\t\tt = reflect.MapOf(keyGoType, elemGoType)\n\t}\n\n\tif t.Kind() != reflect.Map {\n\t\treturn unmarshalErrorf(\"can not unmarshal %s into %T. 
Accepted types: *map, *any.\", info, value)\n\t}\n\tif data == nil {\n\t\trv.Set(reflect.Zero(t))\n\t\treturn nil\n\t}\n\tn, p, err := readCollectionSize(mapInfo, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif n < 0 {\n\t\treturn unmarshalErrorf(\"negative map size %d\", n)\n\t}\n\trv.Set(reflect.MakeMapWithSize(t, n))\n\t// If rv was an interface, get the underlying map\n\tif rv.Kind() == reflect.Interface {\n\t\trv = rv.Elem()\n\t}\n\tdata = data[p:]\n\tfor i := 0; i < n; i++ {\n\t\tm, p, err := readCollectionSize(mapInfo, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata = data[p:]\n\t\tkey := reflect.New(t.Key())\n\t\t// In case m < 0, the key is null, and unmarshalData should be nil.\n\t\tvar unmarshalData []byte\n\t\tif m >= 0 {\n\t\t\tif len(data) < m {\n\t\t\t\treturn unmarshalErrorf(\"unmarshal map: unexpected eof\")\n\t\t\t}\n\t\t\tunmarshalData = data[:m]\n\t\t\tdata = data[m:]\n\t\t}\n\t\tif err := Unmarshal(mapInfo.Key, unmarshalData, key.Interface()); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tm, p, err = readCollectionSize(mapInfo, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata = data[p:]\n\t\tval := reflect.New(t.Elem())\n\n\t\t// In case m < 0, the value is null, and unmarshalData should be nil.\n\t\tunmarshalData = nil\n\t\tif m >= 0 {\n\t\t\tif len(data) < m {\n\t\t\t\treturn unmarshalErrorf(\"unmarshal map: unexpected eof\")\n\t\t\t}\n\t\t\tunmarshalData = data[:m]\n\t\t\tdata = data[m:]\n\t\t}\n\t\tif err := Unmarshal(mapInfo.Elem, unmarshalData, val.Interface()); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trv.SetMapIndex(key.Elem(), val.Elem())\n\t}\n\treturn nil\n}\n\nfunc marshalUUID(value any) ([]byte, error) {\n\tswitch uv := value.(type) {\n\tcase UUID:\n\t\tvalue = [16]byte(uv)\n\tcase *UUID:\n\t\tvalue = (*[16]byte)(uv)\n\t}\n\tdata, err := uuid.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc unmarshalUUID(data []byte, 
value any) error {\n\tswitch uv := value.(type) {\n\tcase *UUID:\n\t\tvalue = (*[16]byte)(uv)\n\tcase **UUID:\n\t\tif uv == nil {\n\t\t\tvalue = (**[16]byte)(nil)\n\t\t} else {\n\t\t\tvalue = (**[16]byte)(unsafe.Pointer(uv))\n\t\t}\n\t}\n\terr := uuid.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc marshalTimeUUID(value any) ([]byte, error) {\n\tswitch uv := value.(type) {\n\tcase UUID:\n\t\tvalue = [16]byte(uv)\n\tcase *UUID:\n\t\tvalue = (*[16]byte)(uv)\n\t}\n\tdata, err := timeuuid.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc unmarshalTimeUUID(data []byte, value any) error {\n\tswitch uv := value.(type) {\n\tcase *UUID:\n\t\tvalue = (*[16]byte)(uv)\n\tcase **UUID:\n\t\tif uv == nil {\n\t\t\tvalue = (**[16]byte)(nil)\n\t\t} else {\n\t\t\tvalue = (**[16]byte)(unsafe.Pointer(uv))\n\t\t}\n\t}\n\terr := timeuuid.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc marshalInet(value any) ([]byte, error) {\n\tdata, err := inet.Marshal(value)\n\tif err != nil {\n\t\treturn nil, wrapMarshalError(err, \"marshal error\")\n\t}\n\treturn data, nil\n}\n\nfunc unmarshalInet(data []byte, value any) error {\n\terr := inet.Unmarshal(data, value)\n\tif err != nil {\n\t\treturn wrapUnmarshalError(err, \"unmarshal error\")\n\t}\n\treturn nil\n}\n\nfunc marshalTuple(info TypeInfo, value any) ([]byte, error) {\n\ttuple := info.(TupleTypeInfo)\n\tswitch v := value.(type) {\n\tcase unsetColumn:\n\t\treturn nil, unmarshalErrorf(\"Invalid request: UnsetValue is unsupported for tuples\")\n\tcase []any:\n\t\tif len(v) != len(tuple.Elems) {\n\t\t\treturn nil, unmarshalErrorf(\"cannont marshal tuple: wrong number of elements\")\n\t\t}\n\n\t\tvar buf []byte\n\t\tfor i, elem := range v {\n\t\t\tif elem == nil {\n\t\t\t\tbuf = 
appendIntNeg1(buf)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdata, err := Marshal(tuple.Elems[i], elem)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tn := len(data)\n\t\t\tbuf = appendInt(buf, int32(n))\n\t\t\tbuf = append(buf, data...)\n\t\t}\n\n\t\treturn buf, nil\n\t}\n\n\trv := reflect.ValueOf(value)\n\tt := rv.Type()\n\tk := t.Kind()\n\n\tswitch k {\n\tcase reflect.Struct:\n\t\tif v := t.NumField(); v != len(tuple.Elems) {\n\t\t\treturn nil, marshalErrorf(\"can not marshal tuple into struct %v, not enough fields have %d need %d\", t, v, len(tuple.Elems))\n\t\t}\n\n\t\tvar buf []byte\n\t\tfor i, elem := range tuple.Elems {\n\t\t\tfield := rv.Field(i)\n\n\t\t\tif field.Kind() == reflect.Ptr && field.IsNil() {\n\t\t\t\tbuf = appendIntNeg1(buf)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdata, err := Marshal(elem, field.Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tn := len(data)\n\t\t\tbuf = appendInt(buf, int32(n))\n\t\t\tbuf = append(buf, data...)\n\t\t}\n\n\t\treturn buf, nil\n\tcase reflect.Slice, reflect.Array:\n\t\tsize := rv.Len()\n\t\tif size != len(tuple.Elems) {\n\t\t\treturn nil, marshalErrorf(\"can not marshal tuple into %v of length %d need %d elements\", k, size, len(tuple.Elems))\n\t\t}\n\n\t\tvar buf []byte\n\t\tfor i, elem := range tuple.Elems {\n\t\t\titem := rv.Index(i)\n\n\t\t\tif item.Kind() == reflect.Ptr && item.IsNil() {\n\t\t\t\tbuf = appendIntNeg1(buf)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdata, err := Marshal(elem, item.Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tn := len(data)\n\t\t\tbuf = appendInt(buf, int32(n))\n\t\t\tbuf = append(buf, data...)\n\t\t}\n\n\t\treturn buf, nil\n\t}\n\n\treturn nil, marshalErrorf(\"cannot marshal %T into %s\", value, tuple)\n}\n\nfunc readBytes(p []byte) ([]byte, []byte) {\n\t// TODO: really should use a framer\n\tsize := readInt(p)\n\tp = p[4:]\n\tif size < 0 {\n\t\treturn nil, p\n\t}\n\treturn p[:size], p[size:]\n}\n\n// currently 
only support unmarshal into a list of values, this makes it possible\n// to support tuples without changing the query API. In the future this can be extend\n// to allow unmarshalling into custom tuple types.\nfunc unmarshalTuple(info TypeInfo, data []byte, value any) error {\n\tif v, ok := value.(Unmarshaler); ok {\n\t\treturn v.UnmarshalCQL(info, data)\n\t}\n\n\ttuple := info.(TupleTypeInfo)\n\tswitch v := value.(type) {\n\tcase []any:\n\t\tfor i, elem := range tuple.Elems {\n\t\t\t// each element inside data is a [bytes]\n\t\t\tvar p []byte\n\t\t\tif len(data) >= 4 {\n\t\t\t\tp, data = readBytes(data)\n\t\t\t}\n\t\t\terr := Unmarshal(elem, p, v[i])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\trv := reflect.ValueOf(value)\n\tif rv.Kind() != reflect.Ptr {\n\t\treturn unmarshalErrorf(\"can not unmarshal into non-pointer %T\", value)\n\t}\n\n\trv = rv.Elem()\n\tt := rv.Type()\n\tk := t.Kind()\n\n\tswitch k {\n\tcase reflect.Struct:\n\t\tif v := t.NumField(); v != len(tuple.Elems) {\n\t\t\treturn unmarshalErrorf(\"can not unmarshal tuple into struct %v, not enough fields have %d need %d\", t, v, len(tuple.Elems))\n\t\t}\n\n\t\tfor i, elem := range tuple.Elems {\n\t\t\tvar p []byte\n\t\t\tif len(data) >= 4 {\n\t\t\t\tp, data = readBytes(data)\n\t\t\t}\n\n\t\t\tv, err := elem.NewWithError()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := Unmarshal(elem, p, v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tswitch rv.Field(i).Kind() {\n\t\t\tcase reflect.Ptr:\n\t\t\t\tif p != nil {\n\t\t\t\t\trv.Field(i).Set(reflect.ValueOf(v))\n\t\t\t\t} else {\n\t\t\t\t\trv.Field(i).Set(reflect.Zero(reflect.TypeOf(v)))\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\trv.Field(i).Set(reflect.ValueOf(v).Elem())\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\tcase reflect.Slice, reflect.Array:\n\t\tif k == reflect.Array {\n\t\t\tsize := rv.Len()\n\t\t\tif size != len(tuple.Elems) {\n\t\t\t\treturn unmarshalErrorf(\"can not unmarshal tuple into 
array of length %d need %d elements\", size, len(tuple.Elems))\n\t\t\t}\n\t\t} else {\n\t\t\trv.Set(reflect.MakeSlice(t, len(tuple.Elems), len(tuple.Elems)))\n\t\t}\n\n\t\tfor i, elem := range tuple.Elems {\n\t\t\tvar p []byte\n\t\t\tif len(data) >= 4 {\n\t\t\t\tp, data = readBytes(data)\n\t\t\t}\n\n\t\t\tv, err := elem.NewWithError()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := Unmarshal(elem, p, v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tswitch rv.Index(i).Kind() {\n\t\t\tcase reflect.Ptr:\n\t\t\t\tif p != nil {\n\t\t\t\t\trv.Index(i).Set(reflect.ValueOf(v))\n\t\t\t\t} else {\n\t\t\t\t\trv.Index(i).Set(reflect.Zero(reflect.TypeOf(v)))\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\trv.Index(i).Set(reflect.ValueOf(v).Elem())\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn unmarshalErrorf(\"cannot unmarshal %s into %T\", info, value)\n}\n\n// UDTMarshaler is an interface which should be implemented by users wishing to\n// handle encoding UDT types to sent to Cassandra. 
Note: due to current implentations\n// methods defined for this interface must be value receivers not pointer receivers.\ntype UDTMarshaler interface {\n\t// MarshalUDT will be called for each field in the the UDT returned by Cassandra,\n\t// the implementor should marshal the type to return by for example calling\n\t// Marshal.\n\tMarshalUDT(name string, info TypeInfo) ([]byte, error)\n}\n\n// UDTUnmarshaler should be implemented by users wanting to implement custom\n// UDT unmarshaling.\ntype UDTUnmarshaler interface {\n\t// UnmarshalUDT will be called for each field in the UDT return by Cassandra,\n\t// the implementor should unmarshal the data into the value of their chosing,\n\t// for example by calling Unmarshal.\n\t//\n\t// The data []byte slice is only valid for the duration of the call.\n\t// The backing memory may be reused after the call returns.\n\t// Implementations that need to retain data must copy it.\n\tUnmarshalUDT(name string, info TypeInfo, data []byte) error\n}\n\nfunc marshalUDT(info TypeInfo, value any) ([]byte, error) {\n\tudt := info.(UDTTypeInfo)\n\n\tswitch v := value.(type) {\n\tcase Marshaler:\n\t\treturn v.MarshalCQL(info)\n\tcase unsetColumn:\n\t\treturn nil, unmarshalErrorf(\"invalid request: UnsetValue is unsupported for user defined types\")\n\tcase UDTMarshaler:\n\t\tvar buf []byte\n\t\tfor _, e := range udt.Elements {\n\t\t\tdata, err := v.MarshalUDT(e.Name, e.Type)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tbuf = appendBytes(buf, data)\n\t\t}\n\n\t\treturn buf, nil\n\tcase map[string]any:\n\t\tvar buf []byte\n\t\tfor _, e := range udt.Elements {\n\t\t\tval, ok := v[e.Name]\n\t\t\tvar data []byte\n\n\t\t\tif ok {\n\t\t\t\tvar err error\n\t\t\t\tdata, err = Marshal(e.Type, val)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbuf = appendBytes(buf, data)\n\t\t}\n\n\t\treturn buf, nil\n\t}\n\n\tk := reflect.ValueOf(value)\n\tif k.Kind() == reflect.Ptr {\n\t\tif k.IsNil() 
{\n\t\t\treturn nil, marshalErrorf(\"cannot marshal %T into %s\", value, info)\n\t\t}\n\t\tk = k.Elem()\n\t}\n\n\tif k.Kind() != reflect.Struct || !k.IsValid() {\n\t\treturn nil, marshalErrorf(\"cannot marshal %T into %s\", value, info)\n\t}\n\n\tfields := make(map[string]reflect.Value)\n\tt := reflect.TypeOf(value)\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tsf := t.Field(i)\n\n\t\tif tag := sf.Tag.Get(\"cql\"); tag != \"\" {\n\t\t\tfields[tag] = k.Field(i)\n\t\t}\n\t}\n\n\tvar buf []byte\n\tfor _, e := range udt.Elements {\n\t\tf, ok := fields[e.Name]\n\t\tif !ok {\n\t\t\tf = k.FieldByName(e.Name)\n\t\t}\n\n\t\tvar data []byte\n\t\tif f.IsValid() && f.CanInterface() {\n\t\t\tvar err error\n\t\t\tdata, err = Marshal(e.Type, f.Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tbuf = appendBytes(buf, data)\n\t}\n\n\treturn buf, nil\n}\n\nfunc unmarshalUDT(info TypeInfo, data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase Unmarshaler:\n\t\treturn v.UnmarshalCQL(info, data)\n\tcase UDTUnmarshaler:\n\t\tudt := info.(UDTTypeInfo)\n\n\t\tfor id, e := range udt.Elements {\n\t\t\tif len(data) == 0 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif len(data) < 4 {\n\t\t\t\treturn unmarshalErrorf(\"can not unmarshal %s: field [%d]%s: unexpected eof\", info, id, e.Name)\n\t\t\t}\n\n\t\t\tvar p []byte\n\t\t\tp, data = readBytes(data)\n\t\t\tif err := v.UnmarshalUDT(e.Name, e.Type, p); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\tcase *map[string]any:\n\t\tudt := info.(UDTTypeInfo)\n\n\t\trv := reflect.ValueOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn unmarshalErrorf(\"can not unmarshal into non-pointer %T\", value)\n\t\t}\n\n\t\trv = rv.Elem()\n\t\tt := rv.Type()\n\t\tif t.Kind() != reflect.Map {\n\t\t\treturn unmarshalErrorf(\"can not unmarshal %s into %T\", info, value)\n\t\t} else if data == nil {\n\t\t\trv.Set(reflect.Zero(t))\n\t\t\treturn nil\n\t\t}\n\n\t\trv.Set(reflect.MakeMap(t))\n\t\tm := 
*v\n\n\t\tfor id, e := range udt.Elements {\n\t\t\tif len(data) == 0 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif len(data) < 4 {\n\t\t\t\treturn unmarshalErrorf(\"can not unmarshal %s: field [%d]%s: unexpected eof\", info, id, e.Name)\n\t\t\t}\n\n\t\t\tvalType, err := goType(e.Type)\n\t\t\tif err != nil {\n\t\t\t\treturn unmarshalErrorf(\"can not unmarshal %s: %v\", info, err)\n\t\t\t}\n\n\t\t\tval := reflect.New(valType)\n\n\t\t\tvar p []byte\n\t\t\tp, data = readBytes(data)\n\n\t\t\tif err := Unmarshal(e.Type, p, val.Interface()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tm[e.Name] = val.Elem().Interface()\n\t\t}\n\n\t\treturn nil\n\t}\n\n\trv := reflect.ValueOf(value)\n\tif rv.Kind() != reflect.Ptr {\n\t\treturn unmarshalErrorf(\"can not unmarshal into non-pointer %T\", value)\n\t}\n\tk := rv.Elem()\n\tif k.Kind() != reflect.Struct || !k.IsValid() {\n\t\treturn unmarshalErrorf(\"cannot unmarshal %s into %T\", info, value)\n\t}\n\n\tif len(data) == 0 {\n\t\tif k.CanSet() {\n\t\t\tk.Set(reflect.Zero(k.Type()))\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tt := k.Type()\n\tfields := make(map[string]reflect.Value, t.NumField())\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tsf := t.Field(i)\n\n\t\tif tag := sf.Tag.Get(\"cql\"); tag != \"\" {\n\t\t\tfields[tag] = k.Field(i)\n\t\t}\n\t}\n\n\tudt := info.(UDTTypeInfo)\n\tfor id, e := range udt.Elements {\n\t\tif len(data) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tif len(data) < 4 {\n\t\t\t// UDT def does not match the column value\n\t\t\treturn unmarshalErrorf(\"can not unmarshal %s: field [%d]%s: unexpected eof\", info, id, e.Name)\n\t\t}\n\n\t\tvar p []byte\n\t\tp, data = readBytes(data)\n\n\t\tf, ok := fields[e.Name]\n\t\tif !ok {\n\t\t\tf = k.FieldByName(e.Name)\n\t\t\tif f == emptyValue { //nolint:govet // no other way to do that\n\t\t\t\t// skip fields which exist in the UDT but not in\n\t\t\t\t// the struct passed in\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif !f.IsValid() || !f.CanAddr() {\n\t\t\treturn 
unmarshalErrorf(\"cannot unmarshal %s into %T: field %v is not valid\", info, value, e.Name)\n\t\t}\n\n\t\tfk := f.Addr().Interface()\n\t\tif err := Unmarshal(e.Type, p, fk); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// TypeInfo describes a Cassandra specific data type.\ntype TypeInfo interface {\n\tType() Type\n\tVersion() byte\n\tCustom() string\n\n\t// NewWithError creates a pointer to an empty version of whatever type\n\t// is referenced by the TypeInfo receiver.\n\t//\n\t// If there is no corresponding Go type for the CQL type, NewWithError returns an error.\n\tNewWithError() (any, error)\n}\n\ntype NativeType struct {\n\t//only used for TypeCustom\n\tcustom string\n\ttyp    Type\n\tproto  byte\n}\n\nfunc NewNativeType(proto byte, typ Type) NativeType {\n\treturn NativeType{proto: proto, typ: typ, custom: \"\"}\n}\n\nfunc NewCustomType(proto byte, typ Type, custom string) NativeType {\n\treturn NativeType{proto: proto, typ: typ, custom: custom}\n}\n\nfunc (t NativeType) NewWithError() (any, error) {\n\t// Fast path for common types to avoid reflection overhead\n\tswitch t.typ {\n\tcase TypeInt:\n\t\treturn new(int), nil\n\tcase TypeBigInt, TypeCounter:\n\t\treturn new(int64), nil\n\tcase TypeVarchar, TypeAscii, TypeText, TypeInet:\n\t\treturn new(string), nil\n\tcase TypeBoolean:\n\t\treturn new(bool), nil\n\tcase TypeFloat:\n\t\treturn new(float32), nil\n\tcase TypeDouble:\n\t\treturn new(float64), nil\n\tcase TypeTimestamp, TypeDate:\n\t\treturn new(time.Time), nil\n\tcase TypeUUID, TypeTimeUUID:\n\t\treturn new(UUID), nil\n\tcase TypeBlob:\n\t\treturn new([]byte), nil\n\tcase TypeSmallInt:\n\t\treturn new(int16), nil\n\tcase TypeTinyInt:\n\t\treturn new(int8), nil\n\tcase TypeTime:\n\t\treturn new(time.Duration), nil\n\tcase TypeDecimal:\n\t\treturn new(*inf.Dec), nil\n\tcase TypeVarint:\n\t\treturn new(*big.Int), nil\n\tcase TypeDuration:\n\t\treturn new(Duration), nil\n\t}\n\n\t// Fallback to reflection for complex/custom 
types\n\ttyp, err := goType(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn reflect.New(typ).Interface(), nil\n}\n\nfunc (t NativeType) Type() Type {\n\treturn t.typ\n}\n\nfunc (t NativeType) Version() byte {\n\treturn t.proto\n}\n\nfunc (t NativeType) Custom() string {\n\treturn t.custom\n}\n\nfunc (t NativeType) String() string {\n\tswitch t.typ {\n\tcase TypeCustom:\n\t\treturn fmt.Sprintf(\"%s(%s)\", t.typ, t.custom)\n\tdefault:\n\t\treturn t.typ.String()\n\t}\n}\n\nfunc NewCollectionType(m NativeType, key, elem TypeInfo) CollectionType {\n\treturn CollectionType{\n\t\tNativeType: m,\n\t\tKey:        key,\n\t\tElem:       elem,\n\t}\n}\n\ntype CollectionType struct {\n\t// Key is used only for TypeMap\n\tKey TypeInfo\n\t// Elem is used for TypeMap, TypeList and TypeSet\n\tElem TypeInfo\n\tNativeType\n}\n\ntype VectorType struct {\n\tSubType TypeInfo\n\tNativeType\n\tDimensions int\n}\n\n// Zero returns the zero value for the vector CQL type.\nfunc (v VectorType) Zero() any {\n\tt, e := v.SubType.NewWithError()\n\tif e != nil {\n\t\treturn nil\n\t}\n\treturn reflect.Zero(reflect.SliceOf(reflect.TypeOf(t))).Interface()\n}\n\nfunc (t CollectionType) NewWithError() (any, error) {\n\t// Fast path for common collection patterns\n\tswitch t.typ {\n\tcase TypeList, TypeSet:\n\t\t// Fast path for lists/sets of primitive types\n\t\tif nt, ok := t.Elem.(NativeType); ok {\n\t\t\tswitch nt.typ {\n\t\t\tcase TypeInt:\n\t\t\t\treturn new([]int), nil\n\t\t\tcase TypeBigInt, TypeCounter:\n\t\t\t\treturn new([]int64), nil\n\t\t\tcase TypeText, TypeVarchar, TypeAscii:\n\t\t\t\treturn new([]string), nil\n\t\t\tcase TypeBoolean:\n\t\t\t\treturn new([]bool), nil\n\t\t\tcase TypeFloat:\n\t\t\t\treturn new([]float32), nil\n\t\t\tcase TypeDouble:\n\t\t\t\treturn new([]float64), nil\n\t\t\tcase TypeUUID, TypeTimeUUID:\n\t\t\t\treturn new([]UUID), nil\n\t\t\tcase TypeTimestamp, TypeDate:\n\t\t\t\treturn new([]time.Time), nil\n\t\t\tcase TypeSmallInt:\n\t\t\t\treturn 
new([]int16), nil\n\t\t\tcase TypeTinyInt:\n\t\t\t\treturn new([]int8), nil\n\t\t\tcase TypeBlob:\n\t\t\t\treturn new([][]byte), nil\n\t\t\t}\n\t\t}\n\tcase TypeMap:\n\t\t// Fast path for maps with primitive key/value types\n\t\tif keyNT, keyOk := t.Key.(NativeType); keyOk {\n\t\t\tif valNT, valOk := t.Elem.(NativeType); valOk {\n\t\t\t\t// String keys are most common\n\t\t\t\tif keyNT.typ == TypeText || keyNT.typ == TypeVarchar {\n\t\t\t\t\tswitch valNT.typ {\n\t\t\t\t\tcase TypeInt:\n\t\t\t\t\t\treturn new(map[string]int), nil\n\t\t\t\t\tcase TypeBigInt:\n\t\t\t\t\t\treturn new(map[string]int64), nil\n\t\t\t\t\tcase TypeText, TypeVarchar:\n\t\t\t\t\t\treturn new(map[string]string), nil\n\t\t\t\t\tcase TypeBoolean:\n\t\t\t\t\t\treturn new(map[string]bool), nil\n\t\t\t\t\tcase TypeFloat:\n\t\t\t\t\t\treturn new(map[string]float32), nil\n\t\t\t\t\tcase TypeDouble:\n\t\t\t\t\t\treturn new(map[string]float64), nil\n\t\t\t\t\tcase TypeUUID:\n\t\t\t\t\t\treturn new(map[string]UUID), nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Int keys\n\t\t\t\tif keyNT.typ == TypeInt {\n\t\t\t\t\tswitch valNT.typ {\n\t\t\t\t\tcase TypeText, TypeVarchar:\n\t\t\t\t\t\treturn new(map[int]string), nil\n\t\t\t\t\tcase TypeInt:\n\t\t\t\t\t\treturn new(map[int]int), nil\n\t\t\t\t\tcase TypeFloat:\n\t\t\t\t\t\treturn new(map[int]float32), nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Fallback to reflection for complex types\n\ttyp, err := goType(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn reflect.New(typ).Interface(), nil\n}\n\nfunc (t CollectionType) String() string {\n\tswitch t.typ {\n\tcase TypeMap:\n\t\treturn fmt.Sprintf(\"%s(%s, %s)\", t.typ, t.Key, t.Elem)\n\tcase TypeList, TypeSet:\n\t\treturn fmt.Sprintf(\"%s(%s)\", t.typ, t.Elem)\n\tcase TypeCustom:\n\t\treturn fmt.Sprintf(\"%s(%s)\", t.typ, t.custom)\n\tdefault:\n\t\treturn t.typ.String()\n\t}\n}\n\nfunc NewTupleType(n NativeType, elems ...TypeInfo) TupleTypeInfo {\n\treturn TupleTypeInfo{\n\t\tNativeType: 
n,\n\t\tElems:      elems,\n\t}\n}\n\ntype TupleTypeInfo struct {\n\tElems []TypeInfo\n\tNativeType\n}\n\nfunc (t TupleTypeInfo) String() string {\n\tvar buf bytes.Buffer\n\tbuf.WriteString(fmt.Sprintf(\"%s(\", t.typ))\n\tfor _, elem := range t.Elems {\n\t\tbuf.WriteString(fmt.Sprintf(\"%s, \", elem))\n\t}\n\tbuf.Truncate(buf.Len() - 2)\n\tbuf.WriteByte(')')\n\treturn buf.String()\n}\n\nfunc (t TupleTypeInfo) NewWithError() (any, error) {\n\t// Tuples scan into *[]any — no reflection needed.\n\treturn new([]any), nil\n}\n\ntype UDTField struct {\n\tType TypeInfo\n\tName string\n}\n\nfunc NewUDTType(proto byte, name, keySpace string, elems ...UDTField) UDTTypeInfo {\n\treturn UDTTypeInfo{\n\t\tNativeType: NativeType{proto: proto, typ: TypeUDT, custom: \"\"},\n\t\tName:       name,\n\t\tKeySpace:   keySpace,\n\t\tElements:   elems,\n\t}\n}\n\ntype UDTTypeInfo struct {\n\tKeySpace string\n\tName     string\n\tElements []UDTField\n\tNativeType\n}\n\nfunc (t UDTTypeInfo) NewWithError() (any, error) {\n\ttyp, err := goType(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn reflect.New(typ).Interface(), nil\n}\n\nfunc (t UDTTypeInfo) String() string {\n\tbuf := &bytes.Buffer{}\n\n\tfmt.Fprintf(buf, \"%s.%s{\", t.KeySpace, t.Name)\n\tfirst := true\n\tfor _, e := range t.Elements {\n\t\tif !first {\n\t\t\tfmt.Fprint(buf, \",\")\n\t\t} else {\n\t\t\tfirst = false\n\t\t}\n\n\t\tfmt.Fprintf(buf, \"%s=%v\", e.Name, e.Type)\n\t}\n\tfmt.Fprint(buf, \"}\")\n\n\treturn buf.String()\n}\n\n// String returns a human readable name for the Cassandra datatype\n// described by t.\n// Type is the identifier of a Cassandra internal datatype.\ntype Type int\n\nconst (\n\tTypeCustom    Type = 0x0000\n\tTypeAscii     Type = 0x0001\n\tTypeBigInt    Type = 0x0002\n\tTypeBlob      Type = 0x0003\n\tTypeBoolean   Type = 0x0004\n\tTypeCounter   Type = 0x0005\n\tTypeDecimal   Type = 0x0006\n\tTypeDouble    Type = 0x0007\n\tTypeFloat     Type = 0x0008\n\tTypeInt       Type = 0x0009\n\tTypeText  
    Type = 0x000A\n\tTypeTimestamp Type = 0x000B\n\tTypeUUID      Type = 0x000C\n\tTypeVarchar   Type = 0x000D\n\tTypeVarint    Type = 0x000E\n\tTypeTimeUUID  Type = 0x000F\n\tTypeInet      Type = 0x0010\n\tTypeDate      Type = 0x0011\n\tTypeTime      Type = 0x0012\n\tTypeSmallInt  Type = 0x0013\n\tTypeTinyInt   Type = 0x0014\n\tTypeDuration  Type = 0x0015\n\tTypeList      Type = 0x0020\n\tTypeMap       Type = 0x0021\n\tTypeSet       Type = 0x0022\n\tTypeUDT       Type = 0x0030\n\tTypeTuple     Type = 0x0031\n)\n\n// String returns the name of the identifier.\nfunc (t Type) String() string {\n\tswitch t {\n\tcase TypeCustom:\n\t\treturn \"custom\"\n\tcase TypeAscii:\n\t\treturn \"ascii\"\n\tcase TypeBigInt:\n\t\treturn \"bigint\"\n\tcase TypeBlob:\n\t\treturn \"blob\"\n\tcase TypeBoolean:\n\t\treturn \"boolean\"\n\tcase TypeCounter:\n\t\treturn \"counter\"\n\tcase TypeDecimal:\n\t\treturn \"decimal\"\n\tcase TypeDouble:\n\t\treturn \"double\"\n\tcase TypeFloat:\n\t\treturn \"float\"\n\tcase TypeInt:\n\t\treturn \"int\"\n\tcase TypeText:\n\t\treturn \"text\"\n\tcase TypeTimestamp:\n\t\treturn \"timestamp\"\n\tcase TypeUUID:\n\t\treturn \"uuid\"\n\tcase TypeVarchar:\n\t\treturn \"varchar\"\n\tcase TypeTimeUUID:\n\t\treturn \"timeuuid\"\n\tcase TypeInet:\n\t\treturn \"inet\"\n\tcase TypeDate:\n\t\treturn \"date\"\n\tcase TypeDuration:\n\t\treturn \"duration\"\n\tcase TypeTime:\n\t\treturn \"time\"\n\tcase TypeSmallInt:\n\t\treturn \"smallint\"\n\tcase TypeTinyInt:\n\t\treturn \"tinyint\"\n\tcase TypeList:\n\t\treturn \"list\"\n\tcase TypeMap:\n\t\treturn \"map\"\n\tcase TypeSet:\n\t\treturn \"set\"\n\tcase TypeVarint:\n\t\treturn \"varint\"\n\tcase TypeTuple:\n\t\treturn \"tuple\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"unknown_type_%d\", t)\n\t}\n}\n\ntype MarshalError struct {\n\tcause error\n\tmsg   string\n}\n\nfunc (m MarshalError) Error() string {\n\tif m.cause != nil {\n\t\treturn m.msg + \": \" + m.cause.Error()\n\t}\n\treturn m.msg\n}\n\nfunc (m MarshalError) 
Cause() error { return m.cause }\n\nfunc (m MarshalError) Unwrap() error {\n\treturn m.cause\n}\n\nfunc marshalErrorf(format string, args ...any) MarshalError {\n\treturn MarshalError{msg: fmt.Sprintf(format, args...)}\n}\n\nfunc wrapMarshalError(err error, msg string) MarshalError {\n\treturn MarshalError{msg: msg, cause: err}\n}\n\nfunc wrapMarshalErrorf(err error, format string, a ...any) MarshalError {\n\treturn MarshalError{msg: fmt.Sprintf(format, a...), cause: err}\n}\n\ntype UnmarshalError struct {\n\tcause error\n\tmsg   string\n}\n\nfunc (m UnmarshalError) Error() string {\n\tif m.cause != nil {\n\t\treturn m.msg + \": \" + m.cause.Error()\n\t}\n\treturn m.msg\n}\n\nfunc (m UnmarshalError) Cause() error { return m.cause }\n\nfunc (m UnmarshalError) Unwrap() error {\n\treturn m.cause\n}\n\nfunc unmarshalErrorf(format string, args ...any) UnmarshalError {\n\treturn UnmarshalError{msg: fmt.Sprintf(format, args...)}\n}\n\nfunc wrapUnmarshalError(err error, msg string) UnmarshalError {\n\treturn UnmarshalError{msg: msg, cause: err}\n}\n\nfunc wrapUnmarshalErrorf(err error, format string, a ...any) UnmarshalError {\n\treturn UnmarshalError{msg: fmt.Sprintf(format, a...), cause: err}\n}\n"
  },
  {
    "path": "marshal_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"math\"\n\t\"math/big\"\n\t\"net\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"gopkg.in/inf.v0\"\n)\n\ntype AliasInt int\ntype AliasUint uint\ntype AliasUint8 uint8\ntype AliasUint16 uint16\ntype AliasUint32 uint32\ntype AliasUint64 uint64\n\nvar marshalTests = []struct {\n\tInfo           TypeInfo\n\tData           []byte\n\tValue          any\n\tMarshalError   error\n\tUnmarshalError error\n}{\n\t{\n\t\tCollectionType{\n\t\t\tNativeType: NativeType{proto: protoVersion3, typ: TypeList},\n\t\t\tElem:       NativeType{proto: protoVersion3, typ: TypeInt},\n\t\t},\n\t\t[]byte(\"\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x02\"),\n\t\tfunc() *[]int {\n\t\t\tl := []int{1, 2}\n\t\t\treturn 
&l\n\t\t}(),\n\t\tnil,\n\t\tnil,\n\t},\n}\n\nvar unmarshalTests = []struct {\n\tInfo           TypeInfo\n\tData           []byte\n\tValue          any\n\tUnmarshalError error\n}{\n\t{\n\t\tCollectionType{\n\t\t\tNativeType: NativeType{proto: protoVersion3, typ: TypeList},\n\t\t\tElem:       NativeType{proto: protoVersion3, typ: TypeInt},\n\t\t},\n\t\t[]byte(\"\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x04\\x00\\x00\"), // truncated data\n\t\tfunc() *[]int {\n\t\t\tl := []int{1, 2}\n\t\t\treturn &l\n\t\t}(),\n\t\tunmarshalErrorf(\"unmarshal list: unexpected eof\"),\n\t},\n}\n\nfunc decimalize(s string) *inf.Dec {\n\ti, _ := new(inf.Dec).SetString(s)\n\treturn i\n}\n\nfunc bigintize(s string) *big.Int {\n\ti, _ := new(big.Int).SetString(s, 10)\n\treturn i\n}\n\nfunc TestMarshal_Encode(t *testing.T) {\n\tt.Parallel()\n\n\tfor i, test := range marshalTests {\n\t\tif test.MarshalError == nil {\n\t\t\tdata, err := Marshal(test.Info, test.Value)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"marshalTest[%d]: %v\", i, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !bytes.Equal(data, test.Data) {\n\t\t\t\tt.Errorf(\"marshalTest[%d]: expected %q, got %q (%#v)\", i, test.Data, data, test.Value)\n\t\t\t}\n\t\t} else {\n\t\t\tif _, err := Marshal(test.Info, test.Value); err != test.MarshalError {\n\t\t\t\tt.Errorf(\"unmarshalTest[%d] (%v=>%t): %#v returned error %#v, want %#v.\", i, test.Info, test.Value, test.Value, err, test.MarshalError)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestMarshal_Decode(t *testing.T) {\n\tt.Parallel()\n\n\tfor i, test := range marshalTests {\n\t\tif test.UnmarshalError == nil {\n\t\t\tv := reflect.New(reflect.TypeOf(test.Value))\n\t\t\terr := Unmarshal(test.Info, test.Data, v.Interface())\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"unmarshalTest[%d] (%v=>%T): %v\", i, test.Info, test.Value, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(v.Elem().Interface(), test.Value) {\n\t\t\t\tt.Errorf(\"unmarshalTest[%d] (%v=>%T): expected %#v, got %#v.\", i, test.Info, 
test.Value, test.Value, v.Elem().Interface())\n\t\t\t}\n\t\t} else {\n\t\t\tif err := Unmarshal(test.Info, test.Data, test.Value); err != test.UnmarshalError {\n\t\t\t\tt.Errorf(\"unmarshalTest[%d] (%v=>%T): %#v returned error %#v, want %#v.\", i, test.Info, test.Value, test.Value, err, test.UnmarshalError)\n\t\t\t}\n\t\t}\n\t}\n\tfor i, test := range unmarshalTests {\n\t\tv := reflect.New(reflect.TypeOf(test.Value))\n\t\tif test.UnmarshalError == nil {\n\t\t\terr := Unmarshal(test.Info, test.Data, v.Interface())\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"unmarshalTest[%d] (%v=>%T): %v\", i, test.Info, test.Value, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(v.Elem().Interface(), test.Value) {\n\t\t\t\tt.Errorf(\"unmarshalTest[%d] (%v=>%T): expected %#v, got %#v.\", i, test.Info, test.Value, test.Value, v.Elem().Interface())\n\t\t\t}\n\t\t} else {\n\t\t\tif err := Unmarshal(test.Info, test.Data, v.Interface()); err != test.UnmarshalError {\n\t\t\t\tt.Errorf(\"unmarshalTest[%d] (%v=>%T): %#v returned error %#v, want %#v.\", i, test.Info, test.Value, test.Value, err, test.UnmarshalError)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc equalStringPointerSlice(leftList, rightList []*string) bool {\n\tif len(leftList) != len(rightList) {\n\t\treturn false\n\t}\n\tfor index := range leftList {\n\t\tif !reflect.DeepEqual(rightList[index], leftList[index]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc TestMarshalList(t *testing.T) {\n\tt.Parallel()\n\n\ttypeInfoV3 := CollectionType{\n\t\tNativeType: NativeType{proto: protoVersion3, typ: TypeList},\n\t\tElem:       NativeType{proto: protoVersion3, typ: TypeVarchar},\n\t}\n\n\ttype tc struct {\n\t\ttypeInfo CollectionType\n\t\tinput    []*string\n\t\texpected []*string\n\t}\n\n\tvalueA := \"valueA\"\n\tvalueB := \"valueB\"\n\tvalueEmpty := \"\"\n\ttestCases := []tc{\n\t\t{\n\t\t\ttypeInfo: typeInfoV3,\n\t\t\tinput:    []*string{&valueEmpty},\n\t\t\texpected: 
[]*string{&valueEmpty},\n\t\t},\n\t\t{\n\t\t\ttypeInfo: typeInfoV3,\n\t\t\tinput:    []*string{nil},\n\t\t\texpected: []*string{nil},\n\t\t},\n\t\t{\n\t\t\ttypeInfo: typeInfoV3,\n\t\t\tinput:    []*string{&valueA, nil, &valueB},\n\t\t\texpected: []*string{&valueA, nil, &valueB},\n\t\t},\n\t}\n\n\tlistDatas := [][]byte{}\n\tfor _, c := range testCases {\n\t\tlistData, marshalErr := Marshal(c.typeInfo, c.input)\n\t\tif nil != marshalErr {\n\t\t\tt.Errorf(\"Error marshal %+v of type %+v: %s\", c.input, c.typeInfo, marshalErr)\n\t\t}\n\t\tlistDatas = append(listDatas, listData)\n\t}\n\n\toutputLists := [][]*string{}\n\n\tvar outputList []*string\n\n\tfor i, listData := range listDatas {\n\t\tif unmarshalErr := Unmarshal(testCases[i].typeInfo, listData, &outputList); nil != unmarshalErr {\n\t\t\tt.Error(unmarshalErr)\n\t\t}\n\t\tresultList := []any{}\n\t\tfor i := range outputList {\n\t\t\tif outputList[i] != nil {\n\t\t\t\tresultList = append(resultList, *outputList[i])\n\t\t\t} else {\n\t\t\t\tresultList = append(resultList, nil)\n\t\t\t}\n\t\t}\n\t\toutputLists = append(outputLists, outputList)\n\t}\n\n\tfor index, c := range testCases {\n\t\toutputList := outputLists[index]\n\t\tif !equalStringPointerSlice(c.expected, outputList) {\n\t\t\tt.Errorf(\"Lists %+v not equal to lists %+v, but should\", c.expected, outputList)\n\t\t}\n\t}\n}\n\ntype CustomString string\n\nfunc (c CustomString) MarshalCQL(info TypeInfo) ([]byte, error) {\n\treturn []byte(strings.ToUpper(string(c))), nil\n}\nfunc (c *CustomString) UnmarshalCQL(info TypeInfo, data []byte) error {\n\t*c = CustomString(strings.ToLower(string(data)))\n\treturn nil\n}\n\ntype MyString string\n\nvar typeLookupTest = []struct {\n\tTypeName     string\n\tExpectedType Type\n}{\n\t{\"AsciiType\", TypeAscii},\n\t{\"LongType\", TypeBigInt},\n\t{\"BytesType\", TypeBlob},\n\t{\"BooleanType\", TypeBoolean},\n\t{\"CounterColumnType\", TypeCounter},\n\t{\"DecimalType\", TypeDecimal},\n\t{\"DoubleType\", 
TypeDouble},\n\t{\"FloatType\", TypeFloat},\n\t{\"Int32Type\", TypeInt},\n\t{\"DateType\", TypeTimestamp},\n\t{\"TimestampType\", TypeTimestamp},\n\t{\"UUIDType\", TypeUUID},\n\t{\"UTF8Type\", TypeVarchar},\n\t{\"IntegerType\", TypeVarint},\n\t{\"TimeUUIDType\", TypeTimeUUID},\n\t{\"InetAddressType\", TypeInet},\n\t{\"MapType\", TypeMap},\n\t{\"ListType\", TypeList},\n\t{\"SetType\", TypeSet},\n\t{\"unknown\", TypeCustom},\n\t{\"ShortType\", TypeSmallInt},\n\t{\"ByteType\", TypeTinyInt},\n}\n\nfunc testType(t *testing.T, cassType string, expectedType Type) {\n\tif computedType := getApacheCassandraType(apacheCassandraTypePrefix + cassType); computedType != expectedType {\n\t\tt.Errorf(\"Cassandra custom type lookup for %s failed. Expected %s, got %s.\", cassType, expectedType.String(), computedType.String())\n\t}\n}\n\nfunc TestLookupCassType(t *testing.T) {\n\tt.Parallel()\n\n\tfor _, lookupTest := range typeLookupTest {\n\t\ttestType(t, lookupTest.TypeName, lookupTest.ExpectedType)\n\t}\n}\n\ntype MyPointerMarshaler struct{}\n\nfunc (m *MyPointerMarshaler) MarshalCQL(_ TypeInfo) ([]byte, error) {\n\treturn []byte{42}, nil\n}\n\nfunc TestMarshalTuple(t *testing.T) {\n\tt.Parallel()\n\n\tinfo := TupleTypeInfo{\n\t\tNativeType: NativeType{proto: protoVersion3, typ: TypeTuple},\n\t\tElems: []TypeInfo{\n\t\t\tNativeType{proto: protoVersion3, typ: TypeVarchar},\n\t\t\tNativeType{proto: protoVersion3, typ: TypeVarchar},\n\t\t},\n\t}\n\n\tstringToPtr := func(s string) *string { return &s }\n\tcheckString := func(t *testing.T, exp string, got string) {\n\t\tif got != exp {\n\t\t\tt.Errorf(\"expected string to be %v, got %v\", exp, got)\n\t\t}\n\t}\n\n\ttype tupleStruct struct {\n\t\tA string\n\t\tB *string\n\t}\n\tvar (\n\t\ts1 *string\n\t\ts2 *string\n\t)\n\n\ttestCases := []struct {\n\t\tname       string\n\t\texpected   []byte\n\t\tvalue      any\n\t\tcheckValue any\n\t\tcheck      func(*testing.T, any)\n\t}{\n\t\t{\n\t\t\tname:       
\"interface-slice:two-strings\",\n\t\t\texpected:   []byte(\"\\x00\\x00\\x00\\x03foo\\x00\\x00\\x00\\x03bar\"),\n\t\t\tvalue:      []any{\"foo\", \"bar\"},\n\t\t\tcheckValue: []any{&s1, &s2},\n\t\t\tcheck: func(t *testing.T, v any) {\n\t\t\t\tcheckString(t, \"foo\", *s1)\n\t\t\t\tcheckString(t, \"bar\", *s2)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"interface-slice:one-string-one-nil-string\",\n\t\t\texpected:   []byte(\"\\x00\\x00\\x00\\x03foo\\xff\\xff\\xff\\xff\"),\n\t\t\tvalue:      []any{\"foo\", nil},\n\t\t\tcheckValue: []any{&s1, &s2},\n\t\t\tcheck: func(t *testing.T, v any) {\n\t\t\t\tcheckString(t, \"foo\", *s1)\n\t\t\t\tif s2 != nil {\n\t\t\t\t\tt.Errorf(\"expected string to be nil, got %v\", *s2)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:     \"struct:two-strings\",\n\t\t\texpected: []byte(\"\\x00\\x00\\x00\\x03foo\\x00\\x00\\x00\\x03bar\"),\n\t\t\tvalue: tupleStruct{\n\t\t\t\tA: \"foo\",\n\t\t\t\tB: stringToPtr(\"bar\"),\n\t\t\t},\n\t\t\tcheckValue: &tupleStruct{},\n\t\t\tcheck: func(t *testing.T, v any) {\n\t\t\t\tgot := v.(*tupleStruct)\n\t\t\t\tif got.A != \"foo\" {\n\t\t\t\t\tt.Errorf(\"expected A string to be %v, got %v\", \"foo\", got.A)\n\t\t\t\t}\n\t\t\t\tif got.B == nil {\n\t\t\t\t\tt.Errorf(\"expected B string to be %v, got nil\", \"bar\")\n\t\t\t\t}\n\t\t\t\tif *got.B != \"bar\" {\n\t\t\t\t\tt.Errorf(\"expected B string to be %v, got %v\", \"bar\", got.B)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"struct:one-string-one-nil-string\",\n\t\t\texpected:   []byte(\"\\x00\\x00\\x00\\x03foo\\xff\\xff\\xff\\xff\"),\n\t\t\tvalue:      tupleStruct{A: \"foo\", B: nil},\n\t\t\tcheckValue: &tupleStruct{},\n\t\t\tcheck: func(t *testing.T, v any) {\n\t\t\t\tgot := v.(*tupleStruct)\n\t\t\t\tif got.A != \"foo\" {\n\t\t\t\t\tt.Errorf(\"expected A string to be %v, got %v\", \"foo\", got.A)\n\t\t\t\t}\n\t\t\t\tif got.B != nil {\n\t\t\t\t\tt.Errorf(\"expected B string to be nil, got %v\", 
*got.B)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:     \"arrayslice:two-strings\",\n\t\t\texpected: []byte(\"\\x00\\x00\\x00\\x03foo\\x00\\x00\\x00\\x03bar\"),\n\t\t\tvalue: [2]*string{\n\t\t\t\tstringToPtr(\"foo\"),\n\t\t\t\tstringToPtr(\"bar\"),\n\t\t\t},\n\t\t\tcheckValue: &[2]*string{},\n\t\t\tcheck: func(t *testing.T, v any) {\n\t\t\t\tgot := v.(*[2]*string)\n\t\t\t\tcheckString(t, \"foo\", *(got[0]))\n\t\t\t\tcheckString(t, \"bar\", *(got[1]))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:     \"arrayslice:one-string-one-nil-string\",\n\t\t\texpected: []byte(\"\\x00\\x00\\x00\\x03foo\\xff\\xff\\xff\\xff\"),\n\t\t\tvalue: [2]*string{\n\t\t\t\tstringToPtr(\"foo\"),\n\t\t\t\tnil,\n\t\t\t},\n\t\t\tcheckValue: &[2]*string{},\n\t\t\tcheck: func(t *testing.T, v any) {\n\t\t\t\tgot := v.(*[2]*string)\n\t\t\t\tcheckString(t, \"foo\", *(got[0]))\n\t\t\t\tif got[1] != nil {\n\t\t\t\t\tt.Errorf(\"expected string to be nil, got %v\", *got[1])\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tdata, err := Marshal(info, tc.value)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"marshalTest[%d]: %v\", i, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !bytes.Equal(data, tc.expected) {\n\t\t\t\tt.Errorf(\"marshalTest[%d]: expected %x, got %x\",\n\t\t\t\t\ti, tc.expected, data)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr = Unmarshal(info, data, tc.checkValue)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"unmarshalTest[%d]: %v\", i, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttc.check(t, tc.checkValue)\n\t\t})\n\t}\n}\n\nfunc TestUnmarshalTuple(t *testing.T) {\n\tt.Parallel()\n\n\tinfo := TupleTypeInfo{\n\t\tNativeType: NativeType{proto: protoVersion3, typ: TypeTuple},\n\t\tElems: []TypeInfo{\n\t\t\tNativeType{proto: protoVersion3, typ: TypeVarchar},\n\t\t\tNativeType{proto: protoVersion3, typ: TypeVarchar},\n\t\t},\n\t}\n\n\t// As per the CQL spec, a tuple is a sequence of \"bytes\" values.\n\t// Here we encode a null value (length -1) and 
the \"foo\" string (length 3)\n\n\tdata := []byte(\"\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x03foo\")\n\n\tt.Run(\"struct-ptr\", func(t *testing.T) {\n\t\tvar tmp struct {\n\t\t\tA *string\n\t\t\tB *string\n\t\t}\n\n\t\terr := Unmarshal(info, data, &tmp)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unmarshalTest: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif tmp.A != nil || *tmp.B != \"foo\" {\n\t\t\tt.Errorf(\"unmarshalTest: expected [nil, foo], got [%v, %v]\", *tmp.A, *tmp.B)\n\t\t}\n\t})\n\tt.Run(\"struct-nonptr\", func(t *testing.T) {\n\t\tvar tmp struct {\n\t\t\tA string\n\t\t\tB string\n\t\t}\n\n\t\terr := Unmarshal(info, data, &tmp)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unmarshalTest: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif tmp.A != \"\" || tmp.B != \"foo\" {\n\t\t\tt.Errorf(\"unmarshalTest: expected [nil, foo], got [%v, %v]\", tmp.A, tmp.B)\n\t\t}\n\t})\n\n\tt.Run(\"array\", func(t *testing.T) {\n\t\tvar tmp [2]*string\n\n\t\terr := Unmarshal(info, data, &tmp)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unmarshalTest: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif tmp[0] != nil || *tmp[1] != \"foo\" {\n\t\t\tt.Errorf(\"unmarshalTest: expected [nil, foo], got [%v, %v]\", *tmp[0], *tmp[1])\n\t\t}\n\t})\n\tt.Run(\"array-nonptr\", func(t *testing.T) {\n\t\tvar tmp [2]string\n\n\t\terr := Unmarshal(info, data, &tmp)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unmarshalTest: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif tmp[0] != \"\" || tmp[1] != \"foo\" {\n\t\t\tt.Errorf(\"unmarshalTest: expected [nil, foo], got [%v, %v]\", tmp[0], tmp[1])\n\t\t}\n\t})\n}\n\nfunc TestMarshalUDTMap(t *testing.T) {\n\tt.Parallel()\n\n\ttypeInfo := UDTTypeInfo{\n\t\tKeySpace: \"\",\n\t\tName:     \"xyz\",\n\t\tElements: []UDTField{\n\t\t\t{Name: \"x\", Type: NativeType{proto: protoVersion3, typ: TypeInt}},\n\t\t\t{Name: \"y\", Type: NativeType{proto: protoVersion3, typ: TypeInt}},\n\t\t\t{Name: \"z\", Type: NativeType{proto: protoVersion3, typ: TypeInt}},\n\t\t},\n\t\tNativeType: NativeType{proto: 
protoVersion3, typ: TypeUDT},\n\t}\n\n\tt.Run(\"partially bound\", func(t *testing.T) {\n\t\tvalue := map[string]any{\n\t\t\t\"y\": 2,\n\t\t\t\"z\": 3,\n\t\t}\n\t\texpected := []byte(\"\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x03\")\n\n\t\tdata, err := Marshal(typeInfo, value)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"got error %#v\", err)\n\t\t}\n\t\tif !bytes.Equal(data, expected) {\n\t\t\tt.Errorf(\"got value %x\", data)\n\t\t}\n\t})\n\tt.Run(\"partially bound from the beginning\", func(t *testing.T) {\n\t\tvalue := map[string]any{\n\t\t\t\"x\": 1,\n\t\t\t\"y\": 2,\n\t\t}\n\t\texpected := []byte(\"\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x02\\xff\\xff\\xff\\xff\")\n\n\t\tdata, err := Marshal(typeInfo, value)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"got error %#v\", err)\n\t\t}\n\t\tif !bytes.Equal(data, expected) {\n\t\t\tt.Errorf(\"got value %x\", data)\n\t\t}\n\t})\n\tt.Run(\"fully bound\", func(t *testing.T) {\n\t\tvalue := map[string]any{\n\t\t\t\"x\": 1,\n\t\t\t\"y\": 2,\n\t\t\t\"z\": 3,\n\t\t}\n\t\texpected := []byte(\"\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x03\")\n\n\t\tdata, err := Marshal(typeInfo, value)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"got error %#v\", err)\n\t\t}\n\t\tif !bytes.Equal(data, expected) {\n\t\t\tt.Errorf(\"got value %x\", data)\n\t\t}\n\t})\n}\n\nfunc TestMarshalUDTStruct(t *testing.T) {\n\tt.Parallel()\n\n\ttypeInfo := UDTTypeInfo{\n\t\tKeySpace: \"\",\n\t\tName:     \"xyz\",\n\t\tElements: []UDTField{\n\t\t\t{Name: \"x\", Type: NativeType{proto: protoVersion3, typ: TypeInt}},\n\t\t\t{Name: \"y\", Type: NativeType{proto: protoVersion3, typ: TypeInt}},\n\t\t\t{Name: \"z\", Type: NativeType{proto: protoVersion3, typ: TypeInt}},\n\t\t},\n\t\tNativeType: NativeType{proto: protoVersion3, typ: TypeUDT},\n\t}\n\n\ttype xyzStruct struct {\n\t\tX int32 `cql:\"x\"`\n\t\tY int32 
`cql:\"y\"`\n\t\tZ int32 `cql:\"z\"`\n\t}\n\ttype xyStruct struct {\n\t\tX int32 `cql:\"x\"`\n\t\tY int32 `cql:\"y\"`\n\t}\n\ttype yzStruct struct {\n\t\tY int32 `cql:\"y\"`\n\t\tZ int32 `cql:\"z\"`\n\t}\n\n\tt.Run(\"partially bound\", func(t *testing.T) {\n\t\tvalue := yzStruct{\n\t\t\tY: 2,\n\t\t\tZ: 3,\n\t\t}\n\t\texpected := []byte(\"\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x03\")\n\n\t\tdata, err := Marshal(typeInfo, value)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"got error %#v\", err)\n\t\t}\n\t\tif !bytes.Equal(data, expected) {\n\t\t\tt.Errorf(\"got value %x\", data)\n\t\t}\n\t})\n\tt.Run(\"partially bound from the beginning\", func(t *testing.T) {\n\t\tvalue := xyStruct{\n\t\t\tX: 1,\n\t\t\tY: 2,\n\t\t}\n\t\texpected := []byte(\"\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x02\\xff\\xff\\xff\\xff\")\n\n\t\tdata, err := Marshal(typeInfo, value)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"got error %#v\", err)\n\t\t}\n\t\tif !bytes.Equal(data, expected) {\n\t\t\tt.Errorf(\"got value %x\", data)\n\t\t}\n\t})\n\tt.Run(\"fully bound\", func(t *testing.T) {\n\t\tvalue := xyzStruct{\n\t\t\tX: 1,\n\t\t\tY: 2,\n\t\t\tZ: 3,\n\t\t}\n\t\texpected := []byte(\"\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x03\")\n\n\t\tdata, err := Marshal(typeInfo, value)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"got error %#v\", err)\n\t\t}\n\t\tif !bytes.Equal(data, expected) {\n\t\t\tt.Errorf(\"got value %x\", data)\n\t\t}\n\t})\n}\n\nfunc TestMarshalNil(t *testing.T) {\n\tt.Parallel()\n\n\ttypes := []Type{\n\t\tTypeAscii,\n\t\tTypeBlob,\n\t\tTypeBoolean,\n\t\tTypeBigInt,\n\t\tTypeCounter,\n\t\tTypeDecimal,\n\t\tTypeDouble,\n\t\tTypeFloat,\n\t\tTypeInt,\n\t\tTypeTimestamp,\n\t\tTypeUUID,\n\t\tTypeVarchar,\n\t\tTypeVarint,\n\t\tTypeTimeUUID,\n\t\tTypeInet,\n\t}\n\n\tfor _, typ := range types {\n\t\tdata, err := Marshal(NativeType{proto: 
protoVersion3, typ: typ}, nil)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unable to marshal nil %v: %v\\n\", typ, err)\n\t\t} else if data != nil {\n\t\t\tt.Errorf(\"expected to get nil byte for nil %v got % X\", typ, data)\n\t\t}\n\t}\n}\n\nfunc TestUnmarshalInetCopyBytes(t *testing.T) {\n\tt.Parallel()\n\n\tdata := []byte{127, 0, 0, 1}\n\tvar ip net.IP\n\tif err := unmarshalInet(data, &ip); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcopy(data, []byte{0xFF, 0xFF, 0xFF, 0xFF})\n\tip2 := net.IP(data)\n\tif !ip.Equal(net.IPv4(127, 0, 0, 1)) {\n\t\tt.Fatalf(\"IP memory shared with data: ip=%v ip2=%v\", ip, ip2)\n\t}\n}\n\nfunc BenchmarkUnmarshalVarchar(b *testing.B) {\n\tb.ReportAllocs()\n\tsrc := make([]byte, 1024)\n\tdst := make([]byte, len(src))\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tif err := unmarshalVarchar(src, &dst); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestReadCollectionSize(t *testing.T) {\n\tt.Parallel()\n\n\tlistV3 := CollectionType{\n\t\tNativeType: NativeType{proto: protoVersion3, typ: TypeList},\n\t\tElem:       NativeType{proto: protoVersion3, typ: TypeVarchar},\n\t}\n\n\ttests := []struct {\n\t\tname         string\n\t\tinfo         CollectionType\n\t\tdata         []byte\n\t\tisError      bool\n\t\texpectedSize int\n\t}{\n\t\t{\n\t\t\tname:    \"short read 0 proto 3\",\n\t\t\tinfo:    listV3,\n\t\t\tdata:    []byte{},\n\t\t\tisError: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"short read 1 proto 3\",\n\t\t\tinfo:    listV3,\n\t\t\tdata:    []byte{0x01},\n\t\t\tisError: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"short read 2 proto 3\",\n\t\t\tinfo:    listV3,\n\t\t\tdata:    []byte{0x01, 0x38},\n\t\t\tisError: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"short read 3 proto 3\",\n\t\t\tinfo:    listV3,\n\t\t\tdata:    []byte{0x01, 0x38, 0x42},\n\t\t\tisError: true,\n\t\t},\n\t\t{\n\t\t\tname:         \"good read proto 3\",\n\t\t\tinfo:         listV3,\n\t\t\tdata:         []byte{0x01, 0x38, 0x42, 0x22},\n\t\t\texpectedSize: 
0x01384222,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tsize, _, err := readCollectionSize(test.info, test.data)\n\t\t\tif test.isError {\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Fatal(\"Expected error, but it was nil\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"Expected no error, got %v\", err)\n\t\t\t\t}\n\t\t\t\tif size != test.expectedSize {\n\t\t\t\t\tt.Fatalf(\"Expected size of %d, but got %d\", test.expectedSize, size)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestReadUnsignedVInt(t *testing.T) {\n\ttests := []struct {\n\t\tdecodedInt  uint64\n\t\tencodedVint []byte\n\t}{\n\t\t{\n\t\t\tdecodedInt:  0,\n\t\t\tencodedVint: []byte{0},\n\t\t},\n\t\t{\n\t\t\tdecodedInt:  100,\n\t\t\tencodedVint: []byte{100},\n\t\t},\n\t\t{\n\t\t\tdecodedInt:  256000,\n\t\t\tencodedVint: []byte{195, 232, 0},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(fmt.Sprintf(\"%d\", test.decodedInt), func(t *testing.T) {\n\t\t\tactual, _, err := readUnsignedVInt(test.encodedVint)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Expected no error, got %v\", err)\n\t\t\t}\n\t\t\tif actual != test.decodedInt {\n\t\t\t\tt.Fatalf(\"Expected %d, but got %d\", test.decodedInt, actual)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc BenchmarkUnmarshalUUID(b *testing.B) {\n\tb.ReportAllocs()\n\tsrc := make([]byte, 16)\n\tdst := UUID{}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tif err := unmarshalUUID(src, &dst); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestUnmarshalUDT(t *testing.T) {\n\tt.Parallel()\n\n\tinfo := UDTTypeInfo{\n\t\tNativeType: NativeType{proto: protoVersion4, typ: TypeUDT},\n\t\tName:       \"myudt\",\n\t\tKeySpace:   \"myks\",\n\t\tElements: []UDTField{\n\t\t\t{\n\t\t\t\tName: \"first\",\n\t\t\t\tType: NativeType{proto: protoVersion4, typ: TypeAscii},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"second\",\n\t\t\t\tType: NativeType{proto: protoVersion4, typ: 
TypeSmallInt},\n\t\t\t},\n\t\t},\n\t}\n\tdata := bytesWithLength( // UDT\n\t\tbytesWithLength([]byte(\"Hello\")),    // first\n\t\tbytesWithLength([]byte(\"\\x00\\x2a\")), // second\n\t)\n\tvalue := map[string]any{}\n\texpectedErr := unmarshalErrorf(\"can not unmarshal into non-pointer map[string]interface {}\")\n\n\tif err := Unmarshal(info, data, value); err != expectedErr {\n\t\tt.Errorf(\"(%v=>%T): %#v returned error %#v, want %#v.\",\n\t\t\tinfo, value, value, err, expectedErr)\n\t}\n}\n\n// TestUnmarshalListIntoInterface tests that lists can be unmarshaled into *any\n// This is used by MapScan and SliceMap functions.\nfunc TestUnmarshalListIntoInterface(t *testing.T) {\n\tt.Parallel()\n\n\t// Create a list of ints: [1, 2]\n\t// Format: [list_size (4 bytes), element_length (4 bytes), element_data, ...]\n\t// Reference: line 63 shows format: \\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x02\n\tdata := []byte{\n\t\t0, 0, 0, 2, // list size: 2 elements\n\t\t0, 0, 0, 4, // element 0 length: 4 bytes\n\t\t0, 0, 0, 1, // element 0 value: 1\n\t\t0, 0, 0, 4, // element 1 length: 4 bytes\n\t\t0, 0, 0, 2, // element 1 value: 2\n\t}\n\n\tinfo := CollectionType{\n\t\tNativeType: NativeType{proto: protoVersion4, typ: TypeList},\n\t\tElem:       NativeType{proto: protoVersion4, typ: TypeInt},\n\t}\n\n\tvar result any\n\tif err := Unmarshal(info, data, &result); err != nil {\n\t\tt.Fatalf(\"Unmarshal failed: %v\", err)\n\t}\n\n\t// Verify the result is a []int\n\tslice, ok := result.([]int)\n\tif !ok {\n\t\tt.Fatalf(\"Expected []int, got %T\", result)\n\t}\n\tif len(slice) != 2 {\n\t\tt.Fatalf(\"Expected 2 elements, got %d\", len(slice))\n\t}\n\texpected := []int{1, 2}\n\tfor i, v := range expected {\n\t\tif slice[i] != v {\n\t\t\tt.Errorf(\"Element %d: expected %d, got %d\", i, v, slice[i])\n\t\t}\n\t}\n}\n\n// TestUnmarshalMapIntoInterface tests that maps can be unmarshaled into *any\n// This is used by MapScan and SliceMap 
functions.\nfunc TestUnmarshalMapIntoInterface(t *testing.T) {\n\tt.Parallel()\n\n\t// Create a map: {\"a\": 1, \"b\": 2}\n\t// Format: [map_size (4 bytes), key_length, key_data, value_length, value_data, ...]\n\tdata := []byte{\n\t\t0, 0, 0, 2, // map size: 2 entries\n\t\t0, 0, 0, 1, // key 0 length: 1 byte\n\t\t'a',        // key 0 value: \"a\"\n\t\t0, 0, 0, 4, // value 0 length: 4 bytes\n\t\t0, 0, 0, 1, // value 0: 1\n\t\t0, 0, 0, 1, // key 1 length: 1 byte\n\t\t'b',        // key 1 value: \"b\"\n\t\t0, 0, 0, 4, // value 1 length: 4 bytes\n\t\t0, 0, 0, 2, // value 1: 2\n\t}\n\n\tinfo := CollectionType{\n\t\tNativeType: NativeType{proto: protoVersion4, typ: TypeMap},\n\t\tKey:        NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t\tElem:       NativeType{proto: protoVersion4, typ: TypeInt},\n\t}\n\n\tvar result any\n\tif err := Unmarshal(info, data, &result); err != nil {\n\t\tt.Fatalf(\"Unmarshal failed: %v\", err)\n\t}\n\n\t// Verify the result is a map[string]int\n\tm, ok := result.(map[string]int)\n\tif !ok {\n\t\tt.Fatalf(\"Expected map[string]int, got %T\", result)\n\t}\n\tif len(m) != 2 {\n\t\tt.Fatalf(\"Expected 2 entries, got %d\", len(m))\n\t}\n\tif m[\"a\"] != 1 {\n\t\tt.Errorf(\"Expected m[\\\"a\\\"] = 1, got %d\", m[\"a\"])\n\t}\n\tif m[\"b\"] != 2 {\n\t\tt.Errorf(\"Expected m[\\\"b\\\"] = 2, got %d\", m[\"b\"])\n\t}\n}\n\n// TestUnmarshalListWithVectorIntoInterface tests that lists containing vectors\n// can be unmarshaled into *any (issue #692)\nfunc TestUnmarshalListWithVectorIntoInterface(t *testing.T) {\n\tt.Parallel()\n\n\t// Create a list of vectors: [[1.0, 2.0], [3.0, 4.0]]\n\t// Vector elements are fixed-size floats (4 bytes each)\n\t// Format: [list_size (4 bytes), element_length, element_data (vector), ...]\n\t// Each vector is 8 bytes (2 floats * 4 bytes)\n\tvar data []byte\n\n\t// List size: 2 vectors\n\tdata = append(data, 0, 0, 0, 2)\n\n\t// Vector 1: [1.0, 2.0]\n\tdata = append(data, 0, 0, 0, 8) // vector length: 8 
bytes\n\tfloat1Bytes := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(float1Bytes, math.Float32bits(1.0))\n\tdata = append(data, float1Bytes...)\n\tfloat2Bytes := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(float2Bytes, math.Float32bits(2.0))\n\tdata = append(data, float2Bytes...)\n\n\t// Vector 2: [3.0, 4.0]\n\tdata = append(data, 0, 0, 0, 8) // vector length: 8 bytes\n\tfloat3Bytes := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(float3Bytes, math.Float32bits(3.0))\n\tdata = append(data, float3Bytes...)\n\tfloat4Bytes := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(float4Bytes, math.Float32bits(4.0))\n\tdata = append(data, float4Bytes...)\n\n\tinfo := CollectionType{\n\t\tNativeType: NativeType{proto: protoVersion4, typ: TypeList},\n\t\tElem: VectorType{\n\t\t\tNativeType: NativeType{proto: protoVersion4, typ: TypeCustom, custom: apacheCassandraTypePrefix + \"VectorType\"},\n\t\t\tSubType:    NativeType{proto: protoVersion4, typ: TypeFloat},\n\t\t\tDimensions: 2,\n\t\t},\n\t}\n\n\tvar result any\n\tif err := Unmarshal(info, data, &result); err != nil {\n\t\tt.Fatalf(\"Unmarshal failed: %v\", err)\n\t}\n\n\t// Verify the result is a [][]float32\n\tslice, ok := result.([][]float32)\n\tif !ok {\n\t\tt.Fatalf(\"Expected [][]float32, got %T\", result)\n\t}\n\tif len(slice) != 2 {\n\t\tt.Fatalf(\"Expected 2 elements, got %d\", len(slice))\n\t}\n\tif len(slice[0]) != 2 || slice[0][0] != 1.0 || slice[0][1] != 2.0 {\n\t\tt.Errorf(\"Expected slice[0] = [1.0, 2.0], got %v\", slice[0])\n\t}\n\tif len(slice[1]) != 2 || slice[1][0] != 3.0 || slice[1][1] != 4.0 {\n\t\tt.Errorf(\"Expected slice[1] = [3.0, 4.0], got %v\", slice[1])\n\t}\n}\n\n// bytesWithLength concatenates all data slices and prepends the total length as uint32.\n// The length does not count the size of the uint32 used for writing the size.\nfunc bytesWithLength(data ...[]byte) []byte {\n\ttotalLen := 0\n\tfor i := range data {\n\t\ttotalLen += len(data[i])\n\t}\n\tif totalLen > math.MaxUint32 
{\n\t\tpanic(\"total length overflows\")\n\t}\n\tret := make([]byte, totalLen+4)\n\tbinary.BigEndian.PutUint32(ret[:4], uint32(totalLen))\n\tbuf := ret[4:]\n\tfor i := range data {\n\t\tn := copy(buf, data[i])\n\t\tbuf = buf[n:]\n\t}\n\treturn ret\n}\n\nfunc TestUnmarshalVectorZeroDimensions(t *testing.T) {\n\tinfo := VectorType{\n\t\tNativeType: NewCustomType(protoVersion4, TypeCustom, apacheCassandraTypePrefix+\"VectorType\"),\n\t\tSubType:    NativeType{proto: protoVersion4, typ: TypeFloat},\n\t\tDimensions: 0,\n\t}\n\n\tt.Run(\"nil_data\", func(t *testing.T) {\n\t\tvar result []float32\n\t\tif err := unmarshalVector(info, nil, &result); err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t})\n\n\tt.Run(\"empty_data\", func(t *testing.T) {\n\t\tvar result []float32\n\t\tif err := unmarshalVector(info, []byte{}, &result); err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t\tif result == nil {\n\t\t\tt.Fatal(\"expected non-nil empty slice\")\n\t\t}\n\t\tif len(result) != 0 {\n\t\t\tt.Fatalf(\"expected len 0, got %d\", len(result))\n\t\t}\n\t})\n\n\tt.Run(\"nonempty_data_errors\", func(t *testing.T) {\n\t\tvar result []float32\n\t\terr := unmarshalVector(info, []byte{0x01, 0x02}, &result)\n\t\tif err == nil {\n\t\t\tt.Fatal(\"expected error for non-empty data with 0 dimensions\")\n\t\t}\n\t\tif !strings.Contains(err.Error(), \"0-dimension\") {\n\t\t\tt.Fatalf(\"expected error mentioning 0-dimension, got: %v\", err)\n\t\t}\n\t})\n\n\tt.Run(\"empty_data_into_zero_length_array\", func(t *testing.T) {\n\t\tvar result [0]float32\n\t\tif err := unmarshalVector(info, []byte{}, &result); err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t})\n\n\tt.Run(\"empty_data_into_nonzero_length_array_errors\", func(t *testing.T) {\n\t\tvar result [5]float32\n\t\terr := unmarshalVector(info, []byte{}, &result)\n\t\tif err == nil {\n\t\t\tt.Fatal(\"expected error for 0-dimension vector into non-zero-length array\")\n\t\t}\n\t\tif 
!strings.Contains(err.Error(), \"array of size 5\") {\n\t\t\tt.Fatalf(\"expected error mentioning array size, got: %v\", err)\n\t\t}\n\t})\n\n\tt.Run(\"empty_data_into_interface\", func(t *testing.T) {\n\t\tvar result any\n\t\tif err := unmarshalVector(info, []byte{}, &result); err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t})\n}\n\n// TestNativeNewWithErrorConsistentWithGoType verifies that the fast-path type mapping\n// in NativeType.NewWithError() stays consistent with the canonical goType() mapping.\n// This guards against future changes to one mapping that forget to update the other.\nfunc TestNativeNewWithErrorConsistentWithGoType(t *testing.T) {\n\t// All NativeType type codes that goType handles (excluding collection/tuple/UDT\n\t// which are separate TypeInfo implementations).\n\tnativeTypes := []Type{\n\t\tTypeVarchar, TypeAscii, TypeText, TypeInet,\n\t\tTypeBigInt, TypeCounter,\n\t\tTypeTime,\n\t\tTypeTimestamp,\n\t\tTypeBlob,\n\t\tTypeBoolean,\n\t\tTypeFloat,\n\t\tTypeDouble,\n\t\tTypeInt,\n\t\tTypeSmallInt,\n\t\tTypeTinyInt,\n\t\tTypeDecimal,\n\t\tTypeUUID, TypeTimeUUID,\n\t\tTypeVarint,\n\t\tTypeDate,\n\t\tTypeDuration,\n\t}\n\n\tfor _, typ := range nativeTypes {\n\t\tnt := NativeType{typ: typ, proto: protoVersion4}\n\n\t\t// Get the fast-path result from NewWithError\n\t\tfastVal, err := nt.NewWithError()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"NewWithError(%s): unexpected error: %v\", typ, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Get the canonical type from goType\n\t\tcanonicalType, err := goType(nt)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"goType(%s): unexpected error: %v\", typ, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// NewWithError returns a pointer (reflect.New(typ).Interface()), so the\n\t\t// underlying type is reflect.TypeOf(val).Elem()\n\t\tfastType := reflect.TypeOf(fastVal)\n\t\tif fastType.Kind() != reflect.Ptr {\n\t\t\tt.Errorf(\"NewWithError(%s): expected pointer, got %s\", typ, 
fastType.Kind())\n\t\t\tcontinue\n\t\t}\n\t\tfastElemType := fastType.Elem()\n\n\t\tif fastElemType != canonicalType {\n\t\t\tt.Errorf(\"NewWithError(%s) fast-path type %s does not match goType() canonical type %s\",\n\t\t\t\ttyp, fastElemType, canonicalType)\n\t\t}\n\t}\n}\n\n// TestCollectionNewWithErrorConsistentWithGoType verifies that the fast-path type mapping\n// in CollectionType.NewWithError() stays consistent with the canonical goType() mapping.\nfunc TestCollectionNewWithErrorConsistentWithGoType(t *testing.T) {\n\telemTypes := []Type{\n\t\tTypeInt, TypeBigInt, TypeCounter,\n\t\tTypeText, TypeVarchar, TypeAscii,\n\t\tTypeBoolean,\n\t\tTypeFloat, TypeDouble,\n\t\tTypeUUID, TypeTimeUUID,\n\t\tTypeTimestamp, TypeDate,\n\t\tTypeSmallInt, TypeTinyInt,\n\t\tTypeBlob,\n\t}\n\n\t// Test list and set types\n\tfor _, collTyp := range []Type{TypeList, TypeSet} {\n\t\tfor _, elemTyp := range elemTypes {\n\t\t\tct := CollectionType{\n\t\t\t\tNativeType: NativeType{typ: collTyp, proto: protoVersion4},\n\t\t\t\tElem:       NativeType{typ: elemTyp, proto: protoVersion4},\n\t\t\t}\n\n\t\t\tfastVal, err := ct.NewWithError()\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"NewWithError(%s<%s>): unexpected error: %v\", collTyp, elemTyp, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcanonicalType, err := goType(ct)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"goType(%s<%s>): unexpected error: %v\", collTyp, elemTyp, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfastType := reflect.TypeOf(fastVal)\n\t\t\tif fastType.Kind() != reflect.Ptr {\n\t\t\t\tt.Errorf(\"NewWithError(%s<%s>): expected pointer, got %s\", collTyp, elemTyp, fastType.Kind())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif fastType.Elem() != canonicalType {\n\t\t\t\tt.Errorf(\"NewWithError(%s<%s>) fast-path type %s does not match goType() canonical type %s\",\n\t\t\t\t\tcollTyp, elemTyp, fastType.Elem(), canonicalType)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Test map types with common key/value combinations\n\tkeyTypes := []Type{TypeText, 
TypeVarchar, TypeInt}\n\tvalTypes := []Type{\n\t\tTypeInt, TypeBigInt,\n\t\tTypeText, TypeVarchar,\n\t\tTypeBoolean,\n\t\tTypeFloat, TypeDouble,\n\t\tTypeUUID,\n\t}\n\n\tfor _, keyTyp := range keyTypes {\n\t\tfor _, valTyp := range valTypes {\n\t\t\tct := CollectionType{\n\t\t\t\tNativeType: NativeType{typ: TypeMap, proto: protoVersion4},\n\t\t\t\tKey:        NativeType{typ: keyTyp, proto: protoVersion4},\n\t\t\t\tElem:       NativeType{typ: valTyp, proto: protoVersion4},\n\t\t\t}\n\n\t\t\tfastVal, err := ct.NewWithError()\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"NewWithError(map<%s, %s>): unexpected error: %v\", keyTyp, valTyp, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcanonicalType, err := goType(ct)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"goType(map<%s, %s>): unexpected error: %v\", keyTyp, valTyp, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfastType := reflect.TypeOf(fastVal)\n\t\t\tif fastType.Kind() != reflect.Ptr {\n\t\t\t\tt.Errorf(\"NewWithError(map<%s, %s>): expected pointer, got %s\", keyTyp, valTyp, fastType.Kind())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif fastType.Elem() != canonicalType {\n\t\t\t\tt.Errorf(\"NewWithError(map<%s, %s>) fast-path type %s does not match goType() canonical type %s\",\n\t\t\t\t\tkeyTyp, valTyp, fastType.Elem(), canonicalType)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "metadata_scylla.go",
    "content": "// Copyright (c) 2015 The gocql Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage gocql\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"maps\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\n\t\"golang.org/x/sync/errgroup\"\n\t\"golang.org/x/sync/singleflight\"\n\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n\t\"github.com/gocql/gocql/tablets\"\n)\n\n// schema metadata for a keyspace\ntype KeyspaceMetadata struct {\n\tStrategyOptions   map[string]any\n\tTables            map[string]*TableMetadata\n\tFunctions         map[string]*FunctionMetadata\n\tAggregates        map[string]*AggregateMetadata\n\tTypes             map[string]*TypeMetadata\n\tIndexes           map[string]*IndexMetadata\n\tViews             map[string]*ViewMetadata\n\ttablesInvalidated map[string]struct{}\n\tName              string\n\tStrategyClass     string\n\tCreateStmts       string\n\tDurableWrites     bool\n}\n\n// Clone returns a shallow copy of the keyspace metadata with\n// cloned Tables, Indexes, Views, and tablesInvalidated maps so that mutations\n// do not race with concurrent readers of the original.\nfunc (ks *KeyspaceMetadata) Clone() *KeyspaceMetadata {\n\tcloned := &KeyspaceMetadata{\n\t\tName:            ks.Name,\n\t\tDurableWrites:   ks.DurableWrites,\n\t\tStrategyClass:   ks.StrategyClass,\n\t\tStrategyOptions: maps.Clone(ks.StrategyOptions),\n\t\tTables:          maps.Clone(ks.Tables),\n\t\tFunctions:       maps.Clone(ks.Functions),\n\t\tAggregates:      maps.Clone(ks.Aggregates),\n\t\tTypes:           maps.Clone(ks.Types),\n\t\tIndexes:         maps.Clone(ks.Indexes),\n\t\tViews:           maps.Clone(ks.Views),\n\t\tCreateStmts:     ks.CreateStmts,\n\t}\n\tif ks.tablesInvalidated != nil {\n\t\tcloned.tablesInvalidated = maps.Clone(ks.tablesInvalidated)\n\t}\n\treturn cloned\n}\n\nfunc (ks *KeyspaceMetadata) removeTableData(tableName string) {\n\tif 
ks.Tables != nil {\n\t\tdelete(ks.Tables, tableName)\n\t}\n\tfor name, idx := range ks.Indexes {\n\t\tif idx != nil && idx.TableName == tableName {\n\t\t\tdelete(ks.Indexes, name)\n\t\t}\n\t}\n\tfor name, view := range ks.Views {\n\t\tif view != nil && view.BaseTableName == tableName {\n\t\t\tdelete(ks.Views, name)\n\t\t}\n\t}\n}\n\nfunc (ks *KeyspaceMetadata) invalidateTable(tableName string) {\n\tks.removeTableData(tableName)\n\tif ks.tablesInvalidated == nil {\n\t\tks.tablesInvalidated = make(map[string]struct{})\n\t}\n\tks.tablesInvalidated[tableName] = struct{}{}\n}\n\nfunc (ks *KeyspaceMetadata) removeTable(tableName string) {\n\tks.removeTableData(tableName)\n\tif ks.tablesInvalidated != nil {\n\t\tdelete(ks.tablesInvalidated, tableName)\n\t}\n}\n\n// schema metadata for a table (a.k.a. column family)\ntype TableMetadata struct {\n\tColumns           map[string]*ColumnMetadata\n\tExtensions        map[string]any\n\tKeyspace          string\n\tName              string\n\tPartitionKey      []*ColumnMetadata\n\tClusteringColumns []*ColumnMetadata\n\tOrderedColumns    []string\n\tFlags             []string\n\tOptions           TableMetadataOptions\n}\n\ntype TableMetadataOptions struct {\n\tCaching                 map[string]string\n\tCompaction              map[string]string\n\tCompression             map[string]string\n\tCDC                     map[string]string\n\tSpeculativeRetry        string\n\tComment                 string\n\tVersion                 string\n\tPartitioner             string\n\tGcGraceSeconds          int\n\tMaxIndexInterval        int\n\tMemtableFlushPeriodInMs int\n\tMinIndexInterval        int\n\tReadRepairChance        float64\n\tBloomFilterFpChance     float64\n\tDefaultTimeToLive       int\n\tDcLocalReadRepairChance float64\n\tCrcCheckChance          float64\n\tInMemory                bool\n}\n\nfunc (t *TableMetadataOptions) Equals(other *TableMetadataOptions) bool {\n\tif t == nil || other == nil {\n\t\treturn t == other // Both 
must be nil to be equal\n\t}\n\n\tif t.BloomFilterFpChance != other.BloomFilterFpChance ||\n\t\tt.Comment != other.Comment ||\n\t\tt.CrcCheckChance != other.CrcCheckChance ||\n\t\tt.DcLocalReadRepairChance != other.DcLocalReadRepairChance ||\n\t\tt.DefaultTimeToLive != other.DefaultTimeToLive ||\n\t\tt.GcGraceSeconds != other.GcGraceSeconds ||\n\t\tt.MaxIndexInterval != other.MaxIndexInterval ||\n\t\tt.MemtableFlushPeriodInMs != other.MemtableFlushPeriodInMs ||\n\t\tt.MinIndexInterval != other.MinIndexInterval ||\n\t\tt.ReadRepairChance != other.ReadRepairChance ||\n\t\tt.SpeculativeRetry != other.SpeculativeRetry ||\n\t\tt.InMemory != other.InMemory ||\n\t\tt.Partitioner != other.Partitioner ||\n\t\tt.Version != other.Version {\n\t\treturn false\n\t}\n\n\tif !compareStringMaps(t.Caching, other.Caching) ||\n\t\t!compareStringMaps(t.Compaction, other.Compaction) ||\n\t\t!compareStringMaps(t.Compression, other.Compression) ||\n\t\t!compareStringMaps(t.CDC, other.CDC) {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\ntype ViewMetadata struct {\n\tColumns                 map[string]*ColumnMetadata\n\tExtensions              map[string]any\n\tWhereClause             string\n\tBaseTableName           string\n\tID                      string\n\tKeyspaceName            string\n\tBaseTableID             string\n\tViewName                string\n\tOrderedColumns          []string\n\tPartitionKey            []*ColumnMetadata\n\tClusteringColumns       []*ColumnMetadata\n\tOptions                 TableMetadataOptions\n\tDcLocalReadRepairChance float64 // After Scylla 4.2 by default read_repair turned off\n\tReadRepairChance        float64 // After Scylla 4.2 by default read_repair turned off\n\tIncludeAllColumns       bool\n}\n\ntype ColumnMetadata struct {\n\tIndex           ColumnIndexMetadata\n\tKeyspace        string\n\tTable           string\n\tName            string\n\tType            string\n\tClusteringOrder string\n\tComponentIndex  int\n\tKind            
ColumnKind\n\tOrder           ColumnOrder\n}\n\nfunc (c *ColumnMetadata) Equals(other *ColumnMetadata) bool {\n\tif c == nil || other == nil {\n\t\treturn c == other\n\t}\n\n\treturn c.Keyspace == other.Keyspace &&\n\t\tc.Table == other.Table &&\n\t\tc.Name == other.Name &&\n\t\tc.ComponentIndex == other.ComponentIndex &&\n\t\tc.Kind == other.Kind &&\n\t\tc.Type == other.Type &&\n\t\tc.ClusteringOrder == other.ClusteringOrder &&\n\t\tc.Order == other.Order &&\n\t\tc.Index.Equals(&other.Index)\n}\n\n// FunctionMetadata holds metadata for function constructs\ntype FunctionMetadata struct {\n\tKeyspace          string\n\tName              string\n\tBody              string\n\tLanguage          string\n\tReturnType        string\n\tArgumentTypes     []string\n\tArgumentNames     []string\n\tCalledOnNullInput bool\n}\n\n// AggregateMetadata holds metadata for aggregate constructs\ntype AggregateMetadata struct {\n\tKeyspace      string\n\tName          string\n\tInitCond      string\n\tReturnType    string\n\tStateType     string\n\tstateFunc     string\n\tfinalFunc     string\n\tArgumentTypes []string\n\tFinalFunc     FunctionMetadata\n\tStateFunc     FunctionMetadata\n}\n\n// TypeMetadata holds the metadata for views.\ntype TypeMetadata struct {\n\tKeyspace   string\n\tName       string\n\tFieldNames []string\n\tFieldTypes []string\n}\n\ntype IndexMetadata struct {\n\tName              string\n\tKeyspaceName      string\n\tTableName         string // Name of corresponding view.\n\tKind              string\n\tOptions           map[string]string\n\tColumns           map[string]*ColumnMetadata\n\tOrderedColumns    []string\n\tPartitionKey      []*ColumnMetadata\n\tClusteringColumns []*ColumnMetadata\n}\n\nfunc (t *TableMetadata) Equals(other *TableMetadata) bool {\n\tif t == nil || other == nil {\n\t\treturn t == other\n\t}\n\n\tif t.Keyspace != other.Keyspace || t.Name != other.Name {\n\t\treturn false\n\t}\n\n\tif len(t.PartitionKey) != len(other.PartitionKey) || 
!compareColumnSlices(t.PartitionKey, other.PartitionKey) {\n\t\treturn false\n\t}\n\n\tif len(t.ClusteringColumns) != len(other.ClusteringColumns) || !compareColumnSlices(t.ClusteringColumns, other.ClusteringColumns) {\n\t\treturn false\n\t}\n\n\tif len(t.Columns) != len(other.Columns) || !compareColumnsMap(t.Columns, other.Columns) {\n\t\treturn false\n\t}\n\n\tif len(t.OrderedColumns) != len(other.OrderedColumns) || !compareStringSlices(t.OrderedColumns, other.OrderedColumns) {\n\t\treturn false\n\t}\n\n\tif !t.Options.Equals(&other.Options) {\n\t\treturn false\n\t}\n\n\tif len(t.Flags) != len(other.Flags) || !compareStringSlices(t.Flags, other.Flags) {\n\t\treturn false\n\t}\n\n\tif len(t.Extensions) != len(other.Extensions) || !compareInterfaceMaps(t.Extensions, other.Extensions) {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc compareColumnSlices(a, b []*ColumnMetadata) bool {\n\tfor i := range a {\n\t\tif !a[i].Equals(b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc compareColumnsMap(a, b map[string]*ColumnMetadata) bool {\n\tfor k, v := range a {\n\t\totherValue, exists := b[k]\n\t\tif !exists || !v.Equals(otherValue) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc compareStringSlices(a, b []string) bool {\n\tfor i := range a {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc compareStringMaps(a, b map[string]string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor k, v := range a {\n\t\tif otherValue, exists := b[k]; !exists || v != otherValue {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc compareInterfaceMaps(a, b map[string]any) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor k, v := range a {\n\t\totherValue, exists := b[k]\n\t\tif !exists || !reflect.DeepEqual(v, otherValue) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// cowTabletList implements a copy on write keyspace metadata map, its equivalent type is 
map[string]*KeyspaceMetadata\ntype cowKeyspaceMetadataMap struct {\n\tkeyspaceMap atomic.Value\n\tmu          sync.Mutex\n}\n\nfunc (c *cowKeyspaceMetadataMap) get() map[string]*KeyspaceMetadata {\n\tl, ok := c.keyspaceMap.Load().(map[string]*KeyspaceMetadata)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn l\n}\n\nfunc (c *cowKeyspaceMetadataMap) getKeyspace(keyspaceName string) (*KeyspaceMetadata, bool) {\n\tm, ok := c.keyspaceMap.Load().(map[string]*KeyspaceMetadata)\n\tif !ok {\n\t\treturn nil, ok\n\t}\n\tval, ok := m[keyspaceName]\n\treturn val, ok\n}\n\nfunc (c *cowKeyspaceMetadataMap) set(keyspaceName string, keyspaceMetadata *KeyspaceMetadata) bool {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tm := c.get()\n\n\tnewM := map[string]*KeyspaceMetadata{}\n\tfor name, metadata := range m {\n\t\tnewM[name] = metadata\n\t}\n\tnewM[keyspaceName] = keyspaceMetadata\n\n\tc.keyspaceMap.Store(newM)\n\treturn true\n}\n\nfunc (c *cowKeyspaceMetadataMap) invalidateTable(keyspaceName, tableName string) {\n\tc.updateKeyspace(keyspaceName, func(ks *KeyspaceMetadata) {\n\t\tks.invalidateTable(tableName)\n\t})\n}\n\nfunc (c *cowKeyspaceMetadataMap) removeKeyspace(keyspaceName string) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tm := c.get()\n\tnewM := maps.Clone(m)\n\tdelete(newM, keyspaceName)\n\n\tc.keyspaceMap.Store(newM)\n}\n\n// updateKeyspace atomically clones a keyspace's mutable maps, applies fn to\n// the clone, and publishes the result. 
This prevents data races between\n// concurrent readers and writers of the same KeyspaceMetadata.\n// Returns false if the keyspace was not found (no update applied).\nfunc (c *cowKeyspaceMetadataMap) updateKeyspace(keyspaceName string, fn func(ks *KeyspaceMetadata)) bool {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tm := c.get()\n\tks, ok := m[keyspaceName]\n\tif !ok || ks == nil {\n\t\treturn false\n\t}\n\n\tcloned := ks.Clone()\n\tfn(cloned)\n\n\tnewM := maps.Clone(m)\n\tnewM[keyspaceName] = cloned\n\tc.keyspaceMap.Store(newM)\n\treturn true\n}\n\nconst (\n\tIndexKindCustom = \"CUSTOM\"\n)\n\n// the ordering of the column with regard to its comparator\ntype ColumnOrder bool\n\nconst (\n\tASC  ColumnOrder = false\n\tDESC             = true\n)\n\ntype ColumnIndexMetadata struct {\n\tOptions map[string]any\n\tName    string\n\tType    string\n}\n\nfunc (c *ColumnIndexMetadata) Equals(other *ColumnIndexMetadata) bool {\n\tif c == nil || other == nil {\n\t\treturn c == other\n\t}\n\n\tif c.Name != other.Name || c.Type != other.Type {\n\t\treturn false\n\t}\n\n\t// Compare the Options map\n\tif len(c.Options) != len(other.Options) {\n\t\treturn false\n\t}\n\tfor k, v := range c.Options {\n\t\totherValue, exists := other.Options[k]\n\t\tif !exists || !reflect.DeepEqual(v, otherValue) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\ntype ColumnKind int\n\nconst (\n\tColumnUnkownKind ColumnKind = iota\n\tColumnPartitionKey\n\tColumnClusteringKey\n\tColumnRegular\n\tColumnCompact\n\tColumnStatic\n)\n\nfunc (c ColumnKind) String() string {\n\tswitch c {\n\tcase ColumnPartitionKey:\n\t\treturn \"partition_key\"\n\tcase ColumnClusteringKey:\n\t\treturn \"clustering_key\"\n\tcase ColumnRegular:\n\t\treturn \"regular\"\n\tcase ColumnCompact:\n\t\treturn \"compact\"\n\tcase ColumnStatic:\n\t\treturn \"static\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"unknown_column_%d\", c)\n\t}\n}\n\nfunc (c *ColumnKind) UnmarshalCQL(typ TypeInfo, p []byte) error {\n\tif typ.Type() != 
TypeVarchar {\n\t\treturn unmarshalErrorf(\"unable to marshall %s into ColumnKind, expected Varchar\", typ)\n\t}\n\n\tkind, err := columnKindFromSchema(string(p))\n\tif err != nil {\n\t\treturn err\n\t}\n\t*c = kind\n\n\treturn nil\n}\n\nfunc columnKindFromSchema(kind string) (ColumnKind, error) {\n\tswitch kind {\n\tcase \"partition_key\":\n\t\treturn ColumnPartitionKey, nil\n\tcase \"clustering_key\", \"clustering\":\n\t\treturn ColumnClusteringKey, nil\n\tcase \"regular\":\n\t\treturn ColumnRegular, nil\n\tcase \"compact_value\":\n\t\treturn ColumnCompact, nil\n\tcase \"static\":\n\t\treturn ColumnStatic, nil\n\tdefault:\n\t\treturn -1, fmt.Errorf(\"unknown column kind: %q\", kind)\n\t}\n}\n\ntype Metadata struct {\n\ttabletsMetadata  *tablets.CowTabletList\n\tkeyspaceMetadata cowKeyspaceMetadataMap\n}\n\n// queries the cluster for schema information for a specific keyspace and for tablets\ntype metadataDescriber struct {\n\tkeyspaceGroup singleflight.Group\n\ttableGroup    singleflight.Group\n\tsession       *Session\n\tmetadata      *Metadata\n\n\t// mu serialises refreshAllSchema calls so the snapshot-compare-refresh\n\t// cycle runs as an atomic batch.  
Individual keyspace/table refreshes\n\t// are deduplicated by the singleflight groups above and do NOT need\n\t// this lock.\n\t//\n\t// Lock ordering: s.mu → cowKeyspaceMetadataMap.mu (never reversed).\n\tmu sync.Mutex\n}\n\n// creates a session bound schema describer which will query and cache\n// keyspace metadata and tablets metadata\nfunc newMetadataDescriber(session *Session) *metadataDescriber {\n\treturn &metadataDescriber{\n\t\tsession: session,\n\t\tmetadata: &Metadata{\n\t\t\ttabletsMetadata: tablets.NewCowTabletList(),\n\t\t},\n\t}\n}\n\nfunc (s *metadataDescriber) getKeyspaceInternal(keyspaceName string) (metadata *KeyspaceMetadata, wasReloaded bool, err error) {\n\tvar found bool\n\tmetadata, found = s.metadata.keyspaceMetadata.getKeyspace(keyspaceName)\n\tif !found {\n\t\twasReloaded = true\n\t\terr = s.deduplicatedRefreshKeyspace(keyspaceName)\n\t\tif err != nil {\n\t\t\treturn metadata, wasReloaded, err\n\t\t}\n\n\t\tmetadata, found = s.metadata.keyspaceMetadata.getKeyspace(keyspaceName)\n\t\tif !found {\n\t\t\treturn nil, true, fmt.Errorf(\"keyspace %s: %w\", keyspaceName, ErrNotFound)\n\t\t}\n\t}\n\n\treturn metadata, wasReloaded, nil\n}\n\nfunc (s *metadataDescriber) GetKeyspace(keyspaceName string) (*KeyspaceMetadata, error) {\n\tmetadata, _, err := s.getKeyspaceInternal(keyspaceName)\n\treturn metadata, err\n}\n\nfunc tableNotFoundError(keyspaceName, tableName string) error {\n\treturn fmt.Errorf(\"table %s.%s: %w\", keyspaceName, tableName, ErrNotFound)\n}\n\n// getTableFromSnapshot resolves a table lookup against a keyspace snapshot.\n// It re-reads the latest published keyspace metadata before deciding that an\n// invalidated table still needs a refresh, which avoids duplicate refreshes\n// for callers holding a stale snapshot.\nfunc (s *metadataDescriber) getTableFromSnapshot(\n\tkeyspaceName, tableName string,\n\tkeyspaceMetadata *KeyspaceMetadata,\n\twasReloaded bool,\n) (tableMetadata *TableMetadata, refreshNeeded bool, err error) 
{\n\tif tableMetadata, found := keyspaceMetadata.Tables[tableName]; found {\n\t\treturn tableMetadata, false, nil\n\t}\n\n\tif latestMetadata, found := s.metadata.keyspaceMetadata.getKeyspace(keyspaceName); found && latestMetadata != keyspaceMetadata {\n\t\tkeyspaceMetadata = latestMetadata\n\t\tif tableMetadata, found := keyspaceMetadata.Tables[tableName]; found {\n\t\t\treturn tableMetadata, false, nil\n\t\t}\n\t}\n\n\tif wasReloaded {\n\t\treturn nil, false, tableNotFoundError(keyspaceName, tableName)\n\t}\n\n\tif _, ok := keyspaceMetadata.tablesInvalidated[tableName]; !ok {\n\t\treturn nil, false, tableNotFoundError(keyspaceName, tableName)\n\t}\n\n\treturn nil, true, nil\n}\n\nfunc (s *metadataDescriber) GetTable(keyspaceName, tableName string) (*TableMetadata, error) {\n\tkeyspaceMetadata, wasReloaded, err := s.getKeyspaceInternal(keyspaceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttableMetadata, refreshNeeded, err := s.getTableFromSnapshot(keyspaceName, tableName, keyspaceMetadata, wasReloaded)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !refreshNeeded {\n\t\treturn tableMetadata, nil\n\t}\n\n\terr = s.deduplicatedRefreshTable(keyspaceName, tableName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkeyspaceMetadata, found := s.metadata.keyspaceMetadata.getKeyspace(keyspaceName)\n\tif !found {\n\t\treturn nil, tableNotFoundError(keyspaceName, tableName)\n\t}\n\n\ttableMetadata, found = keyspaceMetadata.Tables[tableName]\n\tif !found {\n\t\treturn nil, tableNotFoundError(keyspaceName, tableName)\n\t}\n\n\treturn tableMetadata, nil\n}\n\nfunc (s *metadataDescriber) getTablets() tablets.TabletInfoList {\n\treturn s.metadata.tabletsMetadata.Get()\n}\n\nfunc (s *metadataDescriber) getTableTablets(keyspace, table string) tablets.TabletEntryList {\n\treturn s.metadata.tabletsMetadata.GetTableTablets(keyspace, table)\n}\n\nfunc (s *metadataDescriber) forEachTablet(fn func(keyspace, table string, entries tablets.TabletEntryList) bool) 
{\n\ts.metadata.tabletsMetadata.ForEach(fn)\n}\n\nfunc (s *metadataDescriber) AddTablet(tablet tablets.TabletInfo) {\n\ts.metadata.tabletsMetadata.AddTablet(tablet)\n}\n\n// RemoveTabletsWithHost removes tablets that contains given host.\n// to be used outside the metadataDescriber\nfunc (s *metadataDescriber) RemoveTabletsWithHost(host *HostInfo) {\n\ts.metadata.tabletsMetadata.RemoveTabletsWithHost(tablets.HostUUID(host.hostUUID()))\n}\n\n// RemoveTabletsWithKeyspace removes tablets for given keyspace.\n// to be used outside the metadataDescriber\nfunc (s *metadataDescriber) RemoveTabletsWithKeyspace(keyspace string) {\n\ts.metadata.tabletsMetadata.RemoveTabletsWithKeyspace(keyspace)\n}\n\n// RemoveTabletsWithTable removes tablets for given table.\n// to be used outside the metadataDescriber\nfunc (s *metadataDescriber) RemoveTabletsWithTable(keyspace string, table string) {\n\ts.metadata.tabletsMetadata.RemoveTabletsWithTable(keyspace, table)\n}\n\n// invalidateKeyspaceSchema clears the cached keyspace metadata\nfunc (s *metadataDescriber) invalidateKeyspaceSchema(keyspaceName string) {\n\ts.metadata.keyspaceMetadata.removeKeyspace(keyspaceName)\n}\n\nfunc (s *metadataDescriber) invalidateTableSchema(keyspaceName, tableName string) {\n\ts.metadata.keyspaceMetadata.invalidateTable(keyspaceName, tableName)\n}\n\n// deduplicatedRefreshKeyspace collapses concurrent refreshKeyspaceSchema calls\n// for the same keyspace into a single in-flight operation.\nfunc (s *metadataDescriber) deduplicatedRefreshKeyspace(keyspaceName string) error {\n\t_, err, _ := s.keyspaceGroup.Do(keyspaceName, func() (any, error) {\n\t\treturn nil, s.refreshKeyspaceSchema(keyspaceName)\n\t})\n\treturn err\n}\n\n// deduplicatedRefreshTable collapses concurrent refreshTableSchema calls\n// for the same keyspace/table into a single in-flight operation.\nfunc (s *metadataDescriber) deduplicatedRefreshTable(keyspaceName, tableName string) error {\n\tkey := keyspaceName + \"\\x00\" + 
tableName\n\t_, err, _ := s.tableGroup.Do(key, func() (any, error) {\n\t\treturn nil, s.refreshTableSchema(keyspaceName, tableName)\n\t})\n\treturn err\n}\n\nfunc (s *metadataDescriber) refreshAllSchema() error {\n\t// mu serialises concurrent refreshAllSchema calls so each one sees a\n\t// consistent snapshot before deciding what changed.  Individual keyspace\n\t// refreshes inside the loop go through singleflight, so two overlapping\n\t// refreshAllSchema calls will not duplicate network queries — the second\n\t// caller blocks on mu while the first finishes.\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tcopiedMap := make(map[string]*KeyspaceMetadata)\n\tfor key, value := range s.metadata.keyspaceMetadata.get() {\n\t\tif value != nil {\n\t\t\tcopiedMap[key] = value.Clone()\n\t\t} else {\n\t\t\tcopiedMap[key] = nil\n\t\t}\n\t}\n\n\tfor keyspaceName, metadata := range copiedMap {\n\t\t// Route through singleflight to dedup concurrent refreshes.\n\t\terr := s.deduplicatedRefreshKeyspace(keyspaceName)\n\t\tif errors.Is(err, ErrKeyspaceDoesNotExist) {\n\t\t\ts.invalidateKeyspaceSchema(keyspaceName)\n\t\t\ts.RemoveTabletsWithKeyspace(keyspaceName)\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tupdatedMetadata, err := s.GetKeyspace(keyspaceName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !compareInterfaceMaps(metadata.StrategyOptions, updatedMetadata.StrategyOptions) {\n\t\t\ts.RemoveTabletsWithKeyspace(keyspaceName)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor tableName, tableMetadata := range metadata.Tables {\n\t\t\tif updatedTableMetadata, ok := updatedMetadata.Tables[tableName]; !ok || !tableMetadata.Equals(updatedTableMetadata) {\n\t\t\t\ts.RemoveTabletsWithTable(keyspaceName, tableName)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// forcibly updates the current KeyspaceMetadata held by the schema describer\n// for a given named keyspace.\n//\n// All system schema queries are issued concurrently since none of them\n// depend on each 
other's results. The results are only combined in\n// compileMetadata after all queries complete.\nfunc (s *metadataDescriber) refreshKeyspaceSchema(keyspaceName string) error {\n\tvar (\n\t\tkeyspace    *KeyspaceMetadata\n\t\ttables      []TableMetadata\n\t\tcolumns     []ColumnMetadata\n\t\tfunctions   []FunctionMetadata\n\t\taggregates  []AggregateMetadata\n\t\ttypes       []TypeMetadata\n\t\tindexes     []IndexMetadata\n\t\tviews       []ViewMetadata\n\t\tcreateStmts []byte\n\t)\n\n\t// Each goroutine writes to its own dedicated variable, so no\n\t// synchronisation is needed beyond errgroup itself.\n\tvar g errgroup.Group\n\n\tg.Go(func() error {\n\t\tvar err error\n\t\tkeyspace, err = getKeyspaceMetadata(s.session, keyspaceName)\n\t\treturn err\n\t})\n\tg.Go(func() error {\n\t\tvar err error\n\t\ttables, err = getTableMetadata(s.session, keyspaceName)\n\t\treturn err\n\t})\n\tg.Go(func() error {\n\t\tvar err error\n\t\tcolumns, err = getColumnMetadata(s.session, keyspaceName)\n\t\treturn err\n\t})\n\tg.Go(func() error {\n\t\tvar err error\n\t\tfunctions, err = getFunctionsMetadata(s.session, keyspaceName)\n\t\treturn err\n\t})\n\tg.Go(func() error {\n\t\tvar err error\n\t\taggregates, err = getAggregatesMetadata(s.session, keyspaceName)\n\t\treturn err\n\t})\n\tg.Go(func() error {\n\t\tvar err error\n\t\ttypes, err = getTypeMetadata(s.session, keyspaceName)\n\t\treturn err\n\t})\n\tg.Go(func() error {\n\t\tvar err error\n\t\tindexes, err = getIndexMetadata(s.session, keyspaceName)\n\t\treturn err\n\t})\n\tg.Go(func() error {\n\t\tvar err error\n\t\tviews, err = getViewMetadata(s.session, keyspaceName)\n\t\treturn err\n\t})\n\tg.Go(func() error {\n\t\tvar err error\n\t\tcreateStmts, err = getCreateStatements(s.session, keyspaceName)\n\t\treturn err\n\t})\n\n\tif err := g.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\tcompileMetadata(keyspace, tables, columns, functions, aggregates, types, indexes, views, 
createStmts)\n\n\ts.metadata.keyspaceMetadata.set(keyspaceName, keyspace)\n\n\treturn nil\n}\n\nfunc (s *metadataDescriber) refreshTableSchema(keyspaceName, tableName string) error {\n\t_, found := s.metadata.keyspaceMetadata.getKeyspace(keyspaceName)\n\tif !found {\n\t\treturn s.deduplicatedRefreshKeyspace(keyspaceName)\n\t}\n\n\t// Perform network queries outside the lock.\n\ttables, err := getTableMetadataByName(s.session, keyspaceName, tableName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcolumns, err := getColumnMetadataByTable(s.session, keyspaceName, tableName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tindexes, err := getIndexMetadataByTable(s.session, keyspaceName, tableName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tviews, err := getViewMetadataByTable(s.session, keyspaceName, tableName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Atomically clone-and-swap the keyspace metadata to avoid data races\n\t// with concurrent readers.\n\tapplied := s.metadata.keyspaceMetadata.updateKeyspace(keyspaceName, func(ks *KeyspaceMetadata) {\n\t\tif len(tables) == 0 {\n\t\t\tks.removeTable(tableName)\n\t\t} else {\n\t\t\tcompileTableMetadata(ks, tables, columns, indexes, views)\n\t\t\tif ks.tablesInvalidated != nil {\n\t\t\t\tdelete(ks.tablesInvalidated, tableName)\n\t\t\t}\n\t\t}\n\t})\n\tif !applied {\n\t\t// Keyspace was removed between the initial check and the update.\n\t\t// Fall back to a full keyspace refresh to recover.\n\t\treturn s.deduplicatedRefreshKeyspace(keyspaceName)\n\t}\n\treturn nil\n}\n\n// \"compiles\" derived information about keyspace, table, and column metadata\n// for a keyspace from the basic queried metadata objects returned by\n// getKeyspaceMetadata, getTableMetadata, and getColumnMetadata respectively;\n// Links the metadata objects together and derives the column composition of\n// the partition key and clustering key for a table.\nfunc compileMetadata(\n\tkeyspace *KeyspaceMetadata,\n\ttables []TableMetadata,\n\tcolumns 
[]ColumnMetadata,\n\tfunctions []FunctionMetadata,\n\taggregates []AggregateMetadata,\n\ttypes []TypeMetadata,\n\tindexes []IndexMetadata,\n\tviews []ViewMetadata,\n\tcreateStmts []byte,\n) {\n\tkeyspace.Tables = make(map[string]*TableMetadata)\n\tfor i := range tables {\n\t\ttables[i].Columns = make(map[string]*ColumnMetadata)\n\t\tkeyspace.Tables[tables[i].Name] = &tables[i]\n\t}\n\tkeyspace.Functions = make(map[string]*FunctionMetadata, len(functions))\n\tfor i := range functions {\n\t\tkeyspace.Functions[functions[i].Name] = &functions[i]\n\t}\n\tkeyspace.Aggregates = make(map[string]*AggregateMetadata, len(aggregates))\n\tfor _, aggregate := range aggregates {\n\t\taggregate.FinalFunc = *keyspace.Functions[aggregate.finalFunc]\n\t\taggregate.StateFunc = *keyspace.Functions[aggregate.stateFunc]\n\t\tkeyspace.Aggregates[aggregate.Name] = &aggregate\n\t}\n\tkeyspace.Types = make(map[string]*TypeMetadata, len(types))\n\tfor i := range types {\n\t\tkeyspace.Types[types[i].Name] = &types[i]\n\t}\n\tkeyspace.Indexes = make(map[string]*IndexMetadata, len(indexes))\n\tfor i := range indexes {\n\t\tindexes[i].Columns = make(map[string]*ColumnMetadata)\n\t\tkeyspace.Indexes[indexes[i].Name] = &indexes[i]\n\n\t}\n\tkeyspace.Views = make(map[string]*ViewMetadata, len(views))\n\tfor i := range views {\n\t\tv := &views[i]\n\t\tif _, ok := keyspace.Indexes[strings.TrimSuffix(v.ViewName, \"_index\")]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tv.Columns = make(map[string]*ColumnMetadata)\n\t\tkeyspace.Views[v.ViewName] = v\n\t}\n\n\t// add columns from the schema data\n\tfor i := range columns {\n\t\tcol := &columns[i]\n\t\tcol.Order = ASC\n\t\tif col.ClusteringOrder == \"desc\" {\n\t\t\tcol.Order = DESC\n\t\t}\n\n\t\ttable, ok := keyspace.Tables[col.Table]\n\t\tif !ok {\n\t\t\t// If column owned by a table that the table name ends with `_index`\n\t\t\t// suffix then the table is a view corresponding to some index.\n\t\t\tif indexName, found := strings.CutSuffix(col.Table, 
\"_index\"); found {\n\t\t\t\tix, ok := keyspace.Indexes[indexName]\n\t\t\t\tif ok {\n\t\t\t\t\tix.Columns[col.Name] = col\n\t\t\t\t\tix.OrderedColumns = append(ix.OrderedColumns, col.Name)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tview, ok := keyspace.Views[col.Table]\n\t\t\tif !ok {\n\t\t\t\t// if the schema is being updated we will race between seeing\n\t\t\t\t// the metadata be complete. Potentially we should check for\n\t\t\t\t// schema versions before and after reading the metadata and\n\t\t\t\t// if they dont match try again.\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tview.Columns[col.Name] = col\n\t\t\tview.OrderedColumns = append(view.OrderedColumns, col.Name)\n\t\t\tcontinue\n\t\t}\n\n\t\ttable.Columns[col.Name] = col\n\t\ttable.OrderedColumns = append(table.OrderedColumns, col.Name)\n\t}\n\n\tfor i := range tables {\n\t\tt := &tables[i]\n\t\tt.PartitionKey, t.ClusteringColumns, t.OrderedColumns = compileColumns(t.Columns, t.OrderedColumns)\n\t}\n\tfor i := range views {\n\t\tv := &views[i]\n\t\tv.PartitionKey, v.ClusteringColumns, v.OrderedColumns = compileColumns(v.Columns, v.OrderedColumns)\n\t}\n\tfor i := range indexes {\n\t\tix := &indexes[i]\n\t\tix.PartitionKey, ix.ClusteringColumns, ix.OrderedColumns = compileColumns(ix.Columns, ix.OrderedColumns)\n\t}\n\n\tkeyspace.CreateStmts = string(createStmts)\n}\n\nfunc compileTableMetadata(\n\tkeyspace *KeyspaceMetadata,\n\ttables []TableMetadata,\n\tcolumns []ColumnMetadata,\n\tindexes []IndexMetadata,\n\tviews []ViewMetadata,\n) {\n\tif keyspace.Tables == nil {\n\t\tkeyspace.Tables = make(map[string]*TableMetadata)\n\t}\n\tfor i := range tables {\n\t\ttables[i].Columns = make(map[string]*ColumnMetadata)\n\t\tkeyspace.Tables[tables[i].Name] = &tables[i]\n\t}\n\n\tif keyspace.Indexes == nil {\n\t\tkeyspace.Indexes = make(map[string]*IndexMetadata)\n\t}\n\tfor name, ix := range keyspace.Indexes {\n\t\tfor i := range tables {\n\t\t\tif ix.TableName == tables[i].Name {\n\t\t\t\tdelete(keyspace.Indexes, 
name)\n\t\t\t}\n\t\t}\n\t}\n\tfor i := range indexes {\n\t\tindexes[i].Columns = make(map[string]*ColumnMetadata)\n\t\tkeyspace.Indexes[indexes[i].Name] = &indexes[i]\n\t}\n\n\tif keyspace.Views == nil {\n\t\tkeyspace.Views = make(map[string]*ViewMetadata)\n\t}\n\tfor name, v := range keyspace.Views {\n\t\tfor i := range tables {\n\t\t\tif v.BaseTableName == tables[i].Name {\n\t\t\t\tdelete(keyspace.Views, name)\n\t\t\t}\n\t\t}\n\t}\n\tfor i := range views {\n\t\tv := &views[i]\n\t\tif _, ok := keyspace.Indexes[strings.TrimSuffix(v.ViewName, \"_index\")]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tv.Columns = make(map[string]*ColumnMetadata)\n\t\tkeyspace.Views[v.ViewName] = v\n\t}\n\n\tfor i := range columns {\n\t\tcol := &columns[i]\n\t\tcol.Order = ASC\n\t\tif col.ClusteringOrder == \"desc\" {\n\t\t\tcol.Order = DESC\n\t\t}\n\n\t\ttable, ok := keyspace.Tables[col.Table]\n\t\tif !ok {\n\t\t\tif indexName, found := strings.CutSuffix(col.Table, \"_index\"); found {\n\t\t\t\tix, ok := keyspace.Indexes[indexName]\n\t\t\t\tif ok {\n\t\t\t\t\tix.Columns[col.Name] = col\n\t\t\t\t\tix.OrderedColumns = append(ix.OrderedColumns, col.Name)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tview, ok := keyspace.Views[col.Table]\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tview.Columns[col.Name] = col\n\t\t\tview.OrderedColumns = append(view.OrderedColumns, col.Name)\n\t\t\tcontinue\n\t\t}\n\n\t\ttable.Columns[col.Name] = col\n\t\ttable.OrderedColumns = append(table.OrderedColumns, col.Name)\n\t}\n\n\tfor i := range tables {\n\t\tt := &tables[i]\n\t\tt.PartitionKey, t.ClusteringColumns, t.OrderedColumns = compileColumns(t.Columns, t.OrderedColumns)\n\t}\n\tfor i := range views {\n\t\tv := &views[i]\n\t\tv.PartitionKey, v.ClusteringColumns, v.OrderedColumns = compileColumns(v.Columns, v.OrderedColumns)\n\t}\n\tfor i := range indexes {\n\t\tix := &indexes[i]\n\t\tix.PartitionKey, ix.ClusteringColumns, ix.OrderedColumns = compileColumns(ix.Columns, ix.OrderedColumns)\n\t}\n}\n\nfunc 
compileColumns(columns map[string]*ColumnMetadata, orderedColumns []string) (\n\tpartitionKey, clusteringColumns []*ColumnMetadata, sortedColumns []string) {\n\tclusteringColumnCount := componentColumnCountOfType(columns, ColumnClusteringKey)\n\tclusteringColumns = make([]*ColumnMetadata, clusteringColumnCount)\n\n\tpartitionKeyCount := componentColumnCountOfType(columns, ColumnPartitionKey)\n\tpartitionKey = make([]*ColumnMetadata, partitionKeyCount)\n\n\tvar otherColumns []string\n\tfor _, columnName := range orderedColumns {\n\t\tcolumn := columns[columnName]\n\t\tif column.Kind == ColumnPartitionKey {\n\t\t\tpartitionKey[column.ComponentIndex] = column\n\t\t} else if column.Kind == ColumnClusteringKey {\n\t\t\tclusteringColumns[column.ComponentIndex] = column\n\t\t} else {\n\t\t\totherColumns = append(otherColumns, columnName)\n\t\t}\n\t}\n\n\tsortedColumns = orderedColumns[:0]\n\tfor _, pk := range partitionKey {\n\t\tsortedColumns = append(sortedColumns, pk.Name)\n\t}\n\tfor _, ck := range clusteringColumns {\n\t\tsortedColumns = append(sortedColumns, ck.Name)\n\t}\n\tfor _, oc := range otherColumns {\n\t\tsortedColumns = append(sortedColumns, oc)\n\t}\n\n\treturn\n}\n\n// returns the count of coluns with the given \"kind\" value.\nfunc componentColumnCountOfType(columns map[string]*ColumnMetadata, kind ColumnKind) int {\n\tmaxComponentIndex := -1\n\tfor _, column := range columns {\n\t\tif column.Kind == kind && column.ComponentIndex > maxComponentIndex {\n\t\t\tmaxComponentIndex = column.ComponentIndex\n\t\t}\n\t}\n\treturn maxComponentIndex + 1\n}\n\n// query for keyspace metadata in the system_schema.keyspaces\nfunc getKeyspaceMetadata(session *Session, keyspaceName string) (*KeyspaceMetadata, error) {\n\tif !session.useSystemSchema {\n\t\treturn nil, ErrKeyspaceDoesNotExist\n\t}\n\tkeyspace := &KeyspaceMetadata{Name: keyspaceName}\n\n\tconst stmt = `SELECT durable_writes, replication FROM system_schema.keyspaces WHERE keyspace_name = ?`\n\n\tvar 
replication map[string]string\n\n\titer := session.control.querySystem(stmt, keyspaceName)\n\tif iter.NumRows() == 0 {\n\t\titer.Close()\n\t\treturn nil, ErrKeyspaceDoesNotExist\n\t}\n\titer.Scan(&keyspace.DurableWrites, &replication)\n\terr := iter.Close()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error querying keyspace schema: %v\", err)\n\t}\n\n\tkeyspace.StrategyClass = replication[\"class\"]\n\tdelete(replication, \"class\")\n\n\tkeyspace.StrategyOptions = make(map[string]any, len(replication))\n\tfor k, v := range replication {\n\t\tkeyspace.StrategyOptions[k] = v\n\t}\n\n\treturn keyspace, nil\n}\n\n// query for table metadata in the system_schema.tables, and system_schema.scylla_tables\n// if connected to ScyllaDB\nfunc getTableMetadata(session *Session, keyspaceName string) ([]TableMetadata, error) {\n\tif !session.useSystemSchema {\n\t\treturn nil, nil\n\t}\n\n\tstmt := `SELECT * FROM system_schema.tables WHERE keyspace_name = ?`\n\titer := session.control.querySystem(stmt, keyspaceName)\n\n\tvar tables []TableMetadata\n\ttable := TableMetadata{Keyspace: keyspaceName}\n\tfor iter.MapScan(map[string]any{\n\t\t\"table_name\":                  &table.Name,\n\t\t\"bloom_filter_fp_chance\":      &table.Options.BloomFilterFpChance,\n\t\t\"caching\":                     &table.Options.Caching,\n\t\t\"comment\":                     &table.Options.Comment,\n\t\t\"compaction\":                  &table.Options.Compaction,\n\t\t\"compression\":                 &table.Options.Compression,\n\t\t\"crc_check_chance\":            &table.Options.CrcCheckChance,\n\t\t\"default_time_to_live\":        &table.Options.DefaultTimeToLive,\n\t\t\"gc_grace_seconds\":            &table.Options.GcGraceSeconds,\n\t\t\"max_index_interval\":          &table.Options.MaxIndexInterval,\n\t\t\"memtable_flush_period_in_ms\": &table.Options.MemtableFlushPeriodInMs,\n\t\t\"min_index_interval\":          &table.Options.MinIndexInterval,\n\t\t\"speculative_retry\":           
&table.Options.SpeculativeRetry,\n\t\t\"flags\":                       &table.Flags,\n\t\t\"extensions\":                  &table.Extensions,\n\t}) {\n\t\ttables = append(tables, table)\n\t\ttable = TableMetadata{Keyspace: keyspaceName}\n\t}\n\n\terr := iter.Close()\n\tif err != nil && err != ErrNotFound {\n\t\treturn nil, fmt.Errorf(\"error querying table schema: %v\", err)\n\t}\n\n\tconn := session.getConn()\n\tif conn == nil || !conn.isScyllaConn() {\n\t\treturn tables, nil\n\t}\n\n\t// Fetch all ScyllaDB-specific table properties in a single query\n\t// instead of issuing one query per table (N+1 elimination).\n\tstmt = `SELECT * FROM system_schema.scylla_tables WHERE keyspace_name = ?`\n\titer = session.control.querySystem(stmt, keyspaceName)\n\n\tscyllaOpts := make(map[string]TableMetadataOptions, len(tables))\n\tvar opts TableMetadataOptions\n\tvar tblName string\n\tfor iter.MapScan(map[string]any{\n\t\t\"table_name\":  &tblName,\n\t\t\"cdc\":         &opts.CDC,\n\t\t\"in_memory\":   &opts.InMemory,\n\t\t\"partitioner\": &opts.Partitioner,\n\t\t\"version\":     &opts.Version,\n\t}) {\n\t\tscyllaOpts[tblName] = opts\n\t\topts = TableMetadataOptions{}\n\t\ttblName = \"\"\n\t}\n\tif err := iter.Close(); err != nil && err != ErrNotFound {\n\t\treturn nil, fmt.Errorf(\"error querying scylla table schema: %v\", err)\n\t}\n\n\tfor i, t := range tables {\n\t\tif sopts, ok := scyllaOpts[t.Name]; ok {\n\t\t\ttables[i].Options.CDC = sopts.CDC\n\t\t\ttables[i].Options.InMemory = sopts.InMemory\n\t\t\ttables[i].Options.Partitioner = sopts.Partitioner\n\t\t\ttables[i].Options.Version = sopts.Version\n\t\t}\n\t}\n\n\treturn tables, nil\n}\n\nfunc getTableMetadataByName(session *Session, keyspaceName, tableName string) ([]TableMetadata, error) {\n\tif !session.useSystemSchema {\n\t\treturn nil, nil\n\t}\n\n\tstmt := `SELECT * FROM system_schema.tables WHERE keyspace_name = ? 
AND table_name = ?`\n\titer := session.control.querySystem(stmt, keyspaceName, tableName)\n\n\tvar tables []TableMetadata\n\ttable := TableMetadata{Keyspace: keyspaceName}\n\tfor iter.MapScan(map[string]any{\n\t\t\"table_name\":                  &table.Name,\n\t\t\"bloom_filter_fp_chance\":      &table.Options.BloomFilterFpChance,\n\t\t\"caching\":                     &table.Options.Caching,\n\t\t\"comment\":                     &table.Options.Comment,\n\t\t\"compaction\":                  &table.Options.Compaction,\n\t\t\"compression\":                 &table.Options.Compression,\n\t\t\"crc_check_chance\":            &table.Options.CrcCheckChance,\n\t\t\"default_time_to_live\":        &table.Options.DefaultTimeToLive,\n\t\t\"gc_grace_seconds\":            &table.Options.GcGraceSeconds,\n\t\t\"max_index_interval\":          &table.Options.MaxIndexInterval,\n\t\t\"memtable_flush_period_in_ms\": &table.Options.MemtableFlushPeriodInMs,\n\t\t\"min_index_interval\":          &table.Options.MinIndexInterval,\n\t\t\"speculative_retry\":           &table.Options.SpeculativeRetry,\n\t\t\"flags\":                       &table.Flags,\n\t\t\"extensions\":                  &table.Extensions,\n\t}) {\n\t\ttables = append(tables, table)\n\t\ttable = TableMetadata{Keyspace: keyspaceName}\n\t}\n\n\terr := iter.Close()\n\tif err != nil && err != ErrNotFound {\n\t\treturn nil, fmt.Errorf(\"error querying table schema: %w\", err)\n\t}\n\n\tif conn := session.getConn(); conn == nil || !conn.isScyllaConn() {\n\t\treturn tables, nil\n\t}\n\n\tstmt = `SELECT * FROM system_schema.scylla_tables WHERE keyspace_name = ? 
AND table_name = ?`\n\tfor i, t := range tables {\n\t\titer := session.control.querySystem(stmt, keyspaceName, t.Name)\n\n\t\ttable := TableMetadata{}\n\t\tif iter.MapScan(map[string]any{\n\t\t\t\"cdc\":         &table.Options.CDC,\n\t\t\t\"in_memory\":   &table.Options.InMemory,\n\t\t\t\"partitioner\": &table.Options.Partitioner,\n\t\t\t\"version\":     &table.Options.Version,\n\t\t}) {\n\t\t\ttables[i].Options.CDC = table.Options.CDC\n\t\t\ttables[i].Options.Version = table.Options.Version\n\t\t\ttables[i].Options.Partitioner = table.Options.Partitioner\n\t\t\ttables[i].Options.InMemory = table.Options.InMemory\n\t\t}\n\t\tif err := iter.Close(); err != nil && err != ErrNotFound {\n\t\t\treturn nil, fmt.Errorf(\"error querying scylla table schema: %w\", err)\n\t\t}\n\t}\n\n\treturn tables, nil\n}\n\n// columnMetadataColumns lists the columns consumed by getColumnMetadata and\n// getColumnMetadataByTable. Selecting only these columns (instead of SELECT *)\n// avoids deserializing unused fields such as keyspace_name (already known from\n// the WHERE clause) and column_name_bytes (ScyllaDB-specific), which can add\n// over 50 KB of wasted payload per keyspace with 80+ tables.\nconst columnMetadataColumns = `table_name, column_name, clustering_order, type, kind, position`\n\nfunc getColumnMetadataByTable(session *Session, keyspaceName, tableName string) ([]ColumnMetadata, error) {\n\tconst stmt = `SELECT ` + columnMetadataColumns + ` FROM system_schema.columns WHERE keyspace_name = ? 
AND table_name = ?`\n\n\tvar columns []ColumnMetadata\n\n\titer := session.control.querySystem(stmt, keyspaceName, tableName)\n\tcolumn := ColumnMetadata{Keyspace: keyspaceName}\n\n\tfor iter.MapScan(map[string]any{\n\t\t\"table_name\":       &column.Table,\n\t\t\"column_name\":      &column.Name,\n\t\t\"clustering_order\": &column.ClusteringOrder,\n\t\t\"type\":             &column.Type,\n\t\t\"kind\":             &column.Kind,\n\t\t\"position\":         &column.ComponentIndex,\n\t}) {\n\t\tcolumns = append(columns, column)\n\t\tcolumn = ColumnMetadata{Keyspace: keyspaceName}\n\t}\n\n\tif err := iter.Close(); err != nil && err != ErrNotFound {\n\t\treturn nil, fmt.Errorf(\"error querying column schema: %w\", err)\n\t}\n\n\treturn columns, nil\n}\n\nfunc getIndexMetadataByTable(session *Session, keyspaceName, tableName string) ([]IndexMetadata, error) {\n\tif !session.useSystemSchema {\n\t\treturn nil, nil\n\t}\n\n\tconst stmt = `SELECT * FROM system_schema.indexes WHERE keyspace_name = ? AND table_name = ?`\n\n\tvar indexes []IndexMetadata\n\tindex := IndexMetadata{}\n\n\titer := session.control.querySystem(stmt, keyspaceName, tableName)\n\tfor iter.MapScan(map[string]any{\n\t\t\"index_name\":    &index.Name,\n\t\t\"keyspace_name\": &index.KeyspaceName,\n\t\t\"table_name\":    &index.TableName,\n\t\t\"kind\":          &index.Kind,\n\t\t\"options\":       &index.Options,\n\t}) {\n\t\tindexes = append(indexes, index)\n\t\tindex = IndexMetadata{}\n\t}\n\n\tif err := iter.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn indexes, nil\n}\n\nfunc getViewMetadataByTable(session *Session, keyspaceName, tableName string) ([]ViewMetadata, error) {\n\tif !session.useSystemSchema {\n\t\treturn nil, nil\n\t}\n\n\tstmt := `SELECT * FROM system_schema.views WHERE keyspace_name = ? AND base_table_name = ? 
ALLOW FILTERING`\n\n\titer := session.control.querySystem(stmt, keyspaceName, tableName)\n\n\tvar views []ViewMetadata\n\tview := ViewMetadata{KeyspaceName: keyspaceName}\n\n\tfor iter.MapScan(map[string]any{\n\t\t\"id\":                          &view.ID,\n\t\t\"view_name\":                   &view.ViewName,\n\t\t\"base_table_id\":               &view.BaseTableID,\n\t\t\"base_table_name\":             &view.BaseTableName,\n\t\t\"include_all_columns\":         &view.IncludeAllColumns,\n\t\t\"where_clause\":                &view.WhereClause,\n\t\t\"bloom_filter_fp_chance\":      &view.Options.BloomFilterFpChance,\n\t\t\"caching\":                     &view.Options.Caching,\n\t\t\"comment\":                     &view.Options.Comment,\n\t\t\"compaction\":                  &view.Options.Compaction,\n\t\t\"compression\":                 &view.Options.Compression,\n\t\t\"crc_check_chance\":            &view.Options.CrcCheckChance,\n\t\t\"default_time_to_live\":        &view.Options.DefaultTimeToLive,\n\t\t\"gc_grace_seconds\":            &view.Options.GcGraceSeconds,\n\t\t\"max_index_interval\":          &view.Options.MaxIndexInterval,\n\t\t\"memtable_flush_period_in_ms\": &view.Options.MemtableFlushPeriodInMs,\n\t\t\"min_index_interval\":          &view.Options.MinIndexInterval,\n\t\t\"speculative_retry\":           &view.Options.SpeculativeRetry,\n\t\t\"extensions\":                  &view.Extensions,\n\t\t\"dclocal_read_repair_chance\":  &view.DcLocalReadRepairChance,\n\t\t\"read_repair_chance\":          &view.ReadRepairChance,\n\t}) {\n\t\tviews = append(views, view)\n\t\tview = ViewMetadata{KeyspaceName: keyspaceName}\n\t}\n\n\terr := iter.Close()\n\tif err != nil && err != ErrNotFound {\n\t\treturn nil, fmt.Errorf(\"error querying view schema: %w\", err)\n\t}\n\n\treturn views, nil\n}\n\n// query for column metadata in the system_schema.columns\nfunc getColumnMetadata(session *Session, keyspaceName string) ([]ColumnMetadata, error) {\n\tconst stmt = `SELECT ` + 
columnMetadataColumns + ` FROM system_schema.columns WHERE keyspace_name = ?`\n\n\tvar columns []ColumnMetadata\n\n\titer := session.control.querySystem(stmt, keyspaceName)\n\tcolumn := ColumnMetadata{Keyspace: keyspaceName}\n\n\tfor iter.MapScan(map[string]any{\n\t\t\"table_name\":       &column.Table,\n\t\t\"column_name\":      &column.Name,\n\t\t\"clustering_order\": &column.ClusteringOrder,\n\t\t\"type\":             &column.Type,\n\t\t\"kind\":             &column.Kind,\n\t\t\"position\":         &column.ComponentIndex,\n\t}) {\n\t\tcolumns = append(columns, column)\n\t\tcolumn = ColumnMetadata{Keyspace: keyspaceName}\n\t}\n\n\tif err := iter.Close(); err != nil && err != ErrNotFound {\n\t\treturn nil, fmt.Errorf(\"error querying column schema: %v\", err)\n\t}\n\n\treturn columns, nil\n}\n\n// query for type metadata in the system_schema.types\nfunc getTypeMetadata(session *Session, keyspaceName string) ([]TypeMetadata, error) {\n\tif !session.useSystemSchema {\n\t\treturn nil, nil\n\t}\n\n\tstmt := `SELECT * FROM system_schema.types WHERE keyspace_name = ?`\n\titer := session.control.querySystem(stmt, keyspaceName)\n\n\tvar types []TypeMetadata\n\ttm := TypeMetadata{Keyspace: keyspaceName}\n\n\tfor iter.MapScan(map[string]any{\n\t\t\"type_name\":   &tm.Name,\n\t\t\"field_names\": &tm.FieldNames,\n\t\t\"field_types\": &tm.FieldTypes,\n\t}) {\n\t\ttypes = append(types, tm)\n\t\ttm = TypeMetadata{Keyspace: keyspaceName}\n\t}\n\n\tif err := iter.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn types, nil\n}\n\n// query for function metadata in the system_schema.functions\nfunc getFunctionsMetadata(session *Session, keyspaceName string) ([]FunctionMetadata, error) {\n\tif !session.hasAggregatesAndFunctions || !session.useSystemSchema {\n\t\treturn nil, nil\n\t}\n\tstmt := `SELECT * FROM system_schema.functions WHERE keyspace_name = ?`\n\n\tvar functions []FunctionMetadata\n\tfunction := FunctionMetadata{Keyspace: keyspaceName}\n\n\titer := 
session.control.querySystem(stmt, keyspaceName)\n\tfor iter.MapScan(map[string]any{\n\t\t\"function_name\":        &function.Name,\n\t\t\"argument_types\":       &function.ArgumentTypes,\n\t\t\"argument_names\":       &function.ArgumentNames,\n\t\t\"body\":                 &function.Body,\n\t\t\"called_on_null_input\": &function.CalledOnNullInput,\n\t\t\"language\":             &function.Language,\n\t\t\"return_type\":          &function.ReturnType,\n\t}) {\n\t\tfunctions = append(functions, function)\n\t\tfunction = FunctionMetadata{Keyspace: keyspaceName}\n\t}\n\n\tif err := iter.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn functions, nil\n}\n\n// query for aggregate metadata in the system_schema.aggregates\nfunc getAggregatesMetadata(session *Session, keyspaceName string) ([]AggregateMetadata, error) {\n\tif !session.hasAggregatesAndFunctions || !session.useSystemSchema {\n\t\treturn nil, nil\n\t}\n\n\tconst stmt = `SELECT * FROM system_schema.aggregates WHERE keyspace_name = ?`\n\n\tvar aggregates []AggregateMetadata\n\taggregate := AggregateMetadata{Keyspace: keyspaceName}\n\n\titer := session.control.querySystem(stmt, keyspaceName)\n\tfor iter.MapScan(map[string]any{\n\t\t\"aggregate_name\": &aggregate.Name,\n\t\t\"argument_types\": &aggregate.ArgumentTypes,\n\t\t\"final_func\":     &aggregate.finalFunc,\n\t\t\"initcond\":       &aggregate.InitCond,\n\t\t\"return_type\":    &aggregate.ReturnType,\n\t\t\"state_func\":     &aggregate.stateFunc,\n\t\t\"state_type\":     &aggregate.StateType,\n\t}) {\n\t\taggregates = append(aggregates, aggregate)\n\t\taggregate = AggregateMetadata{Keyspace: keyspaceName}\n\t}\n\n\tif err := iter.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn aggregates, nil\n}\n\n// query for index metadata in the system_schema.indexes\nfunc getIndexMetadata(session *Session, keyspaceName string) ([]IndexMetadata, error) {\n\tif !session.useSystemSchema {\n\t\treturn nil, nil\n\t}\n\n\tconst stmt = `SELECT * FROM 
system_schema.indexes WHERE keyspace_name = ?`\n\n\tvar indexes []IndexMetadata\n\tindex := IndexMetadata{}\n\n\titer := session.control.querySystem(stmt, keyspaceName)\n\tfor iter.MapScan(map[string]any{\n\t\t\"index_name\":    &index.Name,\n\t\t\"keyspace_name\": &index.KeyspaceName,\n\t\t\"table_name\":    &index.TableName,\n\t\t\"kind\":          &index.Kind,\n\t\t\"options\":       &index.Options,\n\t}) {\n\t\tindexes = append(indexes, index)\n\t\tindex = IndexMetadata{}\n\t}\n\n\tif err := iter.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn indexes, nil\n}\n\n// get create statements for the keyspace\nfunc getCreateStatements(session *Session, keyspaceName string) ([]byte, error) {\n\tif !session.useSystemSchema {\n\t\treturn nil, nil\n\t}\n\titer := session.control.query(fmt.Sprintf(`DESCRIBE KEYSPACE %s WITH INTERNALS`, keyspaceName))\n\n\tvar createStatements []string\n\n\tvar stmt string\n\tfor iter.Scan(nil, nil, nil, &stmt) {\n\t\tif stmt == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tcreateStatements = append(createStatements, stmt)\n\t}\n\n\tif err := iter.Close(); err != nil {\n\t\tif errFrame, ok := err.(frm.ErrorFrame); ok && errFrame.Code == ErrCodeSyntax {\n\t\t\t// DESCRIBE KEYSPACE is not supported on older versions of Cassandra and Scylla\n\t\t\t// For such case schema statement is going to be recreated on the client side\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"error querying keyspace schema: %v\", err)\n\t}\n\n\treturn []byte(strings.Join(createStatements, \"\\n\")), nil\n}\n\n// query for view metadata in the system_schema.views\nfunc getViewMetadata(session *Session, keyspaceName string) ([]ViewMetadata, error) {\n\tif !session.useSystemSchema {\n\t\treturn nil, nil\n\t}\n\n\tstmt := `SELECT * FROM system_schema.views WHERE keyspace_name = ?`\n\n\titer := session.control.querySystem(stmt, keyspaceName)\n\n\tvar views []ViewMetadata\n\tview := ViewMetadata{KeyspaceName: keyspaceName}\n\n\tfor 
iter.MapScan(map[string]any{\n\t\t\"id\":                          &view.ID,\n\t\t\"view_name\":                   &view.ViewName,\n\t\t\"base_table_id\":               &view.BaseTableID,\n\t\t\"base_table_name\":             &view.BaseTableName,\n\t\t\"include_all_columns\":         &view.IncludeAllColumns,\n\t\t\"where_clause\":                &view.WhereClause,\n\t\t\"bloom_filter_fp_chance\":      &view.Options.BloomFilterFpChance,\n\t\t\"caching\":                     &view.Options.Caching,\n\t\t\"comment\":                     &view.Options.Comment,\n\t\t\"compaction\":                  &view.Options.Compaction,\n\t\t\"compression\":                 &view.Options.Compression,\n\t\t\"crc_check_chance\":            &view.Options.CrcCheckChance,\n\t\t\"default_time_to_live\":        &view.Options.DefaultTimeToLive,\n\t\t\"gc_grace_seconds\":            &view.Options.GcGraceSeconds,\n\t\t\"max_index_interval\":          &view.Options.MaxIndexInterval,\n\t\t\"memtable_flush_period_in_ms\": &view.Options.MemtableFlushPeriodInMs,\n\t\t\"min_index_interval\":          &view.Options.MinIndexInterval,\n\t\t\"speculative_retry\":           &view.Options.SpeculativeRetry,\n\t\t\"extensions\":                  &view.Extensions,\n\t\t\"dclocal_read_repair_chance\":  &view.DcLocalReadRepairChance,\n\t\t\"read_repair_chance\":          &view.ReadRepairChance,\n\t}) {\n\t\tviews = append(views, view)\n\t\tview = ViewMetadata{KeyspaceName: keyspaceName}\n\t}\n\n\terr := iter.Close()\n\tif err != nil && err != ErrNotFound {\n\t\treturn nil, fmt.Errorf(\"error querying view schema: %v\", err)\n\t}\n\n\treturn views, nil\n}\n"
  },
  {
    "path": "metadata_scylla_test.go",
    "content": "//go:build unit\n// +build unit\n\n// Copyright (c) 2015 The gocql Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage gocql\n\nimport (\n\t\"testing\"\n)\n\nfunc TestGetKeyspaceMetadataMissingKeyspaceClosesIter(t *testing.T) {\n\tt.Parallel()\n\n\tframer := &trackingMockFramer{}\n\tsession := &Session{\n\t\tuseSystemSchema: true,\n\t\tcontrol: &systemSchemaTestControl{\n\t\t\titer: &Iter{framer: framer},\n\t\t},\n\t}\n\n\t_, err := getKeyspaceMetadata(session, \"missing_keyspace\")\n\n\tif err != ErrKeyspaceDoesNotExist {\n\t\tt.Fatalf(\"getKeyspaceMetadata() error = %v, want %v\", err, ErrKeyspaceDoesNotExist)\n\t}\n\tif !framer.released {\n\t\tt.Fatal(\"expected iterator framer to be released on missing keyspace\")\n\t}\n}\n\n// Tests metadata \"compilation\" from example data which might be returned\n// from metadata schema queries (see getKeyspaceMetadata, getTableMetadata, and getColumnMetadata)\nfunc TestCompileMetadata(t *testing.T) {\n\tt.Parallel()\n\n\tkeyspace := &KeyspaceMetadata{\n\t\tName: \"V2Keyspace\",\n\t}\n\ttables := []TableMetadata{\n\t\t{\n\t\t\tKeyspace: \"V2Keyspace\",\n\t\t\tName:     \"Table1\",\n\t\t},\n\t\t{\n\t\t\tKeyspace: \"V2Keyspace\",\n\t\t\tName:     \"Table2\",\n\t\t},\n\t}\n\tcolumns := []ColumnMetadata{\n\t\t{\n\t\t\tKeyspace:       \"V2Keyspace\",\n\t\t\tTable:          \"Table1\",\n\t\t\tName:           \"KEY1\",\n\t\t\tKind:           ColumnPartitionKey,\n\t\t\tComponentIndex: 0,\n\t\t\tType:           \"text\",\n\t\t},\n\t\t{\n\t\t\tKeyspace:       \"V2Keyspace\",\n\t\t\tTable:          \"Table1\",\n\t\t\tName:           \"Key1\",\n\t\t\tKind:           ColumnPartitionKey,\n\t\t\tComponentIndex: 0,\n\t\t\tType:           \"text\",\n\t\t},\n\t\t{\n\t\t\tKeyspace:       \"V2Keyspace\",\n\t\t\tTable:          \"Table2\",\n\t\t\tName:           \"Column1\",\n\t\t\tKind:           
ColumnPartitionKey,\n\t\t\tComponentIndex: 0,\n\t\t\tType:           \"text\",\n\t\t},\n\t\t{\n\t\t\tKeyspace:       \"V2Keyspace\",\n\t\t\tTable:          \"Table2\",\n\t\t\tName:           \"Column2\",\n\t\t\tKind:           ColumnClusteringKey,\n\t\t\tComponentIndex: 0,\n\t\t\tType:           \"text\",\n\t\t},\n\t\t{\n\t\t\tKeyspace:        \"V2Keyspace\",\n\t\t\tTable:           \"Table2\",\n\t\t\tName:            \"Column3\",\n\t\t\tKind:            ColumnClusteringKey,\n\t\t\tComponentIndex:  1,\n\t\t\tType:            \"text\",\n\t\t\tClusteringOrder: \"desc\",\n\t\t},\n\t\t{\n\t\t\tKeyspace: \"V2Keyspace\",\n\t\t\tTable:    \"Table2\",\n\t\t\tName:     \"Column4\",\n\t\t\tKind:     ColumnRegular,\n\t\t\tType:     \"text\",\n\t\t},\n\t\t{\n\t\t\tKeyspace: \"V2Keyspace\",\n\t\t\tTable:    \"view\",\n\t\t\tName:     \"ColReg\",\n\t\t\tKind:     ColumnRegular,\n\t\t\tType:     \"text\",\n\t\t},\n\t\t{\n\t\t\tKeyspace: \"V2Keyspace\",\n\t\t\tTable:    \"view\",\n\t\t\tName:     \"ColCK\",\n\t\t\tKind:     ColumnClusteringKey,\n\t\t\tType:     \"text\",\n\t\t},\n\t\t{\n\t\t\tKeyspace: \"V2Keyspace\",\n\t\t\tTable:    \"view\",\n\t\t\tName:     \"ColPK\",\n\t\t\tKind:     ColumnPartitionKey,\n\t\t\tType:     \"text\",\n\t\t},\n\t\t{\n\t\t\tKeyspace:        \"V2Keyspace\",\n\t\t\tTable:           \"buckets_by_owner_index\",\n\t\t\tName:            \"idx_token\",\n\t\t\tKind:            ColumnClusteringKey,\n\t\t\tComponentIndex:  0,\n\t\t\tType:            \"bigint\",\n\t\t\tClusteringOrder: \"asc\",\n\t\t},\n\t\t{\n\t\t\tKeyspace:        \"V2Keyspace\",\n\t\t\tTable:           \"buckets_by_owner_index\",\n\t\t\tName:            \"name\",\n\t\t\tKind:            ColumnClusteringKey,\n\t\t\tComponentIndex:  1,\n\t\t\tType:            \"text\",\n\t\t\tClusteringOrder: \"asc\",\n\t\t},\n\t\t{\n\t\t\tKeyspace: \"V2Keyspace\",\n\t\t\tTable:    \"buckets_by_owner_index\",\n\t\t\tName:     \"owner\",\n\t\t\tKind:     ColumnPartitionKey,\n\t\t\tType:     
\"text\",\n\t\t},\n\t}\n\t// Consider an index by column `owner` on the base table `buckets` with\n\t// partition key `name`.\n\t//\n\t// CREATE INDEX buckets_by_owner ON stg_msk_a.buckets(owner);\n\tindexes := []IndexMetadata{\n\t\t{Name: \"buckets_by_owner\"},\n\t}\n\tviews := []ViewMetadata{\n\t\t{\n\t\t\tKeyspaceName: \"V2Keyspace\",\n\t\t\tViewName:     \"view\",\n\t\t},\n\t\t{\n\t\t\tKeyspaceName: \"V2Keyspace\",\n\t\t\tViewName:     \"buckets_by_owner_index\",\n\t\t},\n\t}\n\tcompileMetadata(keyspace, tables, columns, nil, nil, nil, indexes, views, nil)\n\tassertKeyspaceMetadata(\n\t\tt,\n\t\tkeyspace,\n\t\t&KeyspaceMetadata{\n\t\t\tName: \"V2Keyspace\",\n\t\t\tViews: map[string]*ViewMetadata{\n\t\t\t\t\"view\": {\n\t\t\t\t\tPartitionKey: []*ColumnMetadata{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"ColPK\",\n\t\t\t\t\t\t\tType: \"text\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tClusteringColumns: []*ColumnMetadata{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"ColCK\",\n\t\t\t\t\t\t\tType: \"text\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tOrderedColumns: []string{\n\t\t\t\t\t\t\"ColPK\", \"ColCK\", \"ColReg\",\n\t\t\t\t\t},\n\t\t\t\t\tColumns: map[string]*ColumnMetadata{\n\t\t\t\t\t\t\"ColPK\": {\n\t\t\t\t\t\t\tName: \"ColPK\",\n\t\t\t\t\t\t\tKind: ColumnPartitionKey,\n\t\t\t\t\t\t\tType: \"text\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"ColCK\": {\n\t\t\t\t\t\t\tName: \"ColCK\",\n\t\t\t\t\t\t\tKind: ColumnClusteringKey,\n\t\t\t\t\t\t\tType: \"text\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"ColReg\": {\n\t\t\t\t\t\t\tName: \"ColReg\",\n\t\t\t\t\t\t\tKind: ColumnRegular,\n\t\t\t\t\t\t\tType: \"text\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tTables: map[string]*TableMetadata{\n\t\t\t\t\"Table1\": {\n\t\t\t\t\tPartitionKey: []*ColumnMetadata{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"Key1\",\n\t\t\t\t\t\t\tType: \"text\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tClusteringColumns: []*ColumnMetadata{},\n\t\t\t\t\tColumns: map[string]*ColumnMetadata{\n\t\t\t\t\t\t\"KEY1\": 
{\n\t\t\t\t\t\t\tName: \"KEY1\",\n\t\t\t\t\t\t\tType: \"text\",\n\t\t\t\t\t\t\tKind: ColumnPartitionKey,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"Key1\": {\n\t\t\t\t\t\t\tName: \"Key1\",\n\t\t\t\t\t\t\tType: \"text\",\n\t\t\t\t\t\t\tKind: ColumnPartitionKey,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tOrderedColumns: []string{\n\t\t\t\t\t\t\"Key1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"Table2\": {\n\t\t\t\t\tPartitionKey: []*ColumnMetadata{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"Column1\",\n\t\t\t\t\t\t\tType: \"text\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tClusteringColumns: []*ColumnMetadata{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:  \"Column2\",\n\t\t\t\t\t\t\tType:  \"text\",\n\t\t\t\t\t\t\tOrder: ASC,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:  \"Column3\",\n\t\t\t\t\t\t\tType:  \"text\",\n\t\t\t\t\t\t\tOrder: DESC,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tColumns: map[string]*ColumnMetadata{\n\t\t\t\t\t\t\"Column1\": {\n\t\t\t\t\t\t\tName: \"Column1\",\n\t\t\t\t\t\t\tType: \"text\",\n\t\t\t\t\t\t\tKind: ColumnPartitionKey,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"Column2\": {\n\t\t\t\t\t\t\tName:  \"Column2\",\n\t\t\t\t\t\t\tType:  \"text\",\n\t\t\t\t\t\t\tOrder: ASC,\n\t\t\t\t\t\t\tKind:  ColumnClusteringKey,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"Column3\": {\n\t\t\t\t\t\t\tName:  \"Column3\",\n\t\t\t\t\t\t\tType:  \"text\",\n\t\t\t\t\t\t\tOrder: DESC,\n\t\t\t\t\t\t\tKind:  ColumnClusteringKey,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"Column4\": {\n\t\t\t\t\t\t\tName: \"Column4\",\n\t\t\t\t\t\t\tType: \"text\",\n\t\t\t\t\t\t\tKind: ColumnRegular,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tOrderedColumns: []string{\n\t\t\t\t\t\t\"Column1\", \"Column2\", \"Column3\", \"Column4\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tIndexes: map[string]*IndexMetadata{\n\t\t\t\t\"buckets_by_owner\": {\n\t\t\t\t\tName:      \"buckets_by_owner\",\n\t\t\t\t\tTableName: \"buckets_by_owner_index\",\n\t\t\t\t\tPartitionKey: []*ColumnMetadata{\n\t\t\t\t\t\t{Name: \"owner\", Type: 
\"text\"},\n\t\t\t\t\t},\n\t\t\t\t\tClusteringColumns: []*ColumnMetadata{\n\t\t\t\t\t\t{Name: \"idx_token\", Type: \"bigint\"},\n\t\t\t\t\t\t{Name: \"name\", Type: \"text\"},\n\t\t\t\t\t},\n\t\t\t\t\tOrderedColumns: []string{\n\t\t\t\t\t\t\"owner\", \"idx_token\", \"name\",\n\t\t\t\t\t},\n\t\t\t\t\tColumns: map[string]*ColumnMetadata{\n\t\t\t\t\t\t\"owner\": {\n\t\t\t\t\t\t\tName: \"owner\",\n\t\t\t\t\t\t\tType: \"text\",\n\t\t\t\t\t\t\tKind: ColumnPartitionKey,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"idx_token\": {\n\t\t\t\t\t\t\tName:  \"idx_token\",\n\t\t\t\t\t\t\tType:  \"bigint\",\n\t\t\t\t\t\t\tOrder: ASC,\n\t\t\t\t\t\t\tKind:  ColumnClusteringKey,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"name\": {\n\t\t\t\t\t\t\tName:  \"name\",\n\t\t\t\t\t\t\tType:  \"text\",\n\t\t\t\t\t\t\tOrder: ASC,\n\t\t\t\t\t\t\tKind:  ColumnClusteringKey,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n}\n\nfunc assertPartitionKey(t *testing.T, keyspaceName, tableName string, actual, expected []*ColumnMetadata) {\n\tif len(expected) != len(actual) {\n\t\tt.Errorf(\"Expected len(%s.Tables[%s].PartitionKey) to be %v but was %v\", keyspaceName, tableName, len(expected), len(actual))\n\t} else {\n\t\tfor i := range expected {\n\t\t\tif expected[i].Name != actual[i].Name {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].PartitionKey[%d].Name to be '%v' but was '%v'\", keyspaceName, tableName, i, expected[i].Name, actual[i].Name)\n\t\t\t}\n\t\t\tif keyspaceName != actual[i].Keyspace {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].PartitionKey[%d].Keyspace to be '%v' but was '%v'\", keyspaceName, tableName, i, keyspaceName, actual[i].Keyspace)\n\t\t\t}\n\t\t\tif tableName != actual[i].Table {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].PartitionKey[%d].Table to be '%v' but was '%v'\", keyspaceName, tableName, i, tableName, actual[i].Table)\n\t\t\t}\n\t\t\tif expected[i].Type != actual[i].Type {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].PartitionKey[%d].Type.Type to be %v but was %v\", keyspaceName, 
tableName, i, expected[i].Type, actual[i].Type)\n\t\t\t}\n\t\t\tif i != actual[i].ComponentIndex {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].PartitionKey[%d].ComponentIndex to be %v but was %v\", keyspaceName, tableName, i, i, actual[i].ComponentIndex)\n\t\t\t}\n\t\t\tif ColumnPartitionKey != actual[i].Kind {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].PartitionKey[%d].Kind to be '%v' but was '%v'\", keyspaceName, tableName, i, ColumnPartitionKey, actual[i].Kind)\n\t\t\t}\n\t\t}\n\t}\n}\nfunc assertClusteringColumns(t *testing.T, keyspaceName, tableName string, actual, expected []*ColumnMetadata) {\n\tif len(expected) != len(actual) {\n\t\tt.Errorf(\"Expected len(%s.Tables[%s].ClusteringColumns) to be %v but was %v\", keyspaceName, tableName, len(expected), len(actual))\n\t} else {\n\t\tfor i := range expected {\n\t\t\tif actual[i] == nil {\n\t\t\t\tt.Fatalf(\"Unexpected nil value: %s.Tables[%s].ClusteringColumns[%d]\", keyspaceName, tableName, i)\n\t\t\t}\n\t\t\tif expected[i].Name != actual[i].Name {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].ClusteringColumns[%d].Name to be '%v' but was '%v'\", keyspaceName, tableName, i, expected[i].Name, actual[i].Name)\n\t\t\t}\n\t\t\tif keyspaceName != actual[i].Keyspace {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].ClusteringColumns[%d].Keyspace to be '%v' but was '%v'\", keyspaceName, tableName, i, keyspaceName, actual[i].Keyspace)\n\t\t\t}\n\t\t\tif tableName != actual[i].Table {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].ClusteringColumns[%d].Table to be '%v' but was '%v'\", keyspaceName, tableName, i, tableName, actual[i].Table)\n\t\t\t}\n\t\t\tif expected[i].Type != actual[i].Type {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].ClusteringColumns[%d].Type.Type to be %v but was %v\", keyspaceName, tableName, i, expected[i].Type, actual[i].Type)\n\t\t\t}\n\t\t\tif i != actual[i].ComponentIndex {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].ClusteringColumns[%d].ComponentIndex to be %v but was %v\", keyspaceName, tableName, i, 
i, actual[i].ComponentIndex)\n\t\t\t}\n\t\t\tif expected[i].Order != actual[i].Order {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].ClusteringColumns[%d].Order to be %v but was %v\", keyspaceName, tableName, i, expected[i].Order, actual[i].Order)\n\t\t\t}\n\t\t\tif ColumnClusteringKey != actual[i].Kind {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].ClusteringColumns[%d].Kind to be '%v' but was '%v'\", keyspaceName, tableName, i, ColumnClusteringKey, actual[i].Kind)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc assertColumns(t *testing.T, keyspaceName, tableName string, actual, expected map[string]*ColumnMetadata) {\n\tif len(expected) != len(actual) {\n\t\teKeys := make([]string, 0, len(expected))\n\t\tfor key := range expected {\n\t\t\teKeys = append(eKeys, key)\n\t\t}\n\t\taKeys := make([]string, 0, len(actual))\n\t\tfor key := range actual {\n\t\t\taKeys = append(aKeys, key)\n\t\t}\n\t\tt.Errorf(\"Expected len(%s.Tables[%s].Columns) to be %v (keys:%v) but was %v (keys:%v)\", keyspaceName, tableName, len(expected), eKeys, len(actual), aKeys)\n\t} else {\n\t\tfor keyC := range expected {\n\t\t\tec := expected[keyC]\n\t\t\tac, found := actual[keyC]\n\n\t\t\tif !found {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].Columns[%s] but was not found\", keyspaceName, tableName, keyC)\n\t\t\t} else {\n\t\t\t\tif keyC != ac.Name {\n\t\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].Columns[%s].Name to be '%v' but was '%v'\", keyspaceName, tableName, keyC, keyC, tableName)\n\t\t\t\t}\n\t\t\t\tif keyspaceName != ac.Keyspace {\n\t\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].Columns[%s].Keyspace to be '%v' but was '%v'\", keyspaceName, tableName, keyC, keyspaceName, ac.Keyspace)\n\t\t\t\t}\n\t\t\t\tif tableName != ac.Table {\n\t\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].Columns[%s].Table to be '%v' but was '%v'\", keyspaceName, tableName, keyC, tableName, ac.Table)\n\t\t\t\t}\n\t\t\t\tif ec.Type != ac.Type {\n\t\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].Columns[%s].Type.Type to be %v but was %v\", 
keyspaceName, tableName, keyC, ec.Type, ac.Type)\n\t\t\t\t}\n\t\t\t\tif ec.Order != ac.Order {\n\t\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].Columns[%s].Order to be %v but was %v\", keyspaceName, tableName, keyC, ec.Order, ac.Order)\n\t\t\t\t}\n\t\t\t\tif ec.Kind != ac.Kind {\n\t\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].Columns[%s].Kind to be '%v' but was '%v'\", keyspaceName, tableName, keyC, ec.Kind, ac.Kind)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc assertOrderedColumns(t *testing.T, keyspaceName, tableName string, actual, expected []string) {\n\tif len(expected) != len(actual) {\n\t\tt.Errorf(\"Expected len(%s.Tables[%s].OrderedColumns to be %v but was %v\", keyspaceName, tableName, len(expected), len(actual))\n\t} else {\n\t\tfor i, eoc := range expected {\n\t\t\taoc := actual[i]\n\t\t\tif eoc != aoc {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].OrderedColumns[%d] to be %s, but was %s\", keyspaceName, tableName, i, eoc, aoc)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc assertTableMetadata(t *testing.T, keyspaceName string, actual, expected map[string]*TableMetadata) {\n\tif len(expected) != len(actual) {\n\t\tt.Errorf(\"Expected len(%s.Tables) to be %v but was %v\", keyspaceName, len(expected), len(actual))\n\t}\n\tfor keyT := range expected {\n\t\tet := expected[keyT]\n\t\tat, found := actual[keyT]\n\n\t\tif !found {\n\t\t\tt.Errorf(\"Expected %s.Tables[%s] but was not found\", keyspaceName, keyT)\n\t\t} else {\n\t\t\tif keyT != at.Name {\n\t\t\t\tt.Errorf(\"Expected %s.Tables[%s].Name to be %v but was %v\", keyspaceName, keyT, keyT, at.Name)\n\t\t\t}\n\t\t\tassertPartitionKey(t, keyspaceName, keyT, at.PartitionKey, et.PartitionKey)\n\t\t\tassertClusteringColumns(t, keyspaceName, keyT, at.ClusteringColumns, et.ClusteringColumns)\n\t\t\tassertColumns(t, keyspaceName, keyT, at.Columns, et.Columns)\n\t\t\tassertOrderedColumns(t, keyspaceName, keyT, at.OrderedColumns, et.OrderedColumns)\n\t\t}\n\t}\n}\n\nfunc assertViewsMetadata(t *testing.T, keyspaceName string, actual, 
expected map[string]*ViewMetadata) {\n\tif len(expected) != len(actual) {\n\t\tt.Errorf(\"Expected len(%s.Views) to be %v but was %v\", keyspaceName, len(expected), len(actual))\n\t}\n\tfor keyT := range expected {\n\t\tet := expected[keyT]\n\t\tat, found := actual[keyT]\n\n\t\tif !found {\n\t\t\tt.Errorf(\"Expected %s.Views[%s] but was not found\", keyspaceName, keyT)\n\t\t} else {\n\t\t\tif keyT != at.ViewName {\n\t\t\t\tt.Errorf(\"Expected %s.Views[%s].Name to be %v but was %v\", keyspaceName, keyT, keyT, at.ViewName)\n\t\t\t}\n\t\t\tassertPartitionKey(t, keyspaceName, keyT, at.PartitionKey, et.PartitionKey)\n\t\t\tassertClusteringColumns(t, keyspaceName, keyT, at.ClusteringColumns, et.ClusteringColumns)\n\t\t\tassertColumns(t, keyspaceName, keyT, at.Columns, et.Columns)\n\t\t\tassertOrderedColumns(t, keyspaceName, keyT, at.OrderedColumns, et.OrderedColumns)\n\t\t}\n\t}\n}\n\nfunc assertIndicesMetadata(t *testing.T, keyspaceName string, actual, expected map[string]*IndexMetadata) {\n\tif len(expected) != len(actual) {\n\t\tt.Errorf(\"Expected len(%s.Indexes) to be %v but was %v\", keyspaceName, len(expected), len(actual))\n\t}\n\tfor key := range expected {\n\t\tviewName := key + \"_index\"\n\t\tet := expected[key]\n\t\tat, found := actual[key]\n\n\t\tif !found {\n\t\t\tt.Errorf(\"Expected %s.Indexes[%s] but was not found\", keyspaceName, key)\n\t\t} else {\n\t\t\tif et.Name != at.Name {\n\t\t\t\tt.Errorf(\"Expected %s.Indexes[%s].Name to be %v but was %v\", keyspaceName, key, et.Name, at.Name)\n\t\t\t}\n\t\t\tassertPartitionKey(t, keyspaceName, viewName, at.PartitionKey, et.PartitionKey)\n\t\t\tassertClusteringColumns(t, keyspaceName, viewName, at.ClusteringColumns, et.ClusteringColumns)\n\t\t\tassertColumns(t, keyspaceName, viewName, at.Columns, et.Columns)\n\t\t\tassertOrderedColumns(t, keyspaceName, viewName, at.OrderedColumns, et.OrderedColumns)\n\t\t}\n\t}\n}\n\n// Helper function for asserting that actual metadata returned was as expected\nfunc 
assertKeyspaceMetadata(t *testing.T, actual, expected *KeyspaceMetadata) {\n\tassertTableMetadata(t, expected.Name, actual.Tables, expected.Tables)\n\tassertViewsMetadata(t, expected.Name, actual.Views, expected.Views)\n\tassertIndicesMetadata(t, expected.Name, actual.Indexes, expected.Indexes)\n}\n"
  },
  {
    "path": "policies.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2012, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\n// This file will be the future home for more policies\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"math/rand\"\n\trandv2 \"math/rand/v2\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n)\n\n// cowHostList implements a copy on write host list, its equivalent type is []*HostInfo\ntype cowHostList struct {\n\tlist atomic.Value\n\tmu   sync.Mutex\n}\n\nfunc (c *cowHostList) String() string {\n\treturn fmt.Sprintf(\"%+v\", c.get())\n}\n\nfunc (c *cowHostList) get() []*HostInfo {\n\t// TODO(zariel): should we replace this with []*HostInfo?\n\tl, ok := c.list.Load().(*[]*HostInfo)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn *l\n}\n\n// add will add a host if it not already in the list\nfunc (c *cowHostList) add(host *HostInfo) bool {\n\tc.mu.Lock()\n\tl := c.get()\n\n\tif n := len(l); n == 0 {\n\t\tl = []*HostInfo{host}\n\t} else {\n\t\tnewL := make([]*HostInfo, 
n+1)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tif host.Equal(l[i]) {\n\t\t\t\tc.mu.Unlock()\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tnewL[i] = l[i]\n\t\t}\n\t\tnewL[n] = host\n\t\tl = newL\n\t}\n\n\tc.list.Store(&l)\n\tc.mu.Unlock()\n\treturn true\n}\n\nfunc (c *cowHostList) remove(host *HostInfo) bool {\n\tc.mu.Lock()\n\tl := c.get()\n\tsize := len(l)\n\tif size == 0 {\n\t\tc.mu.Unlock()\n\t\treturn false\n\t}\n\n\tfound := false\n\tnewL := make([]*HostInfo, 0, size)\n\tfor i := 0; i < len(l); i++ {\n\t\tif !l[i].Equal(host) {\n\t\t\tnewL = append(newL, l[i])\n\t\t} else {\n\t\t\tfound = true\n\t\t}\n\t}\n\n\tif !found {\n\t\tc.mu.Unlock()\n\t\treturn false\n\t}\n\n\tnewL = newL[: size-1 : size-1]\n\tc.list.Store(&newL)\n\tc.mu.Unlock()\n\n\treturn true\n}\n\n// RetryableQuery is an interface that represents a query or batch statement that\n// exposes the correct functions for the retry policy logic to evaluate correctly.\ntype RetryableQuery interface {\n\tAttempts() int\n\tSetConsistency(c Consistency)\n\tGetConsistency() Consistency\n\tContext() context.Context\n}\n\ntype RetryType uint16\n\nconst (\n\tRetry         RetryType = 0x00 // retry on same connection\n\tRetryNextHost RetryType = 0x01 // retry on another connection\n\tIgnore        RetryType = 0x02 // same as Rethrow\n\tRethrow       RetryType = 0x03 // raise error and stop retrying\n)\n\n// ErrUnknownRetryType is returned if the retry policy returns a retry type\n// unknown to the query executor.\nvar ErrUnknownRetryType = errors.New(\"unknown retry type returned by retry policy\")\n\n// RetryPolicy interface is used by gocql to determine if a query can be attempted\n// again after a retryable error has been received. 
The interface allows gocql\n// users to implement their own logic to determine if a query can be attempted\n// again.\n//\n// See SimpleRetryPolicy as an example of implementing and using a RetryPolicy\n// interface.\ntype RetryPolicy interface {\n\tAttempt(RetryableQuery) bool\n\tGetRetryType(error) RetryType\n}\n\n// LWTRetryPolicy is a similar interface to RetryPolicy\n// If a query is recognized as an LWT query and its RetryPolicy satisfies this\n// interface, then this interface will be used instead of RetryPolicy.\ntype LWTRetryPolicy interface {\n\tAttemptLWT(RetryableQuery) bool\n\tGetRetryTypeLWT(error) RetryType\n}\n\n// SimpleRetryPolicy has simple logic for attempting a query a fixed number of times.\n//\n// See below for examples of usage:\n//\n//\t//Assign to the cluster\n//\tcluster.RetryPolicy = &gocql.SimpleRetryPolicy{NumRetries: 3}\n//\n//\t//Assign to a query\n//\tquery.RetryPolicy(&gocql.SimpleRetryPolicy{NumRetries: 1})\ntype SimpleRetryPolicy struct {\n\tNumRetries int // Number of times to retry a query\n}\n\n// Attempt tells gocql to attempt the query again based on query.Attempts being less\n// than the NumRetries defined in the policy.\nfunc (s *SimpleRetryPolicy) Attempt(q RetryableQuery) bool {\n\treturn q.Attempts() <= s.NumRetries\n}\n\nfunc (s *SimpleRetryPolicy) AttemptLWT(q RetryableQuery) bool {\n\treturn s.Attempt(q)\n}\n\nfunc (s *SimpleRetryPolicy) GetRetryType(err error) RetryType {\n\tvar executedErr *QueryError\n\tif errors.As(err, &executedErr) && executedErr.PotentiallyExecuted() && !executedErr.IsIdempotent() {\n\t\treturn Rethrow\n\t}\n\treturn RetryNextHost\n}\n\n// Retrying on a different host is fine for normal (non-LWT) queries,\n// but in case of LWTs it will cause Paxos contention and possibly\n// even timeouts if other clients send statements touching the same\n// partition to the original node at the same time.\nfunc (s *SimpleRetryPolicy) GetRetryTypeLWT(err error) RetryType {\n\tvar executedErr 
*QueryError\n\tif errors.As(err, &executedErr) && executedErr.PotentiallyExecuted() && !executedErr.IsIdempotent() {\n\t\treturn Rethrow\n\t}\n\treturn Retry\n}\n\n// ExponentialBackoffRetryPolicy sleeps between attempts\ntype ExponentialBackoffRetryPolicy struct {\n\tNumRetries int\n\tMin, Max   time.Duration\n}\n\nfunc (e *ExponentialBackoffRetryPolicy) Attempt(q RetryableQuery) bool {\n\tif q.Attempts() > e.NumRetries {\n\t\treturn false\n\t}\n\ttime.Sleep(e.napTime(q.Attempts()))\n\treturn true\n}\n\nfunc (e *ExponentialBackoffRetryPolicy) AttemptLWT(q RetryableQuery) bool {\n\treturn e.Attempt(q)\n}\n\n// used to calculate exponentially growing time\nfunc getExponentialTime(min time.Duration, max time.Duration, attempts int) time.Duration {\n\tif min <= 0 {\n\t\tmin = 100 * time.Millisecond\n\t}\n\tif max <= 0 {\n\t\tmax = 10 * time.Second\n\t}\n\tminFloat := float64(min)\n\tnapDuration := minFloat * math.Pow(2, float64(attempts-1))\n\t// add some jitter\n\tnapDuration += rand.Float64()*minFloat - (minFloat / 2)\n\tif napDuration > float64(max) {\n\t\treturn time.Duration(max)\n\t}\n\treturn time.Duration(napDuration)\n}\n\nfunc (e *ExponentialBackoffRetryPolicy) GetRetryType(err error) RetryType {\n\tvar executedErr *QueryError\n\tif errors.As(err, &executedErr) && executedErr.PotentiallyExecuted() && !executedErr.IsIdempotent() {\n\t\treturn Rethrow\n\t}\n\treturn RetryNextHost\n}\n\n// Retrying on a different host is fine for normal (non-LWT) queries,\n// but in case of LWTs it will cause Paxos contention and possibly\n// even timeouts if other clients send statements touching the same\n// partition to the original node at the same time.\nfunc (e *ExponentialBackoffRetryPolicy) GetRetryTypeLWT(err error) RetryType {\n\tvar executedErr *QueryError\n\tif errors.As(err, &executedErr) && executedErr.PotentiallyExecuted() && !executedErr.IsIdempotent() {\n\t\treturn Rethrow\n\t}\n\treturn Retry\n}\n\n// DowngradingConsistencyRetryPolicy: Next retry will be with 
the next consistency level\n// provided in the slice\n//\n// On a read timeout: the operation is retried with the next provided consistency\n// level.\n//\n// On a write timeout: if the operation is an :attr:`~.UNLOGGED_BATCH`\n// and at least one replica acknowledged the write, the operation is\n// retried with the next consistency level.  Furthermore, for other\n// write types, if at least one replica acknowledged the write, the\n// timeout is ignored.\n//\n// On an unavailable exception: if at least one replica is alive, the\n// operation is retried with the next provided consistency level.\n\ntype DowngradingConsistencyRetryPolicy struct {\n\tConsistencyLevelsToTry []Consistency\n}\n\nfunc (d *DowngradingConsistencyRetryPolicy) Attempt(q RetryableQuery) bool {\n\tcurrentAttempt := q.Attempts()\n\n\tif currentAttempt > len(d.ConsistencyLevelsToTry) {\n\t\treturn false\n\t} else if currentAttempt > 0 {\n\t\tq.SetConsistency(d.ConsistencyLevelsToTry[currentAttempt-1])\n\t}\n\treturn true\n}\n\nfunc (d *DowngradingConsistencyRetryPolicy) GetRetryType(err error) RetryType {\n\tvar executedErr *QueryError\n\tif errors.As(err, &executedErr) {\n\t\terr = executedErr.err\n\t\tif executedErr.PotentiallyExecuted() && !executedErr.IsIdempotent() {\n\t\t\treturn Rethrow\n\t\t}\n\t}\n\n\tswitch t := err.(type) {\n\tcase *RequestErrUnavailable:\n\t\tif t.Alive > 0 {\n\t\t\treturn Retry\n\t\t}\n\t\treturn Rethrow\n\tcase *RequestErrWriteTimeout:\n\t\tif t.WriteType == \"SIMPLE\" || t.WriteType == \"BATCH\" || t.WriteType == \"COUNTER\" {\n\t\t\tif t.Received > 0 {\n\t\t\t\treturn Ignore\n\t\t\t}\n\t\t\treturn Rethrow\n\t\t}\n\t\tif t.WriteType == \"UNLOGGED_BATCH\" {\n\t\t\treturn Retry\n\t\t}\n\t\treturn Rethrow\n\tcase *RequestErrReadTimeout:\n\t\treturn Retry\n\tdefault:\n\t\treturn RetryNextHost\n\t}\n}\n\nfunc (e *ExponentialBackoffRetryPolicy) napTime(attempts int) time.Duration {\n\treturn getExponentialTime(e.Min, e.Max, attempts)\n}\n\ntype HostStateNotifier interface 
{\n\tAddHost(host *HostInfo)\n\tRemoveHost(host *HostInfo)\n\tHostUp(host *HostInfo)\n\tHostDown(host *HostInfo)\n}\n\ntype KeyspaceUpdateEvent struct {\n\tKeyspace string\n\tChange   string\n}\n\ntype HostTierer interface {\n\t// HostTier returns an integer specifying how far a host is from the client.\n\t// Tier must start at 0.\n\t// The value is used to prioritize closer hosts during host selection.\n\t// For example this could be:\n\t// 0 - local rack, 1 - local DC, 2 - remote DC\n\t// or:\n\t// 0 - local DC, 1 - remote DC\n\tHostTier(host *HostInfo) uint\n\n\t// This function returns the maximum possible host tier\n\tMaxHostTier() uint\n}\n\n// HostSelectionPolicy is an interface for selecting\n// the most appropriate host to execute a given query.\n// HostSelectionPolicy instances cannot be shared between sessions.\ntype HostSelectionPolicy interface {\n\tHostStateNotifier\n\tSetPartitioner\n\tKeyspaceChanged(KeyspaceUpdateEvent)\n\tInit(*Session)\n\t// Reset is an opportunity to reset HostSelectionPolicy if Session initialization failed and we want to\n\t// call HostSelectionPolicy.Init() again with new Session\n\tReset()\n\tIsLocal(host *HostInfo) bool\n\t// Pick returns an iteration function over selected hosts.\n\t// Multiple attempts of a single query execution won't call the returned NextHost function concurrently,\n\t// so it's safe to have internal state without additional synchronization as long as every call to Pick returns\n\t// a different instance of NextHost.\n\tPick(ExecutableQuery) NextHost\n\t// IsOperational checks if host policy can properly work with given Session/Cluster/ClusterConfig\n\tIsOperational(*Session) error\n}\n\n// SelectedHost is an interface returned when picking a host from a host\n// selection policy.\ntype SelectedHost interface {\n\tInfo() *HostInfo\n\tToken() Token\n\tMark(error)\n}\n\ntype selectedHost struct {\n\tinfo  *HostInfo\n\ttoken Token\n}\n\nfunc (host selectedHost) Info() *HostInfo {\n\treturn 
host.info\n}\n\nfunc (host selectedHost) Token() Token {\n\treturn host.token\n}\n\nfunc (host selectedHost) Mark(err error) {}\n\nfunc newSingleHost(info *HostInfo, maxRetries byte, retryDelay time.Duration) *singleHost {\n\treturn &singleHost{info: info, maxRetries: maxRetries, delay: retryDelay}\n}\n\ntype singleHost struct {\n\tinfo       *HostInfo\n\tdelay      time.Duration\n\tretry      byte\n\tmaxRetries byte\n}\n\nfunc (s *singleHost) selectHost() SelectedHost {\n\tif s.retry >= s.maxRetries {\n\t\treturn nil\n\t}\n\tif s.retry > 0 && s.delay > 0 {\n\t\ttime.Sleep(s.delay)\n\t}\n\ts.retry++\n\treturn s\n}\n\nfunc (s singleHost) Info() *HostInfo { return s.info }\n\nfunc (s singleHost) Token() Token { return nil }\n\nfunc (s singleHost) Mark(error) {}\n\n// NextHost is an iteration function over picked hosts\ntype NextHost func() SelectedHost\n\n// RoundRobinHostPolicy is a round-robin load balancing policy, where each host\n// is tried sequentially for each query.\nfunc RoundRobinHostPolicy() HostSelectionPolicy {\n\treturn &roundRobinHostPolicy{}\n}\n\ntype roundRobinHostPolicy struct {\n\thosts           cowHostList\n\tlastUsedHostIdx uint64\n}\n\nfunc (r *roundRobinHostPolicy) IsLocal(*HostInfo) bool              { return true }\nfunc (r *roundRobinHostPolicy) KeyspaceChanged(KeyspaceUpdateEvent) {}\nfunc (r *roundRobinHostPolicy) SetPartitioner(partitioner string)   {}\nfunc (r *roundRobinHostPolicy) Init(*Session)                       {}\nfunc (r *roundRobinHostPolicy) Reset()                              {}\nfunc (r *roundRobinHostPolicy) IsOperational(*Session) error        { return nil }\n\nfunc (r *roundRobinHostPolicy) Pick(qry ExecutableQuery) NextHost {\n\tnextStartOffset := atomic.AddUint64(&r.lastUsedHostIdx, 1)\n\treturn roundRobbin(int(nextStartOffset), r.hosts.get())\n}\n\nfunc (r *roundRobinHostPolicy) AddHost(host *HostInfo) {\n\tr.hosts.add(host)\n}\n\nfunc (r *roundRobinHostPolicy) RemoveHost(host *HostInfo) 
{\n\tr.hosts.remove(host)\n}\n\nfunc (r *roundRobinHostPolicy) HostUp(host *HostInfo) {\n\tr.AddHost(host)\n}\n\nfunc (r *roundRobinHostPolicy) HostDown(host *HostInfo) {\n\tr.RemoveHost(host)\n}\n\nfunc ShuffleReplicas() func(*tokenAwareHostPolicy) {\n\treturn func(t *tokenAwareHostPolicy) {\n\t\tt.shuffleReplicas = true\n\t}\n}\n\nfunc DontShuffleReplicas() func(*tokenAwareHostPolicy) {\n\treturn func(t *tokenAwareHostPolicy) {\n\t\tt.shuffleReplicas = false\n\t}\n}\n\n// AvoidSlowReplicas enables avoiding slow replicas\n//\n// TokenAwareHostPolicy normally does not check how busy a replica is, with avoidSlowReplicas enabled it avoids replicas\n// if they have equal or more than MAX_IN_FLIGHT_THRESHOLD requests in flight\nfunc AvoidSlowReplicas(max_in_flight_threshold int) func(policy *tokenAwareHostPolicy) {\n\treturn func(t *tokenAwareHostPolicy) {\n\t\tt.avoidSlowReplicas = true\n\t\tMAX_IN_FLIGHT_THRESHOLD = max_in_flight_threshold\n\t}\n}\n\n// NonLocalReplicasFallback enables fallback to replicas that are not considered local.\n//\n// TokenAwareHostPolicy used with DCAwareHostPolicy fallback first selects replicas by partition key in local DC, then\n// falls back to other nodes in the local DC. Enabling NonLocalReplicasFallback causes TokenAwareHostPolicy\n// to first select replicas by partition key in local DC, then replicas by partition key in remote DCs and fall back\n// to other nodes in local DC.\nfunc NonLocalReplicasFallback() func(policy *tokenAwareHostPolicy) {\n\treturn func(t *tokenAwareHostPolicy) {\n\t\tt.nonLocalReplicasFallback = true\n\t}\n}\n\n// TokenAwareHostPolicy is a token aware host selection policy, where hosts are\n// selected based on the partition key, so queries are sent to the host which\n// owns the partition. 
Fallback is used when routing information is not available.\nfunc TokenAwareHostPolicy(fallback HostSelectionPolicy, opts ...func(*tokenAwareHostPolicy)) HostSelectionPolicy {\n\tp := &tokenAwareHostPolicy{\n\t\tfallback:        fallback,\n\t\tshuffleReplicas: true,\n\t}\n\tfor _, opt := range opts {\n\t\topt(p)\n\t}\n\treturn p\n}\n\n// clusterMeta holds metadata about cluster topology.\n// It is used inside atomic.Value and shallow copies are used when replacing it,\n// so fields should not be modified in-place. Instead, to modify a field a copy of the field should be made\n// and the pointer in clusterMeta updated to point to the new value.\ntype clusterMeta struct {\n\t// replicas is map[keyspace]map[token]hosts\n\treplicas  map[string]tokenRingReplicas\n\ttokenRing *tokenRing\n}\n\nvar MAX_IN_FLIGHT_THRESHOLD int = 10\n\ntype tokenAwareHostPolicy struct {\n\tfallback HostSelectionPolicy\n\t// atomic store for *clusterMeta\n\tmetadata            atomic.Value\n\tlogger              StdLogger\n\tgetKeyspaceMetadata func(keyspace string) (*KeyspaceMetadata, error)\n\tgetKeyspaceName     func() string\n\thosts               cowHostList\n\tpartitioner         string\n\t// mu protects writes to hosts, partitioner, metadata.\n\t// reads can be unlocked as long as they are not used for updating state later.\n\tmu                       sync.Mutex\n\tshuffleReplicas          bool\n\tnonLocalReplicasFallback bool\n\tavoidSlowReplicas        bool\n}\n\nfunc (t *tokenAwareHostPolicy) Init(s *Session) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tif t.getKeyspaceMetadata != nil {\n\t\t// Init was already called.\n\t\t// See https://github.com/scylladb/gocql/issues/94.\n\t\tpanic(\"sharing token aware host selection policy between sessions is not supported\")\n\t}\n\tt.getKeyspaceMetadata = func(keyspace string) (*KeyspaceMetadata, error) {\n\t\tif keyspace == \"\" {\n\t\t\treturn nil, ErrNoKeyspace\n\t\t}\n\t\treturn 
s.metadataDescriber.GetKeyspace(keyspace)\n\t}\n\tt.getKeyspaceName = func() string { return s.cfg.Keyspace }\n\tt.logger = s.logger\n}\n\nfunc (t *tokenAwareHostPolicy) Reset() {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\t// Sharing token aware host selection policy between sessions is not supported\n\t// but session initialization can failed for some reasons. So in our application\n\t// may be we want to create new session again.\n\t// Reset method should be called in Session.Close method\n\tt.getKeyspaceMetadata = nil\n\tt.getKeyspaceName = nil\n\tt.logger = nil\n}\n\nfunc (t *tokenAwareHostPolicy) IsOperational(session *Session) error {\n\treturn t.fallback.IsOperational(session)\n}\n\nfunc (t *tokenAwareHostPolicy) IsLocal(host *HostInfo) bool {\n\treturn t.fallback.IsLocal(host)\n}\n\nfunc (t *tokenAwareHostPolicy) KeyspaceChanged(update KeyspaceUpdateEvent) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tmeta := t.getMetadataForUpdate()\n\tt.updateReplicas(meta, update.Keyspace)\n\tt.metadata.Store(meta)\n}\n\n// updateReplicas updates replicas in clusterMeta.\n// It must be called with t.mu mutex locked.\n// meta must not be nil and it's replicas field will be updated.\nfunc (t *tokenAwareHostPolicy) updateReplicas(meta *clusterMeta, keyspace string) {\n\tif keyspace == \"\" || meta == nil || meta.tokenRing == nil {\n\t\treturn\n\t}\n\tnewReplicas := make(map[string]tokenRingReplicas, len(meta.replicas))\n\n\tks, err := t.getKeyspaceMetadata(keyspace)\n\tif err == nil {\n\t\tstrat := getStrategy(ks, t.logger)\n\t\tif strat != nil {\n\t\t\tnewReplicas[keyspace] = strat.replicaMap(meta.tokenRing)\n\t\t}\n\t}\n\n\tfor ks, replicas := range meta.replicas {\n\t\tif ks != keyspace {\n\t\t\tnewReplicas[ks] = replicas\n\t\t}\n\t}\n\n\tmeta.replicas = newReplicas\n}\n\nfunc (t *tokenAwareHostPolicy) SetPartitioner(partitioner string) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tif t.partitioner != partitioner 
{\n\t\tt.fallback.SetPartitioner(partitioner)\n\t\tt.partitioner = partitioner\n\t\tmeta := t.getMetadataForUpdate()\n\t\tmeta.resetTokenRing(t.partitioner, t.hosts.get(), t.logger)\n\t\tt.updateReplicas(meta, t.getKeyspaceName())\n\t\tt.metadata.Store(meta)\n\t}\n}\n\nfunc (t *tokenAwareHostPolicy) AddHost(host *HostInfo) {\n\tt.mu.Lock()\n\tif t.hosts.add(host) {\n\t\tmeta := t.getMetadataForUpdate()\n\t\tmeta.resetTokenRing(t.partitioner, t.hosts.get(), t.logger)\n\t\tt.updateReplicas(meta, t.getKeyspaceName())\n\t\tt.metadata.Store(meta)\n\t}\n\tt.mu.Unlock()\n\n\tt.fallback.AddHost(host)\n}\n\nfunc (t *tokenAwareHostPolicy) AddHosts(hosts []*HostInfo) {\n\tt.mu.Lock()\n\n\tfor _, host := range hosts {\n\t\tt.hosts.add(host)\n\t}\n\n\tmeta := t.getMetadataForUpdate()\n\tmeta.resetTokenRing(t.partitioner, t.hosts.get(), t.logger)\n\tt.updateReplicas(meta, t.getKeyspaceName())\n\tt.metadata.Store(meta)\n\n\tt.mu.Unlock()\n\n\tfor _, host := range hosts {\n\t\tt.fallback.AddHost(host)\n\t}\n}\n\nfunc (t *tokenAwareHostPolicy) RemoveHost(host *HostInfo) {\n\tt.mu.Lock()\n\tif t.hosts.remove(host) {\n\t\tmeta := t.getMetadataForUpdate()\n\t\tmeta.resetTokenRing(t.partitioner, t.hosts.get(), t.logger)\n\t\tt.updateReplicas(meta, t.getKeyspaceName())\n\t\tt.metadata.Store(meta)\n\t}\n\tt.mu.Unlock()\n\n\tt.fallback.RemoveHost(host)\n}\n\nfunc (t *tokenAwareHostPolicy) HostUp(host *HostInfo) {\n\tt.fallback.HostUp(host)\n}\n\nfunc (t *tokenAwareHostPolicy) HostDown(host *HostInfo) {\n\tt.fallback.HostDown(host)\n}\n\n// getMetadataReadOnly returns current cluster metadata.\n// Metadata uses copy on write, so the returned value should be only used for reading.\n// To obtain a copy that could be updated, use getMetadataForUpdate instead.\nfunc (t *tokenAwareHostPolicy) getMetadataReadOnly() *clusterMeta {\n\tmeta, _ := t.metadata.Load().(*clusterMeta)\n\treturn meta\n}\n\n// getMetadataForUpdate returns clusterMeta suitable for updating.\n// It is a SHALLOW copy of 
current metadata in case it was already set or new empty clusterMeta otherwise.\n// This function should be called with t.mu mutex locked and the mutex should not be released before\n// storing the new metadata.\nfunc (t *tokenAwareHostPolicy) getMetadataForUpdate() *clusterMeta {\n\tmetaReadOnly := t.getMetadataReadOnly()\n\tmeta := new(clusterMeta)\n\tif metaReadOnly != nil {\n\t\t*meta = *metaReadOnly\n\t}\n\treturn meta\n}\n\n// resetTokenRing creates a new tokenRing.\n// It must be called with t.mu locked.\nfunc (m *clusterMeta) resetTokenRing(partitioner string, hosts []*HostInfo, logger StdLogger) {\n\tif partitioner == \"\" {\n\t\t// partitioner not yet set\n\t\treturn\n\t}\n\n\t// create a new token ring\n\ttokenRing, err := newTokenRing(partitioner, hosts)\n\tif err != nil {\n\t\tlogger.Printf(\"Unable to update the token ring due to error: %s\", err)\n\t\treturn\n\t}\n\n\t// replace the token ring\n\tm.tokenRing = tokenRing\n}\n\n// hostSet is a small set optimized for tracking hosts returned by the\n// token-aware iterator. 
Uses an inline array for RF <= 9 (3 DCs × RF=3),\n// spilling to a map for larger replica sets.\ntype hostSet struct {\n\toverflow map[*HostInfo]struct{}\n\tarr      [9]*HostInfo\n\tn        int\n}\n\nfunc (s *hostSet) add(h *HostInfo) {\n\tif s.n < len(s.arr) {\n\t\ts.arr[s.n] = h\n\t\ts.n++\n\t\treturn\n\t}\n\tif s.overflow == nil {\n\t\ts.overflow = make(map[*HostInfo]struct{})\n\t\tfor i := range s.n {\n\t\t\ts.overflow[s.arr[i]] = struct{}{}\n\t\t}\n\t}\n\ts.overflow[h] = struct{}{}\n}\n\nfunc (s *hostSet) contains(h *HostInfo) bool {\n\tif s.overflow != nil {\n\t\t_, ok := s.overflow[h]\n\t\treturn ok\n\t}\n\tfor i := range s.n {\n\t\tif s.arr[i] == h {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// shuffleHostsInPlace shuffles the given slice in-place using math/rand/v2.\nfunc shuffleHostsInPlace(hosts []*HostInfo) {\n\trandv2.Shuffle(len(hosts), func(i, j int) {\n\t\thosts[i], hosts[j] = hosts[j], hosts[i]\n\t})\n}\n\n// partitionHealthy performs an in-place stable partition of replicas, moving\n// healthy (non-busy) hosts to the front while preserving relative order.\nfunc partitionHealthy(replicas []*HostInfo, s *Session) {\n\tn := len(replicas)\n\tif n <= 1 {\n\t\treturn\n\t}\n\n\t// Snapshot IsBusy to avoid TOCTOU races between counting and placement.\n\tvar busyBuf [9]bool\n\tvar busy []bool\n\tif n <= len(busyBuf) {\n\t\tbusy = busyBuf[:n]\n\t} else {\n\t\tbusy = make([]bool, n)\n\t}\n\n\thealthyCount := 0\n\tfor i, h := range replicas {\n\t\tbusy[i] = h.IsBusy(s)\n\t\tif !busy[i] {\n\t\t\thealthyCount++\n\t\t}\n\t}\n\n\tif healthyCount == 0 || healthyCount == n {\n\t\treturn // all same category, nothing to do\n\t}\n\n\tvar buf [9]*HostInfo\n\tvar tmp []*HostInfo\n\tif n <= len(buf) {\n\t\ttmp = buf[:n]\n\t} else {\n\t\ttmp = make([]*HostInfo, n)\n\t}\n\tcopy(tmp, replicas)\n\n\thi, ui := 0, healthyCount\n\tfor i, h := range tmp {\n\t\tif !busy[i] {\n\t\t\treplicas[hi] = h\n\t\t\thi++\n\t\t} else {\n\t\t\treplicas[ui] = 
h\n\t\t\tui++\n\t\t}\n\t}\n}\n\nfunc (t *tokenAwareHostPolicy) Pick(qry ExecutableQuery) NextHost {\n\tif qry == nil {\n\t\treturn t.fallback.Pick(qry)\n\t}\n\n\troutingKey, err := qry.GetRoutingKey()\n\tif err != nil {\n\t\treturn t.fallback.Pick(qry)\n\t} else if routingKey == nil {\n\t\treturn t.fallback.Pick(qry)\n\t}\n\n\tmeta := t.getMetadataReadOnly()\n\tif meta == nil || meta.tokenRing == nil {\n\t\treturn t.fallback.Pick(qry)\n\t}\n\n\tpartitioner := qry.GetCustomPartitioner()\n\tif partitioner == nil {\n\t\tpartitioner = meta.tokenRing.partitioner\n\t}\n\n\ttoken := partitioner.Hash(routingKey)\n\ttokenCasted, isInt64Token := token.(int64Token)\n\n\tvar replicas []*HostInfo\n\n\tif session := qry.GetSession(); session != nil && session.tabletsRoutingV1 && isInt64Token {\n\t\ttabletReplicas := session.findTabletReplicasUnsafeForToken(qry.Keyspace(), qry.Table(), int64(tokenCasted))\n\t\tif len(tabletReplicas) != 0 {\n\t\t\thosts := t.hosts.get()\n\t\t\tfor _, replica := range tabletReplicas {\n\t\t\t\tfor _, host := range hosts {\n\t\t\t\t\tif host.hostId == UUID(replica.HostUUIDValue()) {\n\t\t\t\t\t\treplicas = append(replicas, host)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(replicas) == 0 {\n\t\tht := meta.replicas[qry.Keyspace()].replicasFor(token)\n\t\tif ht != nil {\n\t\t\tneedsMutation := t.shuffleReplicas || t.avoidSlowReplicas\n\t\t\tif needsMutation {\n\t\t\t\treplicas = make([]*HostInfo, len(ht.hosts))\n\t\t\t\tcopy(replicas, ht.hosts)\n\t\t\t} else {\n\t\t\t\t// Zero-copy: replicas must not be mutated below unless needsMutation is true.\n\t\t\t\treplicas = ht.hosts\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(replicas) == 0 {\n\t\thost, _ := meta.tokenRing.GetHostForToken(token)\n\t\treplicas = []*HostInfo{host}\n\t}\n\n\tif t.shuffleReplicas && !qry.IsLWT() && len(replicas) > 1 {\n\t\tshuffleHostsInPlace(replicas)\n\t}\n\n\tif s := qry.GetSession(); s != nil && !qry.IsLWT() && t.avoidSlowReplicas 
{\n\t\tpartitionHealthy(replicas, s)\n\t}\n\n\tvar (\n\t\tfallbackIter NextHost\n\t\ti, j, k      int\n\t\tremote       [][]*HostInfo\n\t\ttierer       HostTierer\n\t\ttiererOk     bool\n\t\tmaxTier      uint\n\t)\n\n\tif tierer, tiererOk = t.fallback.(HostTierer); tiererOk {\n\t\tmaxTier = tierer.MaxHostTier()\n\t} else {\n\t\tmaxTier = 1\n\t}\n\n\tif t.nonLocalReplicasFallback {\n\t\tremote = make([][]*HostInfo, maxTier)\n\t}\n\n\tvar used hostSet\n\treturn func() SelectedHost {\n\t\tfor i < len(replicas) {\n\t\t\th := replicas[i]\n\t\t\ti++\n\n\t\t\tvar tier uint\n\t\t\tif tiererOk {\n\t\t\t\ttier = tierer.HostTier(h)\n\t\t\t} else if t.fallback.IsLocal(h) {\n\t\t\t\ttier = 0\n\t\t\t} else {\n\t\t\t\ttier = 1\n\t\t\t}\n\n\t\t\tif tier != 0 {\n\t\t\t\tif t.nonLocalReplicasFallback {\n\t\t\t\t\tremote[tier-1] = append(remote[tier-1], h)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif h.IsUp() {\n\t\t\t\tused.add(h)\n\t\t\t\treturn selectedHost{info: h, token: token}\n\t\t\t}\n\t\t}\n\n\t\tif t.nonLocalReplicasFallback {\n\t\t\tfor j < len(remote) && k < len(remote[j]) {\n\t\t\t\th := remote[j][k]\n\t\t\t\tk++\n\n\t\t\t\tif k >= len(remote[j]) {\n\t\t\t\t\tj++\n\t\t\t\t\tk = 0\n\t\t\t\t}\n\n\t\t\t\tif h.IsUp() {\n\t\t\t\t\tused.add(h)\n\t\t\t\t\treturn selectedHost{info: h, token: token}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif fallbackIter == nil {\n\t\t\t// fallback\n\t\t\tfallbackIter = t.fallback.Pick(qry)\n\t\t}\n\n\t\t// filter the token aware selected hosts from the fallback hosts\n\t\tfor fallbackHost := fallbackIter(); fallbackHost != nil; fallbackHost = fallbackIter() {\n\t\t\tif !used.contains(fallbackHost.Info()) {\n\t\t\t\tused.add(fallbackHost.Info())\n\t\t\t\treturn fallbackHost\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\ntype dcAwareRR struct {\n\tlocal             string\n\tlocalHosts        cowHostList\n\tremoteHosts       cowHostList\n\tlastUsedHostIdx   uint64\n\tdisableDCFailover bool\n}\n\ntype dcFailoverDisabledPolicy interface 
{\n\tsetDCFailoverDisabled()\n}\n\ntype dcAwarePolicyOption func(p dcFailoverDisabledPolicy)\n\nfunc HostPolicyOptionDisableDCFailover(p dcFailoverDisabledPolicy) {\n\tp.setDCFailoverDisabled()\n}\n\n// DCAwareRoundRobinPolicy is a host selection policies which will prioritize and\n// return hosts which are in the local datacentre before returning hosts in all\n// other datercentres\nfunc DCAwareRoundRobinPolicy(localDC string, opts ...dcAwarePolicyOption) HostSelectionPolicy {\n\tp := &dcAwareRR{local: localDC, disableDCFailover: false}\n\tfor _, opt := range opts {\n\t\topt(p)\n\t}\n\treturn p\n}\n\nfunc (d *dcAwareRR) setDCFailoverDisabled() {\n\td.disableDCFailover = true\n}\nfunc (d *dcAwareRR) Init(*Session)                       {}\nfunc (d *dcAwareRR) Reset()                              {}\nfunc (d *dcAwareRR) KeyspaceChanged(KeyspaceUpdateEvent) {}\nfunc (d *dcAwareRR) SetPartitioner(p string)             {}\n\nfunc (d *dcAwareRR) IsOperational(session *Session) error {\n\tif session.cfg.disableInit || session.cfg.disableControlConn {\n\t\treturn nil\n\t}\n\n\thosts := session.hostSource.getHostsList()\n\tfor _, host := range hosts {\n\t\tif !session.cfg.filterHost(host) && host.DataCenter() == d.local {\n\t\t\t// Policy can work properly only if there is at least one host from target DC\n\t\t\t// No need to check host status, since it could be down due to the outage\n\t\t\t// We only need to make sure that policy is not misconfigured with wrong DC\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"gocql: datacenter %s in the policy was not found in the topology - probable DC aware policy misconfiguration\", d.local)\n}\n\nfunc (d *dcAwareRR) IsLocal(host *HostInfo) bool {\n\treturn host.DataCenter() == d.local\n}\n\nfunc (d *dcAwareRR) AddHost(host *HostInfo) {\n\tif d.IsLocal(host) {\n\t\td.localHosts.add(host)\n\t} else {\n\t\td.remoteHosts.add(host)\n\t}\n}\n\nfunc (d *dcAwareRR) RemoveHost(host *HostInfo) {\n\tif d.IsLocal(host) 
{\n\t\td.localHosts.remove(host)\n\t} else {\n\t\td.remoteHosts.remove(host)\n\t}\n}\n\nfunc (d *dcAwareRR) HostUp(host *HostInfo)   { d.AddHost(host) }\nfunc (d *dcAwareRR) HostDown(host *HostInfo) { d.RemoveHost(host) }\n\n// This function is supposed to be called in a fashion\n// roundRobbin(offset, hostsPriority1, hostsPriority2, hostsPriority3 ... )\n//\n// E.g. for DC-naive strategy:\n// roundRobbin(offset, allHosts)\n//\n// For tiered and DC-aware strategy:\n// roundRobbin(offset, localHosts, remoteHosts)\nfunc roundRobbin(shift int, hosts ...[]*HostInfo) NextHost {\n\tcurrentLayer := 0\n\tcurrentlyObserved := 0\n\n\treturn func() SelectedHost {\n\t\t// iterate over layers\n\t\tfor {\n\t\t\tif currentLayer == len(hosts) {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tcurrentLayerSize := len(hosts[currentLayer])\n\n\t\t\t// iterate over hosts within a layer\n\t\t\tfor {\n\t\t\t\tcurrentlyObserved++\n\t\t\t\tif currentlyObserved > currentLayerSize {\n\t\t\t\t\tcurrentLayer++\n\t\t\t\t\tcurrentlyObserved = 0\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\th := hosts[currentLayer][(shift+currentlyObserved)%currentLayerSize]\n\n\t\t\t\tif h.IsUp() {\n\t\t\t\t\treturn selectedHost{info: h}\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (d *dcAwareRR) Pick(q ExecutableQuery) NextHost {\n\tnextStartOffset := atomic.AddUint64(&d.lastUsedHostIdx, 1)\n\tif d.disableDCFailover {\n\t\treturn roundRobbin(int(nextStartOffset), d.localHosts.get())\n\t}\n\treturn roundRobbin(int(nextStartOffset), d.localHosts.get(), d.remoteHosts.get())\n}\n\n// RackAwareRoundRobinPolicy is a host selection policies which will prioritize and\n// return hosts which are in the local rack, before hosts in the local datacenter but\n// a different rack, before hosts in all other datacenters\n\ntype rackAwareRR struct {\n\tlocalDC   string\n\tlocalRack string\n\thosts     []cowHostList\n\t// lastUsedHostIdx keeps the index of the last used host.\n\t// It is accessed atomically and needs to be aligned to 64 bits, so 
we\n\t// keep it first in the struct. Do not move it or add new struct members\n\t// before it.\n\tlastUsedHostIdx   uint64\n\tdisableDCFailover bool\n}\n\nfunc RackAwareRoundRobinPolicy(localDC string, localRack string, opts ...dcAwarePolicyOption) HostSelectionPolicy {\n\tp := &rackAwareRR{localDC: localDC, localRack: localRack, hosts: make([]cowHostList, 3), disableDCFailover: false}\n\tfor _, opt := range opts {\n\t\topt(p)\n\t}\n\treturn p\n}\n\nfunc (d *rackAwareRR) Init(*Session)                       {}\nfunc (d *rackAwareRR) Reset()                              {}\nfunc (d *rackAwareRR) KeyspaceChanged(KeyspaceUpdateEvent) {}\nfunc (d *rackAwareRR) SetPartitioner(p string)             {}\n\nfunc (d *rackAwareRR) IsOperational(session *Session) error {\n\tif session.cfg.disableInit || session.cfg.disableControlConn {\n\t\treturn nil\n\t}\n\thosts := session.hostSource.getHostsList()\n\tfor _, host := range hosts {\n\t\tif !session.cfg.filterHost(host) && host.DataCenter() == d.localDC && host.Rack() == d.localRack {\n\t\t\t// Policy can work properly only if there is at least one host from target DC+Rack\n\t\t\t// No need to check host status, since it could be down due to the outage\n\t\t\t// We only need to make sure that policy is not misconfigured with wrong DC+Rack\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"gocql: rack %s/%s was not found in the topology - probable Rack aware policy misconfiguration\", d.localDC, d.localRack)\n}\n\nfunc (d *rackAwareRR) MaxHostTier() uint {\n\treturn 2\n}\n\nfunc (d *rackAwareRR) setDCFailoverDisabled() {\n\td.disableDCFailover = true\n}\n\nfunc (d *rackAwareRR) HostTier(host *HostInfo) uint {\n\tif host.DataCenter() == d.localDC {\n\t\tif host.Rack() == d.localRack {\n\t\t\treturn 0\n\t\t} else {\n\t\t\treturn 1\n\t\t}\n\t} else {\n\t\treturn 2\n\t}\n}\n\nfunc (d *rackAwareRR) IsLocal(host *HostInfo) bool {\n\treturn d.HostTier(host) == 0\n}\n\nfunc (d *rackAwareRR) AddHost(host *HostInfo) {\n\tdist := 
d.HostTier(host)\n\td.hosts[dist].add(host)\n}\n\nfunc (d *rackAwareRR) RemoveHost(host *HostInfo) {\n\tdist := d.HostTier(host)\n\td.hosts[dist].remove(host)\n}\n\nfunc (d *rackAwareRR) HostUp(host *HostInfo)   { d.AddHost(host) }\nfunc (d *rackAwareRR) HostDown(host *HostInfo) { d.RemoveHost(host) }\n\nfunc (d *rackAwareRR) Pick(q ExecutableQuery) NextHost {\n\tnextStartOffset := atomic.AddUint64(&d.lastUsedHostIdx, 1)\n\tif d.disableDCFailover {\n\t\treturn roundRobbin(int(nextStartOffset), d.hosts[0].get(), d.hosts[1].get())\n\t}\n\treturn roundRobbin(int(nextStartOffset), d.hosts[0].get(), d.hosts[1].get(), d.hosts[2].get())\n}\n\n// ReadyPolicy defines a policy for when a HostSelectionPolicy can be used. After\n// each host connects during session initialization, the Ready method will be\n// called. If you only need a single Host to be up you can wrap a\n// HostSelectionPolicy policy with SingleHostReadyPolicy.\ntype ReadyPolicy interface {\n\tReady() bool\n}\n\n// SingleHostReadyPolicy wraps a HostSelectionPolicy and returns Ready after a\n// single host has been added via HostUp\nfunc SingleHostReadyPolicy(p HostSelectionPolicy) *singleHostReadyPolicy {\n\treturn &singleHostReadyPolicy{\n\t\tHostSelectionPolicy: p,\n\t}\n}\n\ntype singleHostReadyPolicy struct {\n\tHostSelectionPolicy\n\tready    bool\n\treadyMux sync.Mutex\n}\n\nfunc (s *singleHostReadyPolicy) HostUp(host *HostInfo) {\n\ts.HostSelectionPolicy.HostUp(host)\n\n\ts.readyMux.Lock()\n\ts.ready = true\n\ts.readyMux.Unlock()\n}\n\nfunc (s *singleHostReadyPolicy) Ready() bool {\n\ts.readyMux.Lock()\n\tready := s.ready\n\ts.readyMux.Unlock()\n\tif !ready {\n\t\treturn false\n\t}\n\n\t// in case the wrapped policy is also a ReadyPolicy, defer to that\n\tif rdy, ok := s.HostSelectionPolicy.(ReadyPolicy); ok {\n\t\treturn rdy.Ready()\n\t}\n\treturn true\n}\n\n// ConvictionPolicy interface is used by gocql to determine if a host should be\n// marked as DOWN based on the error and host info\ntype 
ConvictionPolicy interface {\n\t// Implementations should return `true` if the host should be convicted, `false` otherwise.\n\tAddFailure(error error, host *HostInfo) bool\n\t// Implementations should clear out any convictions or state regarding the host.\n\tReset(host *HostInfo)\n}\n\n// SimpleConvictionPolicy implements a ConvictionPolicy which convicts all hosts\n// regardless of error\ntype SimpleConvictionPolicy struct{}\n\nfunc (e *SimpleConvictionPolicy) AddFailure(error error, host *HostInfo) bool {\n\treturn true\n}\n\nfunc (e *SimpleConvictionPolicy) Reset(host *HostInfo) {}\n\n// ReconnectionPolicy interface is used by gocql to determine if reconnection\n// can be attempted after connection error. The interface allows gocql users\n// to implement their own logic to determine how to attempt reconnection.\ntype ReconnectionPolicy interface {\n\tGetInterval(currentRetry int) time.Duration\n\tGetMaxRetries() int\n}\n\n// NoReconnectionPolicy is a policy to have no retry.\n//\n// Examples of usage:\n//\n//\tcluster.InitialReconnectionPolicy = &NoReconnectionPolicy{}\ntype NoReconnectionPolicy struct {\n}\n\nfunc (c *NoReconnectionPolicy) GetInterval(currentRetry int) time.Duration {\n\treturn time.Duration(0)\n}\n\nfunc (c *NoReconnectionPolicy) GetMaxRetries() int {\n\treturn 1\n}\n\n// ConstantReconnectionPolicy has simple logic for returning a fixed reconnection interval.\n//\n// Examples of usage:\n//\n//\tcluster.ReconnectionPolicy = &gocql.ConstantReconnectionPolicy{MaxRetries: 10, Interval: 8 * time.Second}\ntype ConstantReconnectionPolicy struct {\n\tMaxRetries int\n\tInterval   time.Duration\n}\n\nfunc (c *ConstantReconnectionPolicy) GetInterval(currentRetry int) time.Duration {\n\treturn c.Interval\n}\n\nfunc (c *ConstantReconnectionPolicy) GetMaxRetries() int {\n\treturn c.MaxRetries\n}\n\n// ExponentialReconnectionPolicy returns a growing reconnection interval.\ntype ExponentialReconnectionPolicy struct {\n\tMaxRetries      int\n\tInitialInterval 
time.Duration\n\tMaxInterval     time.Duration\n}\n\nfunc (e *ExponentialReconnectionPolicy) GetInterval(currentRetry int) time.Duration {\n\tmax := e.MaxInterval\n\tif max < e.InitialInterval {\n\t\tmax = math.MaxInt16 * time.Second\n\t}\n\treturn getExponentialTime(e.InitialInterval, max, currentRetry)\n}\n\nfunc (e *ExponentialReconnectionPolicy) GetMaxRetries() int {\n\treturn e.MaxRetries\n}\n\ntype SpeculativeExecutionPolicy interface {\n\tAttempts() int\n\tDelay() time.Duration\n}\n\ntype NonSpeculativeExecution struct{}\n\n// defaultNonSpecExec is a package-level singleton that avoids allocating a new\n// NonSpeculativeExecution every time a Query or Batch is initialised.\nvar defaultNonSpecExec SpeculativeExecutionPolicy = &NonSpeculativeExecution{}\n\nfunc (sp NonSpeculativeExecution) Attempts() int        { return 0 } // No additional attempts\nfunc (sp NonSpeculativeExecution) Delay() time.Duration { return 1 } // The delay. Must be positive to be used in a ticker.\n\ntype SimpleSpeculativeExecution struct {\n\tNumAttempts  int\n\tTimeoutDelay time.Duration\n}\n\nfunc (sp *SimpleSpeculativeExecution) Attempts() int        { return sp.NumAttempts }\nfunc (sp *SimpleSpeculativeExecution) Delay() time.Duration { return sp.TimeoutDelay }\n"
  },
  {
    "path": "policies_bench_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"net\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql/tablets\"\n)\n\n// setupTabletAwareBench creates a tokenAwareHostPolicy with the given number\n// of hosts and tablets, wired up to a mock session with tabletsRoutingV1.\n// It returns the policy, session, and a slice of pre-built queries that hit\n// different tokens spread across the tablet range.\nfunc setupTabletAwareBench(b *testing.B, numHosts, numTablets, rf int) (HostSelectionPolicy, *Session, []*Query) {\n\tb.Helper()\n\n\tconst keyspace = \"benchks\"\n\tconst table = \"benchtbl\"\n\n\tpolicy := TokenAwareHostPolicy(RoundRobinHostPolicy())\n\tpolicyInternal := policy.(*tokenAwareHostPolicy)\n\tpolicyInternal.getKeyspaceName = func() string { return keyspace }\n\n\tpolicyInternal.getKeyspaceMetadata = func(ks string) (*KeyspaceMetadata, error) {\n\t\treturn &KeyspaceMetadata{\n\t\t\tName:          keyspace,\n\t\t\tStrategyClass: \"SimpleStrategy\",\n\t\t\tStrategyOptions: map[string]any{\n\t\t\t\t\"class\":              \"SimpleStrategy\",\n\t\t\t\t\"replication_factor\": rf,\n\t\t\t},\n\t\t}, nil\n\t}\n\n\t// Create hosts with binary UUIDs (matching what tablets use).\n\thosts := make([]*HostInfo, numHosts)\n\tfor i := range hosts {\n\t\thosts[i] = &HostInfo{\n\t\t\thostId:         tUUID(i),\n\t\t\tconnectAddress: net.IPv4(10, 0, byte(i>>8), byte(i)),\n\t\t\ttokens:         []string{fmt.Sprintf(\"%d\", int64(math.MinInt64)+int64(i)*100)},\n\t\t}\n\t\tpolicy.AddHost(hosts[i])\n\t}\n\tpolicy.SetPartitioner(\"Murmur3Partitioner\")\n\tpolicy.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: keyspace})\n\n\t// Set up mock session with tablet routing.\n\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\ts := newSchemaEventTestSession(ctrl, &trackingPolicy{}, \"\")\n\ts.useSystemSchema = true\n\ts.isInitialized = true\n\ts.tabletsRoutingV1 = true\n\n\t// Build tablets covering the 
full token range, each with `rf` replicas\n\t// drawn round-robin from the host list.\n\tstep := uint64(math.MaxUint64) / uint64(numTablets)\n\tfirstToken := int64(math.MinInt64)\n\n\ttabletList := make(tablets.TabletInfoList, numTablets)\n\tfor i := 0; i < numTablets; i++ {\n\t\tlastToken := firstToken + int64(step)\n\t\tif i == numTablets-1 {\n\t\t\tlastToken = math.MaxInt64\n\t\t}\n\n\t\treps := make([][]any, rf)\n\t\tfor r := 0; r < rf; r++ {\n\t\t\thostIdx := (i + r) % numHosts\n\t\t\treps[r] = []any{hosts[hostIdx].hostId, 0}\n\t\t}\n\t\tti, err := tablets.TabletInfoBuilder{\n\t\t\tKeyspaceName: keyspace,\n\t\t\tTableName:    table,\n\t\t\tFirstToken:   firstToken,\n\t\t\tLastToken:    lastToken,\n\t\t\tReplicas:     reps,\n\t\t}.Build()\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\ttabletList[i] = ti\n\t\tfirstToken = lastToken\n\t}\n\n\ts.metadataDescriber.metadata.tabletsMetadata.BulkAddTablets(tabletList)\n\ts.metadataDescriber.metadata.tabletsMetadata.Flush()\n\n\t// Pre-build queries that hit evenly spaced tokens.\n\tconst numQueries = 256\n\tqueries := make([]*Query, numQueries)\n\ttokenStep := uint64(math.MaxUint64) / uint64(numQueries)\n\tfor i := range queries {\n\t\ttoken := int64(math.MinInt64) + int64(uint64(i)*tokenStep) + 1\n\t\tqueries[i] = &Query{\n\t\t\troutingInfo: &queryRoutingInfo{\n\t\t\t\tkeyspace:    keyspace,\n\t\t\t\ttable:       table,\n\t\t\t\tpartitioner: fixedInt64Partitioner(token),\n\t\t\t},\n\t\t\tsession: s,\n\t\t}\n\t\tqueries[i].getKeyspace = func() string { return keyspace }\n\t\tqueries[i].routingKey = []byte(\"key\")\n\t}\n\n\treturn policy, s, queries\n}\n\n// BenchmarkTabletAwarePick benchmarks the full tokenAwareHostPolicy.Pick()\n// path with tablet routing, varying the number of hosts in the cluster.\n// This measures the O(RF * H) host resolution loop in Pick().\nfunc BenchmarkTabletAwarePick(b *testing.B) {\n\tfor _, numHosts := range []int{10, 50, 100} {\n\t\tb.Run(fmt.Sprintf(\"Hosts%d\", numHosts), func(b 
*testing.B) {\n\t\t\tconst numTablets = 10000\n\t\t\tconst rf = 3\n\t\t\tpolicy, s, queries := setupTabletAwareBench(b, numHosts, numTablets, rf)\n\t\t\tdefer s.Close()\n\n\t\t\truntime.GC()\n\t\t\tb.ResetTimer()\n\t\t\tb.ReportAllocs()\n\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tqry := queries[i%len(queries)]\n\t\t\t\titer := policy.Pick(qry)\n\t\t\t\t// Consume the first host (happy path — one NextHost call).\n\t\t\t\th := iter()\n\t\t\t\tif h == nil {\n\t\t\t\t\tb.Fatal(\"Pick returned nil on first call\")\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkTabletAwarePickAllReplicas benchmarks exhausting all replicas\n// from Pick(), simulating the retry/unhappy path.\nfunc BenchmarkTabletAwarePickAllReplicas(b *testing.B) {\n\tfor _, numHosts := range []int{10, 50, 100} {\n\t\tb.Run(fmt.Sprintf(\"Hosts%d\", numHosts), func(b *testing.B) {\n\t\t\tconst numTablets = 10000\n\t\t\tconst rf = 3\n\t\t\tpolicy, s, queries := setupTabletAwareBench(b, numHosts, numTablets, rf)\n\t\t\tdefer s.Close()\n\n\t\t\truntime.GC()\n\t\t\tb.ResetTimer()\n\t\t\tb.ReportAllocs()\n\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tqry := queries[i%len(queries)]\n\t\t\t\titer := policy.Pick(qry)\n\t\t\t\t// Exhaust all hosts from the iterator.\n\t\t\t\tfor h := iter(); h != nil; h = iter() {\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkHostIdComparison is a micro-benchmark for isolated host-ID\n// comparisons: string==string (current) baseline.\nfunc BenchmarkHostIdComparison(b *testing.B) {\n\tid1 := \"00000000-0000-0000-0000-000000000001\"\n\tid2 := \"00000000-0000-0000-0000-000000000001\"\n\n\tb.Run(\"StringEqual\", func(b *testing.B) {\n\t\tb.ReportAllocs()\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tif id1 != id2 {\n\t\t\t\tb.Fatal(\"should be equal\")\n\t\t\t}\n\t\t}\n\t})\n\n\t// Benchmark with UUIDs for comparison (what the proposed change uses).\n\tuuid1 := UUID{}\n\tuuid1[15] = 1\n\tuuid2 := UUID{}\n\tuuid2[15] = 1\n\n\tb.Run(\"UUIDEqual\", func(b *testing.B) 
{\n\t\tb.ReportAllocs()\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tif uuid1 != uuid2 {\n\t\t\t\tb.Fatal(\"should be equal\")\n\t\t\t}\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "policies_integration_test.go",
    "content": "//go:build integration\n// +build integration\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"testing\"\n\t\"time\"\n)\n\n// Check if session fail to start if DC name provided in the policy is wrong\nfunc TestDCValidationTokenAware(t *testing.T) {\n\tt.Parallel()\n\n\tcluster := createCluster()\n\n\tfallback := DCAwareRoundRobinPolicy(\"WRONG_DC\")\n\tcluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(fallback)\n\n\t_, err := cluster.CreateSession()\n\tif err == nil {\n\t\tt.Fatal(\"createSession was expected to fail with wrong DC name provided.\")\n\t}\n}\n\nfunc TestDCValidationDCAware(t *testing.T) {\n\tt.Parallel()\n\n\tcluster := createCluster()\n\tcluster.PoolConfig.HostSelectionPolicy = DCAwareRoundRobinPolicy(\"WRONG_DC\")\n\n\t_, err := cluster.CreateSession()\n\tif err == nil {\n\t\tt.Fatal(\"createSession was expected to fail with wrong DC name provided.\")\n\t}\n}\n\nfunc TestDCValidationRackAware(t *testing.T) {\n\tt.Parallel()\n\n\tcluster := createCluster()\n\tcluster.PoolConfig.HostSelectionPolicy = RackAwareRoundRobinPolicy(\"WRONG_DC\", \"RACK\")\n\n\t_, err := cluster.CreateSession()\n\tif err == nil {\n\t\tt.Fatal(\"createSession was expected to fail with wrong DC name provided.\")\n\t}\n}\n\nfunc TestTokenAwareHostPolicy(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"keyspace\", func(t *testing.T) {\n\t\tks := testKeyspaceName(t)\n\t\tcreateKeyspace(t, createCluster(), ks, false)\n\n\t\tpolicy := TokenAwareHostPolicy(RoundRobinHostPolicy())\n\t\ttokenPolicy := policy.(*tokenAwareHostPolicy)\n\t\tcluster := createCluster()\n\t\tcluster.Keyspace = ks\n\t\tcluster.PoolConfig.HostSelectionPolicy = policy\n\t\ttestIfPolicyInitializedProperly(t, cluster, tokenPolicy)\n\t})\n\n\tt.Run(\"no-keyspace\", func(t *testing.T) {\n\t\tpolicy := TokenAwareHostPolicy(RoundRobinHostPolicy())\n\t\ttokenPolicy := policy.(*tokenAwareHostPolicy)\n\t\tcluster := createCluster()\n\t\tcluster.PoolConfig.HostSelectionPolicy = 
policy\n\t\ttestIfPolicyInitializedProperly(t, cluster, tokenPolicy)\n\t})\n}\n\nfunc testIfPolicyInitializedProperly(t *testing.T, cluster *ClusterConfig, policy *tokenAwareHostPolicy) {\n\t_, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"faled to create session: %v\", err)\n\t}\n\tmd := policy.getMetadataReadOnly()\n\tif md == nil {\n\t\tt.Fatalf(\"tokenAwareHostPolicy has no metadata\")\n\t}\n\tif len(md.tokenRing.tokens) == 0 {\n\t\tt.Fatalf(\"tokenAwareHostPolicy metadata has no tokens\")\n\t}\n\tif len(md.tokenRing.hosts) == 0 {\n\t\tt.Fatalf(\"tokenAwareHostPolicy metadata has no hosts\")\n\t}\n\tif md.tokenRing.partitioner == nil {\n\t\tt.Fatalf(\"tokenAwareHostPolicy metadata has no partitioner\")\n\t}\n\tif cluster.Keyspace != \"\" {\n\t\tif len(md.replicas[cluster.Keyspace]) == 0 {\n\t\t\tt.Fatalf(\"tokenAwareHostPolicy metadata has no replicas in target keyspace\")\n\t\t}\n\t}\n}\n\n// TestNoHangAllHostsDown ensures that when all hosts are down, the query execution does not hang.\n// WARNING: This test must NOT use t.Parallel(). 
It sets ALL hosts to NodeDown state,\n// which mutates shared HostInfo objects visible to all concurrent sessions.\n//\n//nolint:paralleltest // mutates shared HostInfo state (sets all hosts to NodeDown)\nfunc TestNoHangAllHostsDown(t *testing.T) {\n\tcluster := createCluster()\n\tsession := createSessionFromCluster(cluster, t)\n\n\thosts := session.GetHosts()\n\tdc := hosts[0].DataCenter()\n\track := hosts[0].Rack()\n\tsession.Close()\n\n\tpolicies := []HostSelectionPolicy{\n\t\tDCAwareRoundRobinPolicy(dc),\n\t\tDCAwareRoundRobinPolicy(dc, HostPolicyOptionDisableDCFailover),\n\t\tTokenAwareHostPolicy(DCAwareRoundRobinPolicy(dc)),\n\t\tTokenAwareHostPolicy(DCAwareRoundRobinPolicy(dc, HostPolicyOptionDisableDCFailover)),\n\t\tRackAwareRoundRobinPolicy(dc, rack),\n\t\tRackAwareRoundRobinPolicy(dc, rack, HostPolicyOptionDisableDCFailover),\n\t\tTokenAwareHostPolicy(RackAwareRoundRobinPolicy(dc, rack)),\n\t\tTokenAwareHostPolicy(RackAwareRoundRobinPolicy(dc, rack, HostPolicyOptionDisableDCFailover)),\n\t\tnil,\n\t}\n\n\tfor _, policy := range policies {\n\t\tcluster = createCluster()\n\t\tcluster.PoolConfig.HostSelectionPolicy = policy\n\t\tsession = createSessionFromCluster(cluster, t)\n\t\thosts = session.GetHosts()\n\n\t\t// simulating hosts are down\n\t\tfor _, host := range hosts {\n\t\t\tpool, _ := session.pool.getPoolByHostID(host.HostID())\n\t\t\tpool.host.setState(NodeDown)\n\t\t\tif policy != nil {\n\t\t\t\tpolicy.AddHost(host)\n\t\t\t}\n\t\t}\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), 12*time.Second)\n\t\tdefer cancel()\n\t\t_ = session.Query(\"SELECT host_id FROM system.local\").WithContext(ctx).Exec()\n\t\tif ctx.Err() != nil {\n\t\t\tt.Errorf(\"policy %T should be no hangups when all hosts are down\", policy)\n\t\t}\n\n\t\t// remove all host except one\n\t\tif policy != nil {\n\t\t\tfor i, host := range hosts {\n\t\t\t\tif i != 0 {\n\t\t\t\t\tpolicy.RemoveHost(host)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tctx, cancel2 := 
context.WithTimeout(context.Background(), 12*time.Second)\n\t\tdefer cancel2()\n\t\t_ = session.Query(\"SELECT host_id FROM system.local\").WithContext(ctx).Exec()\n\t\tif ctx.Err() != nil {\n\t\t\tt.Errorf(\"policy %T should be no hangups when all hosts are down\", policy)\n\t\t}\n\t\tsession.Close()\n\t}\n}\n"
  },
  {
    "path": "policies_test.go",
    "content": "// Copyright (c) 2015 The gocql Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/internal/tests\"\n\t\"github.com/gocql/gocql/tablets\"\n\n\t\"github.com/google/go-cmp/cmp\"\n)\n\n// tUUID returns a deterministic UUID for testing. 
Byte 0 is always set to\n// a non-zero sentinel (0xFE) so that even tUUID(0) is distinguishable from\n// the zero UUID, and the last two bytes encode n.\nfunc tUUID(n int) UUID {\n\tvar u UUID\n\tu[0] = 0xFE\n\tu[14] = byte(n >> 8)\n\tu[15] = byte(n)\n\treturn u\n}\n\n// tID returns the string representation of tUUID(n), suitable for passing to\n// expectHosts and other string-based comparisons.\nfunc tID(n int) string {\n\treturn tUUID(n).String()\n}\n\n// Tests of the round-robin host selection policy implementation\nfunc TestRoundRobbin(t *testing.T) {\n\tt.Parallel()\n\n\tpolicy := RoundRobinHostPolicy()\n\n\thosts := [...]*HostInfo{\n\t\t{hostId: tUUID(0), connectAddress: net.IPv4(0, 0, 0, 1)},\n\t\t{hostId: tUUID(1), connectAddress: net.IPv4(0, 0, 0, 2)},\n\t}\n\n\tfor _, host := range hosts {\n\t\tpolicy.AddHost(host)\n\t}\n\n\tgot := make(map[UUID]bool)\n\tit := policy.Pick(nil)\n\tfor h := it(); h != nil; h = it() {\n\t\tid := h.Info().hostId\n\t\tif got[id] {\n\t\t\tt.Fatalf(\"got duplicate host: %v\", id)\n\t\t}\n\t\tgot[id] = true\n\t}\n\tif len(got) != len(hosts) {\n\t\tt.Fatalf(\"expected %d hosts got %d\", len(hosts), len(got))\n\t}\n}\n\nfunc TestRoundRobbinSameConnectAddress(t *testing.T) {\n\tt.Parallel()\n\n\tpolicy := RoundRobinHostPolicy()\n\n\thosts := [...]*HostInfo{\n\t\t{hostId: tUUID(0), connectAddress: net.IPv4(0, 0, 0, 1), port: 9042},\n\t\t{hostId: tUUID(1), connectAddress: net.IPv4(0, 0, 0, 1), port: 9043},\n\t}\n\n\tfor _, host := range hosts {\n\t\tpolicy.AddHost(host)\n\t}\n\n\tgot := make(map[UUID]bool)\n\tit := policy.Pick(nil)\n\tfor h := it(); h != nil; h = it() {\n\t\tid := h.Info().hostId\n\t\tif got[id] {\n\t\t\tt.Fatalf(\"got duplicate host: %v\", id)\n\t\t}\n\t\tgot[id] = true\n\t}\n\tif len(got) != len(hosts) {\n\t\tt.Fatalf(\"expected %d hosts got %d\", len(hosts), len(got))\n\t}\n}\n\n// Tests of the token-aware host selection policy implementation with a\n// round-robin host selection policy fallback.\nfunc 
TestHostPolicy_TokenAware_SimpleStrategy(t *testing.T) {\n\tt.Parallel()\n\n\tconst keyspace = \"myKeyspace\"\n\tpolicy := TokenAwareHostPolicy(RoundRobinHostPolicy())\n\tpolicyInternal := policy.(*tokenAwareHostPolicy)\n\tpolicyInternal.getKeyspaceName = func() string { return keyspace }\n\tpolicyInternal.getKeyspaceMetadata = func(ks string) (*KeyspaceMetadata, error) {\n\t\treturn nil, errors.New(\"not initialized\")\n\t}\n\n\tquery := &Query{routingInfo: &queryRoutingInfo{}}\n\tquery.getKeyspace = func() string { return keyspace }\n\n\titer := policy.Pick(nil)\n\tif iter == nil {\n\t\tt.Fatal(\"host iterator was nil\")\n\t}\n\tactual := iter()\n\tif actual != nil {\n\t\tt.Fatalf(\"expected nil from iterator, but was %v\", actual)\n\t}\n\n\t// set the hosts\n\thosts := [...]*HostInfo{\n\t\t{hostId: tUUID(0), connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{\"00\"}},\n\t\t{hostId: tUUID(1), connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{\"25\"}},\n\t\t{hostId: tUUID(2), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"50\"}},\n\t\t{hostId: tUUID(3), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"75\"}},\n\t}\n\tfor _, host := range &hosts {\n\t\tpolicy.AddHost(host)\n\t}\n\n\tpolicy.SetPartitioner(\"OrderedPartitioner\")\n\n\tpolicyInternal.getKeyspaceMetadata = func(keyspaceName string) (*KeyspaceMetadata, error) {\n\t\tif keyspaceName != keyspace {\n\t\t\treturn nil, fmt.Errorf(\"unknown keyspace: %s\", keyspaceName)\n\t\t}\n\t\treturn &KeyspaceMetadata{\n\t\t\tName:          keyspace,\n\t\t\tStrategyClass: \"SimpleStrategy\",\n\t\t\tStrategyOptions: map[string]any{\n\t\t\t\t\"class\":              \"SimpleStrategy\",\n\t\t\t\t\"replication_factor\": 2,\n\t\t\t},\n\t\t}, nil\n\t}\n\tpolicy.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: keyspace})\n\n\t// The SimpleStrategy above should generate the following replicas.\n\t// It's handy to have as reference here.\n\ttests.AssertDeepEqual(t, \"replicas\", 
map[string]tokenRingReplicas{\n\t\t\"myKeyspace\": {\n\t\t\t{orderedToken(\"00\"), []*HostInfo{hosts[0], hosts[1]}},\n\t\t\t{orderedToken(\"25\"), []*HostInfo{hosts[1], hosts[2]}},\n\t\t\t{orderedToken(\"50\"), []*HostInfo{hosts[2], hosts[3]}},\n\t\t\t{orderedToken(\"75\"), []*HostInfo{hosts[3], hosts[0]}},\n\t\t},\n\t}, policyInternal.getMetadataReadOnly().replicas)\n\n\t// now the token ring is configured\n\tquery.RoutingKey([]byte(\"20\"))\n\titer = policy.Pick(query)\n\t// shuffling is enabled by default, expecting\n\texpectHosts(t, \"hosts[0]\", iter, tID(1), tID(2))\n\t// then rest of the hosts\n\texpectHosts(t, \"rest\", iter, tID(0), tID(3))\n\texpectNoMoreHosts(t, iter)\n}\n\nfunc TestHostPolicy_TokenAware_LWT_DisablesHostShuffling(t *testing.T) {\n\tt.Parallel()\n\n\ttests := map[string]struct {\n\t\thosts      []*HostInfo\n\t\troutingKey string\n\t\tlwt        bool\n\t\tshuffle    bool\n\t\twant       []string\n\t}{\n\t\t\"token 08 shuffling configured\": {hosts: []*HostInfo{\n\t\t\t{hostId: tUUID(0), connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{\"00\", \"10\", \"20\"}},\n\t\t\t{hostId: tUUID(1), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"25\", \"35\", \"45\"}},\n\t\t\t{hostId: tUUID(2), connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{\"00\", \"10\", \"20\"}},\n\t\t\t{hostId: tUUID(3), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"25\", \"35\", \"45\"}},\n\t\t\t{hostId: tUUID(4), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"50\", \"60\", \"70\"}},\n\t\t\t{hostId: tUUID(5), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"50\", \"60\", \"70\"}},\n\t\t}, routingKey: \"8\", lwt: true, shuffle: true, want: []string{tID(0), tID(2), tID(3), tID(4), tID(5), tID(1)}},\n\t\t\"token 08 shuffling not configured\": {hosts: []*HostInfo{\n\t\t\t{hostId: tUUID(0), connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{\"00\", \"10\", \"20\"}},\n\t\t\t{hostId: tUUID(1), connectAddress: net.IPv4(10, 0, 
0, 3), tokens: []string{\"25\", \"35\", \"45\"}},\n\t\t\t{hostId: tUUID(2), connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{\"00\", \"10\", \"20\"}},\n\t\t\t{hostId: tUUID(3), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"25\", \"35\", \"45\"}},\n\t\t\t{hostId: tUUID(4), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"50\", \"60\", \"70\"}},\n\t\t\t{hostId: tUUID(5), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"50\", \"60\", \"70\"}},\n\t\t}, routingKey: \"8\", lwt: true, shuffle: false, want: []string{tID(0), tID(2), tID(3), tID(4), tID(5), tID(1)}},\n\t\t\"token 30 shuffling configured\": {hosts: []*HostInfo{\n\t\t\t{hostId: tUUID(0), connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{\"00\", \"10\", \"20\"}},\n\t\t\t{hostId: tUUID(1), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"25\", \"35\", \"45\"}},\n\t\t\t{hostId: tUUID(2), connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{\"00\", \"10\", \"20\"}},\n\t\t\t{hostId: tUUID(3), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"25\", \"35\", \"45\"}},\n\t\t\t{hostId: tUUID(4), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"50\", \"60\", \"70\"}},\n\t\t\t{hostId: tUUID(5), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"50\", \"60\", \"70\"}},\n\t\t}, routingKey: \"30\", lwt: true, shuffle: true, want: []string{tID(1), tID(3), tID(2), tID(4), tID(5), tID(0)}},\n\t\t\"token 30 shuffling not configured\": {hosts: []*HostInfo{\n\t\t\t{hostId: tUUID(0), connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{\"00\", \"10\", \"20\"}},\n\t\t\t{hostId: tUUID(1), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"25\", \"35\", \"45\"}},\n\t\t\t{hostId: tUUID(2), connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{\"00\", \"10\", \"20\"}},\n\t\t\t{hostId: tUUID(3), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"25\", \"35\", \"45\"}},\n\t\t\t{hostId: tUUID(4), connectAddress: net.IPv4(10, 0, 0, 3), tokens: 
[]string{\"50\", \"60\", \"70\"}},\n\t\t\t{hostId: tUUID(5), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"50\", \"60\", \"70\"}},\n\t\t}, routingKey: \"30\", lwt: true, shuffle: false, want: []string{tID(1), tID(3), tID(2), tID(4), tID(5), tID(0)}},\n\t\t\"token 55 shuffling configured\": {hosts: []*HostInfo{\n\t\t\t{hostId: tUUID(0), connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{\"00\", \"10\", \"20\"}},\n\t\t\t{hostId: tUUID(1), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"25\", \"35\", \"45\"}},\n\t\t\t{hostId: tUUID(2), connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{\"00\", \"10\", \"20\"}},\n\t\t\t{hostId: tUUID(3), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"25\", \"35\", \"45\"}},\n\t\t\t{hostId: tUUID(4), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"50\", \"60\", \"70\"}},\n\t\t\t{hostId: tUUID(5), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"50\", \"60\", \"70\"}},\n\t\t}, routingKey: \"55\", lwt: true, shuffle: true, want: []string{tID(4), tID(5), tID(2), tID(3), tID(0), tID(1)}},\n\t\t\"token 55 shuffling not configured\": {hosts: []*HostInfo{\n\t\t\t{hostId: tUUID(0), connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{\"00\", \"10\", \"20\"}},\n\t\t\t{hostId: tUUID(1), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"25\", \"35\", \"45\"}},\n\t\t\t{hostId: tUUID(2), connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{\"00\", \"10\", \"20\"}},\n\t\t\t{hostId: tUUID(3), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"25\", \"35\", \"45\"}},\n\t\t\t{hostId: tUUID(4), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"50\", \"60\", \"70\"}},\n\t\t\t{hostId: tUUID(5), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"50\", \"60\", \"70\"}},\n\t\t}, routingKey: \"55\", lwt: true, shuffle: false, want: []string{tID(4), tID(5), tID(2), tID(3), tID(0), tID(1)}},\n\t}\n\tconst keyspace = \"myKeyspace\"\n\tfor name, tc := range tests 
{\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tpolicy := createPolicy(keyspace, tc.shuffle)\n\t\t\tfor _, host := range tc.hosts {\n\t\t\t\tpolicy.AddHost(host)\n\t\t\t}\n\t\t\tquery := &Query{\n\t\t\t\troutingKey:  []byte(tc.routingKey),\n\t\t\t\troutingInfo: &queryRoutingInfo{lwt: tc.lwt},\n\t\t\t}\n\t\t\tquery.getKeyspace = func() string { return keyspace }\n\t\t\titer := policy.Pick(query)\n\t\t\tvar hostIds []string\n\t\t\tfor host := iter(); host != nil; host = iter() {\n\t\t\t\thostIds = append(hostIds, host.Info().HostID())\n\t\t\t}\n\t\t\tif diff := cmp.Diff(hostIds, tc.want); diff != \"\" {\n\t\t\t\tt.Errorf(\"expected %s, got %s, diff %s\", tc.want, hostIds, diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc createPolicy(keyspace string, shuffle bool) HostSelectionPolicy {\n\tpolicy := TokenAwareHostPolicy(RoundRobinHostPolicy())\n\tpolicyInternal := policy.(*tokenAwareHostPolicy)\n\tpolicyInternal.getKeyspaceName = func() string { return keyspace }\n\tpolicyInternal.getKeyspaceMetadata = func(ks string) (*KeyspaceMetadata, error) {\n\t\treturn nil, errors.New(\"not initialized\")\n\t}\n\tpolicy.SetPartitioner(\"OrderedPartitioner\")\n\n\tpolicyInternal.getKeyspaceMetadata = func(keyspaceName string) (*KeyspaceMetadata, error) {\n\t\tif keyspaceName != keyspace {\n\t\t\treturn nil, fmt.Errorf(\"unknown keyspace: %s\", keyspaceName)\n\t\t}\n\t\treturn &KeyspaceMetadata{\n\t\t\tName:          keyspace,\n\t\t\tStrategyClass: \"SimpleStrategy\",\n\t\t\tStrategyOptions: map[string]any{\n\t\t\t\t\"class\":              \"SimpleStrategy\",\n\t\t\t\t\"replication_factor\": 2,\n\t\t\t},\n\t\t}, nil\n\t}\n\tpolicyInternal.shuffleReplicas = shuffle\n\tpolicy.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: keyspace})\n\treturn policy\n}\n\nfunc TestHostPolicy_RoundRobin_NilHostInfo(t *testing.T) {\n\tt.Parallel()\n\n\tpolicy := RoundRobinHostPolicy()\n\n\thost := &HostInfo{hostId: tUUID(1)}\n\tpolicy.AddHost(host)\n\n\titer := policy.Pick(nil)\n\tnext := iter()\n\tif next == nil 
{\n\t\tt.Fatal(\"got nil host\")\n\t} else if v := next.Info(); v == nil {\n\t\tt.Fatal(\"got nil HostInfo\")\n\t} else if v.HostID() != host.HostID() {\n\t\tt.Fatalf(\"expected host %v got %v\", host, v)\n\t}\n\n\tnext = iter()\n\tif next != nil {\n\t\tt.Errorf(\"expected to get nil host got %+v\", next)\n\t\tif next.Info() == nil {\n\t\t\tt.Fatalf(\"HostInfo is nil\")\n\t\t}\n\t}\n}\n\nfunc TestHostPolicy_TokenAware_NilHostInfo(t *testing.T) {\n\tt.Parallel()\n\n\tpolicy := TokenAwareHostPolicy(RoundRobinHostPolicy())\n\tpolicyInternal := policy.(*tokenAwareHostPolicy)\n\tpolicyInternal.getKeyspaceName = func() string { return \"myKeyspace\" }\n\tpolicyInternal.getKeyspaceMetadata = func(ks string) (*KeyspaceMetadata, error) {\n\t\treturn nil, errors.New(\"not initialized\")\n\t}\n\n\thosts := [...]*HostInfo{\n\t\t{connectAddress: net.IPv4(10, 0, 0, 0), tokens: []string{\"00\"}},\n\t\t{connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{\"25\"}},\n\t\t{connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{\"50\"}},\n\t\t{connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"75\"}},\n\t}\n\tfor _, host := range hosts {\n\t\tpolicy.AddHost(host)\n\t}\n\tpolicy.SetPartitioner(\"OrderedPartitioner\")\n\n\tquery := &Query{routingInfo: &queryRoutingInfo{}}\n\tquery.getKeyspace = func() string { return \"myKeyspace\" }\n\tquery.RoutingKey([]byte(\"20\"))\n\n\titer := policy.Pick(query)\n\tnext := iter()\n\tif next == nil {\n\t\tt.Fatal(\"got nil host\")\n\t} else if v := next.Info(); v == nil {\n\t\tt.Fatal(\"got nil HostInfo\")\n\t} else if !v.ConnectAddress().Equal(hosts[1].ConnectAddress()) {\n\t\tt.Fatalf(\"expected peer 1 got %v\", v.ConnectAddress())\n\t}\n\n\t// Empty the hosts to trigger the panic when using the fallback.\n\tfor _, host := range hosts {\n\t\tpolicy.RemoveHost(host)\n\t}\n\n\tnext = iter()\n\tif next != nil {\n\t\tt.Errorf(\"expected to get nil host got %+v\", next)\n\t\tif next.Info() == nil {\n\t\t\tt.Fatalf(\"HostInfo is 
nil\")\n\t\t}\n\t}\n}\n\nfunc TestCOWList_Add(t *testing.T) {\n\tt.Parallel()\n\n\tvar cow cowHostList\n\n\ttoAdd := [...]net.IP{net.IPv4(10, 0, 0, 1), net.IPv4(10, 0, 0, 2), net.IPv4(10, 0, 0, 3)}\n\n\tfor _, addr := range toAdd {\n\t\tif !cow.add(&HostInfo{connectAddress: addr}) {\n\t\t\tt.Fatal(\"did not add peer which was not in the set\")\n\t\t}\n\t}\n\n\thosts := cow.get()\n\tif len(hosts) != len(toAdd) {\n\t\tt.Fatalf(\"expected to have %d hosts got %d\", len(toAdd), len(hosts))\n\t}\n\n\tset := make(map[string]bool)\n\tfor _, host := range hosts {\n\t\tset[string(host.ConnectAddress())] = true\n\t}\n\n\tfor _, addr := range toAdd {\n\t\tif !set[string(addr)] {\n\t\t\tt.Errorf(\"addr was not in the host list: %q\", addr)\n\t\t}\n\t}\n}\n\n// TestSimpleRetryPolicy makes sure that we only allow 1 + numRetries attempts\nfunc TestSimpleRetryPolicy(t *testing.T) {\n\tt.Parallel()\n\n\tq := &Query{routingInfo: &queryRoutingInfo{}}\n\n\t// this should allow a total of 3 tries.\n\trt := &SimpleRetryPolicy{NumRetries: 2}\n\n\tregular_error := errors.New(\"regular error\")\n\n\tqe1 := &QueryError{\n\t\terr:                 errors.New(\"connection error\"),\n\t\tpotentiallyExecuted: false,\n\t\tisIdempotent:        false,\n\t}\n\n\tqe2 := &QueryError{\n\t\terr:                 errors.New(\"timeout error\"),\n\t\tpotentiallyExecuted: true,\n\t\tisIdempotent:        true,\n\t}\n\n\tqe3 := &QueryError{\n\t\terr:                 errors.New(\"write timeout\"),\n\t\tpotentiallyExecuted: true,\n\t\tisIdempotent:        false,\n\t}\n\n\tcases := []struct {\n\t\tattempts     int\n\t\tallow        bool\n\t\terr          error\n\t\tretryType    RetryType\n\t\tLWTRetryType RetryType\n\t}{\n\t\t{0, true, qe1, RetryNextHost, Retry},\n\t\t{1, true, qe2, RetryNextHost, Retry},\n\t\t{2, true, qe3, Rethrow, Rethrow},\n\t\t{3, false, regular_error, RetryNextHost, Retry},\n\t\t{4, false, regular_error, RetryNextHost, Retry},\n\t\t{5, false, regular_error, RetryNextHost, 
Retry},\n\t}\n\n\tfor _, c := range cases {\n\t\tq.metrics = preFilledQueryMetrics(map[UUID]*hostMetrics{TimeUUID(): {Attempts: c.attempts}})\n\t\tif c.retryType != rt.GetRetryType(c.err) {\n\t\t\tt.Fatalf(\"retry type for %v should be %v\", c.err, c.retryType)\n\t\t}\n\t\tif c.LWTRetryType != rt.GetRetryTypeLWT(c.err) {\n\t\t\tt.Fatalf(\"LWT retry type for %v should be %v\", c.err, c.LWTRetryType)\n\t\t}\n\t\tif c.allow && !rt.Attempt(q) {\n\t\t\tt.Fatalf(\"should allow retry after %d attempts\", c.attempts)\n\t\t}\n\t\tif !c.allow && rt.Attempt(q) {\n\t\t\tt.Fatalf(\"should not allow retry after %d attempts\", c.attempts)\n\t\t}\n\t}\n}\n\nfunc TestLWTSimpleRetryPolicy(t *testing.T) {\n\tt.Parallel()\n\n\tebrp := &SimpleRetryPolicy{NumRetries: 2}\n\t// Verify that SimpleRetryPolicy implements both interfaces\n\tvar _ RetryPolicy = ebrp\n\tvar lwt_rt LWTRetryPolicy = ebrp\n\ttests.AssertEqual(t, \"retry type of LWT policy\", lwt_rt.GetRetryTypeLWT(nil), Retry)\n}\n\nfunc TestExponentialBackoffPolicy(t *testing.T) {\n\tt.Parallel()\n\n\t// test with defaults\n\tsut := &ExponentialBackoffRetryPolicy{NumRetries: 2}\n\n\tregular_error := errors.New(\"regular error\")\n\n\tqe1 := &QueryError{\n\t\terr:                 errors.New(\"connection error\"),\n\t\tpotentiallyExecuted: false,\n\t\tisIdempotent:        false,\n\t}\n\n\tqe2 := &QueryError{\n\t\terr:                 errors.New(\"timeout error\"),\n\t\tpotentiallyExecuted: true,\n\t\tisIdempotent:        true,\n\t}\n\n\tqe3 := &QueryError{\n\t\terr:                 errors.New(\"write timeout\"),\n\t\tpotentiallyExecuted: true,\n\t\tisIdempotent:        false,\n\t}\n\n\tcases := []struct {\n\t\tattempts     int\n\t\tdelay        time.Duration\n\t\terr          error\n\t\tretryType    RetryType\n\t\tLWTRetryType RetryType\n\t}{\n\t\t{1, 100 * time.Millisecond, qe1, RetryNextHost, Retry},\n\t\t{2, (2) * 100 * time.Millisecond, qe2, RetryNextHost, Retry},\n\t\t{3, (2 * 2) * 100 * time.Millisecond, qe3, Rethrow, 
Rethrow},\n\t\t{4, (2 * 2 * 2) * 100 * time.Millisecond, regular_error, RetryNextHost, Retry},\n\t}\n\tfor _, c := range cases {\n\t\tif c.retryType != sut.GetRetryType(c.err) {\n\t\t\tt.Fatalf(\"retry type for %v should be %v\", c.err, c.retryType)\n\t\t}\n\t\tif c.LWTRetryType != sut.GetRetryTypeLWT(c.err) {\n\t\t\tt.Fatalf(\"LWT retry type for %v should be %v\", c.err, c.LWTRetryType)\n\t\t}\n\t\t// test 100 times for each case\n\t\tfor i := 0; i < 100; i++ {\n\t\t\td := sut.napTime(c.attempts)\n\t\t\tif d < c.delay-(100*time.Millisecond)/2 {\n\t\t\t\tt.Fatalf(\"Delay %d less than jitter min of %d\", d, c.delay-100*time.Millisecond/2)\n\t\t\t}\n\t\t\tif d > c.delay+(100*time.Millisecond)/2 {\n\t\t\t\tt.Fatalf(\"Delay %d greater than jitter max of %d\", d, c.delay+100*time.Millisecond/2)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestLWTExponentialBackoffPolicy(t *testing.T) {\n\tt.Parallel()\n\n\tebrp := &ExponentialBackoffRetryPolicy{NumRetries: 2}\n\t// Verify that ExponentialBackoffRetryPolicy implements both interfaces\n\tvar _ RetryPolicy = ebrp\n\tvar lwt_rt LWTRetryPolicy = ebrp\n\ttests.AssertEqual(t, \"retry type of LWT policy\", lwt_rt.GetRetryTypeLWT(nil), Retry)\n}\n\nfunc TestDowngradingConsistencyRetryPolicy(t *testing.T) {\n\tt.Parallel()\n\n\tq := &Query{cons: LocalQuorum, routingInfo: &queryRoutingInfo{}}\n\n\trewt0 := &RequestErrWriteTimeout{\n\t\tReceived:  0,\n\t\tWriteType: \"SIMPLE\",\n\t}\n\n\trewt1 := &RequestErrWriteTimeout{\n\t\tReceived:  1,\n\t\tWriteType: \"BATCH\",\n\t}\n\n\trewt2 := &RequestErrWriteTimeout{\n\t\tWriteType: \"UNLOGGED_BATCH\",\n\t}\n\n\trert := &RequestErrReadTimeout{}\n\n\treu0 := &RequestErrUnavailable{\n\t\tAlive: 0,\n\t}\n\n\treu1 := &RequestErrUnavailable{\n\t\tAlive: 1,\n\t}\n\n\t// this should allow a total of 3 tries.\n\tconsistencyLevels := []Consistency{Three, Two, One}\n\trt := &DowngradingConsistencyRetryPolicy{ConsistencyLevelsToTry: consistencyLevels}\n\tcases := []struct {\n\t\tattempts  int\n\t\tallow     
bool\n\t\terr       error\n\t\tretryType RetryType\n\t}{\n\t\t{0, true, rewt0, Rethrow},\n\t\t{3, true, rewt1, Ignore},\n\t\t{1, true, rewt2, Retry},\n\t\t{2, true, rert, Retry},\n\t\t{4, false, reu0, Rethrow},\n\t\t{16, false, reu1, Retry},\n\t}\n\n\tfor _, c := range cases {\n\t\tq.metrics = preFilledQueryMetrics(map[UUID]*hostMetrics{TimeUUID(): {Attempts: c.attempts}})\n\t\tif c.retryType != rt.GetRetryType(c.err) {\n\t\t\tt.Fatalf(\"retry type should be %v\", c.retryType)\n\t\t}\n\t\tif c.allow && !rt.Attempt(q) {\n\t\t\tt.Fatalf(\"should allow retry after %d attempts\", c.attempts)\n\t\t}\n\t\tif !c.allow && rt.Attempt(q) {\n\t\t\tt.Fatalf(\"should not allow retry after %d attempts\", c.attempts)\n\t\t}\n\t}\n}\n\n// expectHosts makes sure that the next len(hostIDs) returned from iter is a permutation of hostIDs.\nfunc expectHosts(t *testing.T, msg string, iter NextHost, hostIDs ...string) {\n\tt.Helper()\n\n\texpectedHostIDs := make(map[string]struct{}, len(hostIDs))\n\tfor i := range hostIDs {\n\t\texpectedHostIDs[hostIDs[i]] = struct{}{}\n\t}\n\n\texpectedStr := func() string {\n\t\tkeys := make([]string, 0, len(expectedHostIDs))\n\t\tfor k := range expectedHostIDs {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\tsort.Strings(keys)\n\t\treturn strings.Join(keys, \", \")\n\t}\n\n\tfor len(expectedHostIDs) > 0 {\n\t\thost := iter()\n\t\tif host == nil || host.Info() == nil {\n\t\t\tt.Fatalf(\"%s: expected hostID one of {%s}, but got nil\", msg, expectedStr())\n\t\t}\n\t\thostID := host.Info().HostID()\n\t\tif _, ok := expectedHostIDs[hostID]; !ok {\n\t\t\tt.Fatalf(\"%s: expected host ID one of {%s}, but got %s\", msg, expectedStr(), hostID)\n\t\t}\n\t\tdelete(expectedHostIDs, hostID)\n\t}\n}\n\nfunc expectNoMoreHosts(t *testing.T, iter NextHost) {\n\tt.Helper()\n\thost := iter()\n\tif host == nil {\n\t\t// success\n\t\treturn\n\t}\n\tinfo := host.Info()\n\tif info == nil {\n\t\tt.Fatalf(\"expected no more hosts, but got host with nil 
Info()\")\n\t\treturn\n\t}\n\tt.Fatalf(\"expected no more hosts, but got %s\", info.HostID())\n}\n\nfunc TestHostPolicy_DCAwareRR(t *testing.T) {\n\tt.Parallel()\n\n\tp := DCAwareRoundRobinPolicy(\"local\")\n\n\thosts := [...]*HostInfo{\n\t\t{hostId: tUUID(0), connectAddress: net.ParseIP(\"10.0.0.1\"), dataCenter: \"local\"},\n\t\t{hostId: tUUID(1), connectAddress: net.ParseIP(\"10.0.0.2\"), dataCenter: \"local\"},\n\t\t{hostId: tUUID(2), connectAddress: net.ParseIP(\"10.0.0.3\"), dataCenter: \"remote\"},\n\t\t{hostId: tUUID(3), connectAddress: net.ParseIP(\"10.0.0.4\"), dataCenter: \"remote\"},\n\t}\n\n\tfor _, host := range hosts {\n\t\tp.AddHost(host)\n\t}\n\n\tgot := make(map[UUID]bool, len(hosts))\n\tvar dcs []string\n\n\tit := p.Pick(nil)\n\tfor h := it(); h != nil; h = it() {\n\t\tid := h.Info().hostId\n\t\tdc := h.Info().dataCenter\n\n\t\tif got[id] {\n\t\t\tt.Fatalf(\"got duplicate host %s\", id)\n\t\t}\n\t\tgot[id] = true\n\t\tdcs = append(dcs, dc)\n\t}\n\n\tif len(got) != len(hosts) {\n\t\tt.Fatalf(\"expected %d hosts got %d\", len(hosts), len(got))\n\t}\n\n\tvar remote bool\n\tfor _, dc := range dcs {\n\t\tif dc == \"local\" {\n\t\t\tif remote {\n\t\t\t\tt.Fatalf(\"got local dc after remote: %v\", dcs)\n\t\t\t}\n\t\t} else {\n\t\t\tremote = true\n\t\t}\n\t}\n\n}\n\nfunc TestHostPolicy_DCAwareRR_disableDCFailover(t *testing.T) {\n\tt.Parallel()\n\n\tp := DCAwareRoundRobinPolicy(\"local\", HostPolicyOptionDisableDCFailover)\n\n\thosts := [...]*HostInfo{\n\t\t{hostId: tUUID(0), connectAddress: net.ParseIP(\"10.0.0.1\"), dataCenter: \"local\"},\n\t\t{hostId: tUUID(1), connectAddress: net.ParseIP(\"10.0.0.2\"), dataCenter: \"local\"},\n\t\t{hostId: tUUID(2), connectAddress: net.ParseIP(\"10.0.0.3\"), dataCenter: \"remote\"},\n\t\t{hostId: tUUID(3), connectAddress: net.ParseIP(\"10.0.0.4\"), dataCenter: \"remote\"},\n\t}\n\n\tfor _, host := range hosts {\n\t\tp.AddHost(host)\n\t}\n\n\tgot := make(map[UUID]bool, len(hosts))\n\tvar dcs []string\n\n\tit := 
p.Pick(nil)\n\tfor h := it(); h != nil; h = it() {\n\t\tid := h.Info().hostId\n\t\tdc := h.Info().dataCenter\n\n\t\tif got[id] {\n\t\t\tt.Fatalf(\"got duplicate host %s\", id)\n\t\t}\n\t\tgot[id] = true\n\t\tdcs = append(dcs, dc)\n\t}\n\n\tif len(got) != 2 {\n\t\tt.Fatalf(\"expected %d hosts got %d\", 2, len(got))\n\t}\n\n\tfor _, dc := range dcs {\n\t\tif dc == \"remote\" {\n\t\t\tt.Fatalf(\"got remote dc but failover was disabled\")\n\t\t}\n\t}\n}\n\n// Tests of the token-aware host selection policy implementation with a\n// DC aware round-robin host selection policy fallback\n// with {\"class\": \"NetworkTopologyStrategy\", \"a\": 1, \"b\": 1, \"c\": 1} replication.\nfunc TestHostPolicy_TokenAware(t *testing.T) {\n\tt.Parallel()\n\n\tconst keyspace = \"myKeyspace\"\n\tpolicy := TokenAwareHostPolicy(DCAwareRoundRobinPolicy(\"local\"))\n\tpolicyInternal := policy.(*tokenAwareHostPolicy)\n\tpolicyInternal.getKeyspaceName = func() string { return keyspace }\n\tpolicyInternal.getKeyspaceMetadata = func(ks string) (*KeyspaceMetadata, error) {\n\t\treturn nil, errors.New(\"not initialized\")\n\t}\n\n\tquery := &Query{routingInfo: &queryRoutingInfo{}}\n\tquery.getKeyspace = func() string { return keyspace }\n\n\titer := policy.Pick(nil)\n\tif iter == nil {\n\t\tt.Fatal(\"host iterator was nil\")\n\t}\n\tactual := iter()\n\tif actual != nil {\n\t\tt.Fatalf(\"expected nil from iterator, but was %v\", actual)\n\t}\n\n\t// set the hosts\n\thosts := [...]*HostInfo{\n\t\t{hostId: tUUID(0), connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{\"05\"}, dataCenter: \"remote1\"},\n\t\t{hostId: tUUID(1), connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{\"10\"}, dataCenter: \"local\"},\n\t\t{hostId: tUUID(2), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"15\"}, dataCenter: \"remote2\"},\n\t\t{hostId: tUUID(3), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"20\"}, dataCenter: \"remote1\"},\n\t\t{hostId: tUUID(4), connectAddress: net.IPv4(10, 0, 0, 5), 
tokens: []string{\"25\"}, dataCenter: \"local\"},\n\t\t{hostId: tUUID(5), connectAddress: net.IPv4(10, 0, 0, 6), tokens: []string{\"30\"}, dataCenter: \"remote2\"},\n\t\t{hostId: tUUID(6), connectAddress: net.IPv4(10, 0, 0, 7), tokens: []string{\"35\"}, dataCenter: \"remote1\"},\n\t\t{hostId: tUUID(7), connectAddress: net.IPv4(10, 0, 0, 8), tokens: []string{\"40\"}, dataCenter: \"local\"},\n\t\t{hostId: tUUID(8), connectAddress: net.IPv4(10, 0, 0, 9), tokens: []string{\"45\"}, dataCenter: \"remote2\"},\n\t\t{hostId: tUUID(9), connectAddress: net.IPv4(10, 0, 0, 10), tokens: []string{\"50\"}, dataCenter: \"remote1\"},\n\t\t{hostId: tUUID(10), connectAddress: net.IPv4(10, 0, 0, 11), tokens: []string{\"55\"}, dataCenter: \"local\"},\n\t\t{hostId: tUUID(11), connectAddress: net.IPv4(10, 0, 0, 12), tokens: []string{\"60\"}, dataCenter: \"remote2\"},\n\t}\n\tfor _, host := range hosts {\n\t\tpolicy.AddHost(host)\n\t}\n\n\t// the token ring is not setup without the partitioner, but the fallback\n\t// should work\n\tif actual := policy.Pick(nil)(); actual == nil {\n\t\tt.Fatal(\"expected to get host from fallback got nil\")\n\t}\n\n\tquery.RoutingKey([]byte(\"30\"))\n\tif actual := policy.Pick(query)(); actual == nil {\n\t\tt.Fatal(\"expected to get host from fallback got nil\")\n\t}\n\n\tpolicy.SetPartitioner(\"OrderedPartitioner\")\n\n\tpolicyInternal.getKeyspaceMetadata = func(keyspaceName string) (*KeyspaceMetadata, error) {\n\t\tif keyspaceName != keyspace {\n\t\t\treturn nil, fmt.Errorf(\"unknown keyspace: %s\", keyspaceName)\n\t\t}\n\t\treturn &KeyspaceMetadata{\n\t\t\tName:          keyspace,\n\t\t\tStrategyClass: \"NetworkTopologyStrategy\",\n\t\t\tStrategyOptions: map[string]any{\n\t\t\t\t\"class\":   \"NetworkTopologyStrategy\",\n\t\t\t\t\"local\":   1,\n\t\t\t\t\"remote1\": 1,\n\t\t\t\t\"remote2\": 1,\n\t\t\t},\n\t\t}, nil\n\t}\n\tpolicy.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: \"myKeyspace\"})\n\n\t// The NetworkTopologyStrategy above should generate the 
following replicas.\n\t// It's handy to have as reference here.\n\ttests.AssertDeepEqual(t, \"replicas\", map[string]tokenRingReplicas{\n\t\t\"myKeyspace\": {\n\t\t\t{orderedToken(\"05\"), []*HostInfo{hosts[0], hosts[1], hosts[2]}},\n\t\t\t{orderedToken(\"10\"), []*HostInfo{hosts[1], hosts[2], hosts[3]}},\n\t\t\t{orderedToken(\"15\"), []*HostInfo{hosts[2], hosts[3], hosts[4]}},\n\t\t\t{orderedToken(\"20\"), []*HostInfo{hosts[3], hosts[4], hosts[5]}},\n\t\t\t{orderedToken(\"25\"), []*HostInfo{hosts[4], hosts[5], hosts[6]}},\n\t\t\t{orderedToken(\"30\"), []*HostInfo{hosts[5], hosts[6], hosts[7]}},\n\t\t\t{orderedToken(\"35\"), []*HostInfo{hosts[6], hosts[7], hosts[8]}},\n\t\t\t{orderedToken(\"40\"), []*HostInfo{hosts[7], hosts[8], hosts[9]}},\n\t\t\t{orderedToken(\"45\"), []*HostInfo{hosts[8], hosts[9], hosts[10]}},\n\t\t\t{orderedToken(\"50\"), []*HostInfo{hosts[9], hosts[10], hosts[11]}},\n\t\t\t{orderedToken(\"55\"), []*HostInfo{hosts[10], hosts[11], hosts[0]}},\n\t\t\t{orderedToken(\"60\"), []*HostInfo{hosts[11], hosts[0], hosts[1]}},\n\t\t},\n\t}, policyInternal.getMetadataReadOnly().replicas)\n\n\t// now the token ring is configured\n\tquery.RoutingKey([]byte(\"23\"))\n\titer = policy.Pick(query)\n\t// first should be host with matching token from the local DC\n\texpectHosts(t, \"matching token from local DC\", iter, tID(4))\n\t// next are in non-deterministic order\n\texpectHosts(t, \"rest\", iter, tID(0), tID(1), tID(2), tID(3), tID(5), tID(6), tID(7), tID(8), tID(9), tID(10), tID(11))\n\texpectNoMoreHosts(t, iter)\n}\n\n// Tests of the token-aware host selection policy implementation with a\n// DC aware round-robin host selection policy fallback\n// with {\"class\": \"NetworkTopologyStrategy\", \"a\": 2, \"b\": 2, \"c\": 2} replication.\nfunc TestHostPolicy_TokenAware_NetworkStrategy(t *testing.T) {\n\tt.Parallel()\n\n\tconst keyspace = \"myKeyspace\"\n\tpolicy := TokenAwareHostPolicy(DCAwareRoundRobinPolicy(\"local\"), NonLocalReplicasFallback(), 
DontShuffleReplicas())\n\tpolicyInternal := policy.(*tokenAwareHostPolicy)\n\tpolicyInternal.getKeyspaceName = func() string { return keyspace }\n\tpolicyInternal.getKeyspaceMetadata = func(ks string) (*KeyspaceMetadata, error) {\n\t\treturn nil, errors.New(\"not initialized\")\n\t}\n\n\tquery := &Query{routingInfo: &queryRoutingInfo{}}\n\tquery.getKeyspace = func() string { return keyspace }\n\n\titer := policy.Pick(nil)\n\tif iter == nil {\n\t\tt.Fatal(\"host iterator was nil\")\n\t}\n\tactual := iter()\n\tif actual != nil {\n\t\tt.Fatalf(\"expected nil from iterator, but was %v\", actual)\n\t}\n\n\t// set the hosts\n\thosts := [...]*HostInfo{\n\t\t{hostId: tUUID(0), connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{\"05\"}, dataCenter: \"remote1\"},\n\t\t{hostId: tUUID(1), connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{\"10\"}, dataCenter: \"local\"},\n\t\t{hostId: tUUID(2), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"15\"}, dataCenter: \"remote2\"},\n\t\t{hostId: tUUID(3), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"20\"}, dataCenter: \"remote1\"}, // 1\n\t\t{hostId: tUUID(4), connectAddress: net.IPv4(10, 0, 0, 5), tokens: []string{\"25\"}, dataCenter: \"local\"},   // 2\n\t\t{hostId: tUUID(5), connectAddress: net.IPv4(10, 0, 0, 6), tokens: []string{\"30\"}, dataCenter: \"remote2\"}, // 3\n\t\t{hostId: tUUID(6), connectAddress: net.IPv4(10, 0, 0, 7), tokens: []string{\"35\"}, dataCenter: \"remote1\"}, // 4\n\t\t{hostId: tUUID(7), connectAddress: net.IPv4(10, 0, 0, 8), tokens: []string{\"40\"}, dataCenter: \"local\"},   // 5\n\t\t{hostId: tUUID(8), connectAddress: net.IPv4(10, 0, 0, 9), tokens: []string{\"45\"}, dataCenter: \"remote2\"}, // 6\n\t\t{hostId: tUUID(9), connectAddress: net.IPv4(10, 0, 0, 10), tokens: []string{\"50\"}, dataCenter: \"remote1\"},\n\t\t{hostId: tUUID(10), connectAddress: net.IPv4(10, 0, 0, 11), tokens: []string{\"55\"}, dataCenter: \"local\"},\n\t\t{hostId: tUUID(11), connectAddress: 
net.IPv4(10, 0, 0, 12), tokens: []string{\"60\"}, dataCenter: \"remote2\"},\n\t}\n\tfor _, host := range hosts {\n\t\tpolicy.AddHost(host)\n\t}\n\n\tpolicy.SetPartitioner(\"OrderedPartitioner\")\n\n\tpolicyInternal.getKeyspaceMetadata = func(keyspaceName string) (*KeyspaceMetadata, error) {\n\t\tif keyspaceName != keyspace {\n\t\t\treturn nil, fmt.Errorf(\"unknown keyspace: %s\", keyspaceName)\n\t\t}\n\t\treturn &KeyspaceMetadata{\n\t\t\tName:          keyspace,\n\t\t\tStrategyClass: \"NetworkTopologyStrategy\",\n\t\t\tStrategyOptions: map[string]any{\n\t\t\t\t\"class\":   \"NetworkTopologyStrategy\",\n\t\t\t\t\"local\":   2,\n\t\t\t\t\"remote1\": 2,\n\t\t\t\t\"remote2\": 2,\n\t\t\t},\n\t\t}, nil\n\t}\n\tpolicy.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: keyspace})\n\n\t// The NetworkTopologyStrategy above should generate the following replicas.\n\t// It's handy to have as reference here.\n\ttests.AssertDeepEqual(t, \"replicas\", map[string]tokenRingReplicas{\n\t\tkeyspace: {\n\t\t\t{orderedToken(\"05\"), []*HostInfo{hosts[0], hosts[1], hosts[2], hosts[3], hosts[4], hosts[5]}},\n\t\t\t{orderedToken(\"10\"), []*HostInfo{hosts[1], hosts[2], hosts[3], hosts[4], hosts[5], hosts[6]}},\n\t\t\t{orderedToken(\"15\"), []*HostInfo{hosts[2], hosts[3], hosts[4], hosts[5], hosts[6], hosts[7]}},\n\t\t\t{orderedToken(\"20\"), []*HostInfo{hosts[3], hosts[4], hosts[5], hosts[6], hosts[7], hosts[8]}},\n\t\t\t{orderedToken(\"25\"), []*HostInfo{hosts[4], hosts[5], hosts[6], hosts[7], hosts[8], hosts[9]}},\n\t\t\t{orderedToken(\"30\"), []*HostInfo{hosts[5], hosts[6], hosts[7], hosts[8], hosts[9], hosts[10]}},\n\t\t\t{orderedToken(\"35\"), []*HostInfo{hosts[6], hosts[7], hosts[8], hosts[9], hosts[10], hosts[11]}},\n\t\t\t{orderedToken(\"40\"), []*HostInfo{hosts[7], hosts[8], hosts[9], hosts[10], hosts[11], hosts[0]}},\n\t\t\t{orderedToken(\"45\"), []*HostInfo{hosts[8], hosts[9], hosts[10], hosts[11], hosts[0], hosts[1]}},\n\t\t\t{orderedToken(\"50\"), []*HostInfo{hosts[9], hosts[10], 
hosts[11], hosts[0], hosts[1], hosts[2]}},\n\t\t\t{orderedToken(\"55\"), []*HostInfo{hosts[10], hosts[11], hosts[0], hosts[1], hosts[2], hosts[3]}},\n\t\t\t{orderedToken(\"60\"), []*HostInfo{hosts[11], hosts[0], hosts[1], hosts[2], hosts[3], hosts[4]}},\n\t\t},\n\t}, policyInternal.getMetadataReadOnly().replicas)\n\n\t// now the token ring is configured\n\tquery.RoutingKey([]byte(\"18\"))\n\titer = policy.Pick(query)\n\t// first should be hosts with matching token from the local DC\n\texpectHosts(t, \"matching token from local DC\", iter, tID(4), tID(7))\n\t// rest should be hosts with matching token from remote DCs\n\texpectHosts(t, \"matching token from remote DCs\", iter, tID(3), tID(5), tID(6), tID(8))\n\t// followed by other hosts\n\texpectHosts(t, \"rest\", iter, tID(0), tID(1), tID(2), tID(9), tID(10), tID(11))\n\texpectNoMoreHosts(t, iter)\n}\n\nfunc TestHostPolicy_RackAwareRR(t *testing.T) {\n\tt.Parallel()\n\n\tp := RackAwareRoundRobinPolicy(\"local\", \"b\")\n\n\thosts := [...]*HostInfo{\n\t\t{hostId: tUUID(0), connectAddress: net.ParseIP(\"10.0.0.1\"), dataCenter: \"local\", rack: \"a\"},\n\t\t{hostId: tUUID(1), connectAddress: net.ParseIP(\"10.0.0.2\"), dataCenter: \"local\", rack: \"a\"},\n\t\t{hostId: tUUID(2), connectAddress: net.ParseIP(\"10.0.0.3\"), dataCenter: \"local\", rack: \"b\"},\n\t\t{hostId: tUUID(3), connectAddress: net.ParseIP(\"10.0.0.4\"), dataCenter: \"local\", rack: \"b\"},\n\t\t{hostId: tUUID(4), connectAddress: net.ParseIP(\"10.0.0.5\"), dataCenter: \"remote\", rack: \"a\"},\n\t\t{hostId: tUUID(5), connectAddress: net.ParseIP(\"10.0.0.6\"), dataCenter: \"remote\", rack: \"a\"},\n\t\t{hostId: tUUID(6), connectAddress: net.ParseIP(\"10.0.0.7\"), dataCenter: \"remote\", rack: \"b\"},\n\t\t{hostId: tUUID(7), connectAddress: net.ParseIP(\"10.0.0.8\"), dataCenter: \"remote\", rack: \"b\"},\n\t}\n\n\tfor _, host := range hosts {\n\t\tp.AddHost(host)\n\t}\n\n\tit := p.Pick(nil)\n\n\t// Must start with rack-local hosts\n\texpectHosts(t, 
\"rack-local hosts\", it, tID(3), tID(2))\n\t// Then dc-local hosts\n\texpectHosts(t, \"dc-local hosts\", it, tID(0), tID(1))\n\t// Then the remote hosts\n\texpectHosts(t, \"remote hosts\", it, tID(4), tID(5), tID(6), tID(7))\n\texpectNoMoreHosts(t, it)\n}\n\n// Tests of the token-aware host selection policy implementation with a\n// DC & Rack aware round-robin host selection policy fallback\nfunc TestHostPolicy_TokenAware_RackAware(t *testing.T) {\n\tt.Parallel()\n\n\tconst keyspace = \"myKeyspace\"\n\tpolicy := TokenAwareHostPolicy(RackAwareRoundRobinPolicy(\"local\", \"b\"))\n\tpolicyWithFallback := TokenAwareHostPolicy(RackAwareRoundRobinPolicy(\"local\", \"b\"), NonLocalReplicasFallback())\n\n\tpolicyInternal := policy.(*tokenAwareHostPolicy)\n\tpolicyInternal.getKeyspaceName = func() string { return keyspace }\n\tpolicyInternal.getKeyspaceMetadata = func(ks string) (*KeyspaceMetadata, error) {\n\t\treturn nil, errors.New(\"not initialized\")\n\t}\n\n\tpolicyWithFallbackInternal := policyWithFallback.(*tokenAwareHostPolicy)\n\tpolicyWithFallbackInternal.getKeyspaceName = policyInternal.getKeyspaceName\n\tpolicyWithFallbackInternal.getKeyspaceMetadata = policyInternal.getKeyspaceMetadata\n\n\tquery := &Query{routingInfo: &queryRoutingInfo{}}\n\tquery.getKeyspace = func() string { return keyspace }\n\n\titer := policy.Pick(nil)\n\tif iter == nil {\n\t\tt.Fatal(\"host iterator was nil\")\n\t}\n\tactual := iter()\n\tif actual != nil {\n\t\tt.Fatalf(\"expected nil from iterator, but was %v\", actual)\n\t}\n\n\t// set the hosts\n\thosts := [...]*HostInfo{\n\t\t{hostId: tUUID(0), connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{\"05\"}, dataCenter: \"remote\", rack: \"a\"},\n\t\t{hostId: tUUID(1), connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{\"10\"}, dataCenter: \"remote\", rack: \"b\"},\n\t\t{hostId: tUUID(2), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"15\"}, dataCenter: \"local\", rack: \"a\"},\n\t\t{hostId: tUUID(3), 
connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"20\"}, dataCenter: \"local\", rack: \"b\"},\n\t\t{hostId: tUUID(4), connectAddress: net.IPv4(10, 0, 0, 5), tokens: []string{\"25\"}, dataCenter: \"remote\", rack: \"a\"},\n\t\t{hostId: tUUID(5), connectAddress: net.IPv4(10, 0, 0, 6), tokens: []string{\"30\"}, dataCenter: \"remote\", rack: \"b\"},\n\t\t{hostId: tUUID(6), connectAddress: net.IPv4(10, 0, 0, 7), tokens: []string{\"35\"}, dataCenter: \"local\", rack: \"a\"},\n\t\t{hostId: tUUID(7), connectAddress: net.IPv4(10, 0, 0, 8), tokens: []string{\"40\"}, dataCenter: \"local\", rack: \"b\"},\n\t\t{hostId: tUUID(8), connectAddress: net.IPv4(10, 0, 0, 9), tokens: []string{\"45\"}, dataCenter: \"remote\", rack: \"a\"},\n\t\t{hostId: tUUID(9), connectAddress: net.IPv4(10, 0, 0, 10), tokens: []string{\"50\"}, dataCenter: \"remote\", rack: \"b\"},\n\t\t{hostId: tUUID(10), connectAddress: net.IPv4(10, 0, 0, 11), tokens: []string{\"55\"}, dataCenter: \"local\", rack: \"a\"},\n\t\t{hostId: tUUID(11), connectAddress: net.IPv4(10, 0, 0, 12), tokens: []string{\"60\"}, dataCenter: \"local\", rack: \"b\"},\n\t}\n\tfor _, host := range hosts {\n\t\tpolicy.AddHost(host)\n\t\tpolicyWithFallback.AddHost(host)\n\t}\n\n\t// the token ring is not setup without the partitioner, but the fallback\n\t// should work\n\tif actual := policy.Pick(nil)(); actual == nil {\n\t\tt.Fatal(\"expected to get host from fallback got nil\")\n\t}\n\n\tquery.RoutingKey([]byte(\"30\"))\n\tif actual := policy.Pick(query)(); actual == nil {\n\t\tt.Fatal(\"expected to get host from fallback got nil\")\n\t}\n\n\tpolicy.SetPartitioner(\"OrderedPartitioner\")\n\tpolicyWithFallback.SetPartitioner(\"OrderedPartitioner\")\n\n\tpolicyInternal.getKeyspaceMetadata = func(keyspaceName string) (*KeyspaceMetadata, error) {\n\t\tif keyspaceName != keyspace {\n\t\t\treturn nil, fmt.Errorf(\"unknown keyspace: %s\", keyspaceName)\n\t\t}\n\t\treturn &KeyspaceMetadata{\n\t\t\tName:          
keyspace,\n\t\t\tStrategyClass: \"NetworkTopologyStrategy\",\n\t\t\tStrategyOptions: map[string]any{\n\t\t\t\t\"class\":  \"NetworkTopologyStrategy\",\n\t\t\t\t\"local\":  2,\n\t\t\t\t\"remote\": 2,\n\t\t\t},\n\t\t}, nil\n\t}\n\tpolicyWithFallbackInternal.getKeyspaceMetadata = policyInternal.getKeyspaceMetadata\n\tpolicy.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: \"myKeyspace\"})\n\tpolicyWithFallback.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: \"myKeyspace\"})\n\n\t// The NetworkTopologyStrategy above should generate the following replicas.\n\t// It's handy to have as reference here.\n\ttests.AssertDeepEqual(t, \"replicas\", map[string]tokenRingReplicas{\n\t\t\"myKeyspace\": {\n\t\t\t{orderedToken(\"05\"), []*HostInfo{hosts[0], hosts[1], hosts[2], hosts[3]}},\n\t\t\t{orderedToken(\"10\"), []*HostInfo{hosts[1], hosts[2], hosts[3], hosts[4]}},\n\t\t\t{orderedToken(\"15\"), []*HostInfo{hosts[2], hosts[3], hosts[4], hosts[5]}},\n\t\t\t{orderedToken(\"20\"), []*HostInfo{hosts[3], hosts[4], hosts[5], hosts[6]}},\n\t\t\t{orderedToken(\"25\"), []*HostInfo{hosts[4], hosts[5], hosts[6], hosts[7]}},\n\t\t\t{orderedToken(\"30\"), []*HostInfo{hosts[5], hosts[6], hosts[7], hosts[8]}},\n\t\t\t{orderedToken(\"35\"), []*HostInfo{hosts[6], hosts[7], hosts[8], hosts[9]}},\n\t\t\t{orderedToken(\"40\"), []*HostInfo{hosts[7], hosts[8], hosts[9], hosts[10]}},\n\t\t\t{orderedToken(\"45\"), []*HostInfo{hosts[8], hosts[9], hosts[10], hosts[11]}},\n\t\t\t{orderedToken(\"50\"), []*HostInfo{hosts[9], hosts[10], hosts[11], hosts[0]}},\n\t\t\t{orderedToken(\"55\"), []*HostInfo{hosts[10], hosts[11], hosts[0], hosts[1]}},\n\t\t\t{orderedToken(\"60\"), []*HostInfo{hosts[11], hosts[0], hosts[1], hosts[2]}},\n\t\t},\n\t}, policyInternal.getMetadataReadOnly().replicas)\n\n\tquery.RoutingKey([]byte(\"23\"))\n\n\t// now the token ring is configured\n\t// Test the policy with fallback\n\titer = policyWithFallback.Pick(query)\n\n\t// first should be host with matching token from the local DC & 
rack\n\texpectHosts(t, \"matching token from local DC and local rack\", iter, tID(7))\n\t// next should be host with matching token from local DC and other rack\n\texpectHosts(t, \"matching token from local DC and non-local rack\", iter, tID(6))\n\t// next should be hosts with matching token from other DC, in any order\n\texpectHosts(t, \"matching token from non-local DC\", iter, tID(4), tID(5))\n\t// then the local DC & rack that didn't match the token\n\texpectHosts(t, \"non-matching token from local DC and local rack\", iter, tID(3), tID(11))\n\t// then the local DC & other rack that didn't match the token\n\texpectHosts(t, \"non-matching token from local DC and non-local rack\", iter, tID(2), tID(10))\n\t// finally, the other DC that didn't match the token\n\texpectHosts(t, \"non-matching token from non-local DC\", iter, tID(0), tID(1), tID(8), tID(9))\n\texpectNoMoreHosts(t, iter)\n\n\t// Test the policy without fallback\n\titer = policy.Pick(query)\n\n\t// first should be host with matching token from the local DC & Rack\n\texpectHosts(t, \"matching token from local DC and local rack\", iter, tID(7))\n\t// next should be the other two hosts from local DC & rack\n\texpectHosts(t, \"non-matching token local DC and local rack\", iter, tID(3), tID(11))\n\t// then the three hosts from the local DC but other rack\n\texpectHosts(t, \"local DC, non-local rack\", iter, tID(2), tID(6), tID(10))\n\t// then the 6 hosts from the other DC\n\texpectHosts(t, \"non-local DC\", iter, tID(0), tID(1), tID(4), tID(5), tID(8), tID(9))\n\texpectNoMoreHosts(t, iter)\n}\n\nfunc TestHostPolicy_TokenAware_Issue1274(t *testing.T) {\n\tt.Parallel()\n\n\tpolicy := TokenAwareHostPolicy(DCAwareRoundRobinPolicy(\"local\"))\n\tpolicyInternal := policy.(*tokenAwareHostPolicy)\n\tpolicyInternal.getKeyspaceName = func() string { return \"myKeyspace\" }\n\tpolicyInternal.getKeyspaceMetadata = func(ks string) (*KeyspaceMetadata, error) {\n\t\treturn nil, errors.New(\"not 
initialized\")\n\t}\n\n\tquery := &Query{routingInfo: &queryRoutingInfo{}}\n\tquery.getKeyspace = func() string { return \"myKeyspace\" }\n\n\titer := policy.Pick(nil)\n\tif iter == nil {\n\t\tt.Fatal(\"host iterator was nil\")\n\t}\n\tactual := iter()\n\tif actual != nil {\n\t\tt.Fatalf(\"expected nil from iterator, but was %v\", actual)\n\t}\n\n\t// set the hosts\n\thosts := [...]*HostInfo{\n\t\t{hostId: tUUID(0), connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{\"05\"}, dataCenter: \"remote1\"},\n\t\t{hostId: tUUID(1), connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{\"10\"}, dataCenter: \"local\"},\n\t\t{hostId: tUUID(2), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"15\"}, dataCenter: \"remote2\"},\n\t\t{hostId: tUUID(3), connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{\"20\"}, dataCenter: \"remote1\"},\n\t\t{hostId: tUUID(4), connectAddress: net.IPv4(10, 0, 0, 5), tokens: []string{\"25\"}, dataCenter: \"local\"},\n\t\t{hostId: tUUID(5), connectAddress: net.IPv4(10, 0, 0, 6), tokens: []string{\"30\"}, dataCenter: \"remote2\"},\n\t\t{hostId: tUUID(6), connectAddress: net.IPv4(10, 0, 0, 7), tokens: []string{\"35\"}, dataCenter: \"remote1\"},\n\t\t{hostId: tUUID(7), connectAddress: net.IPv4(10, 0, 0, 8), tokens: []string{\"40\"}, dataCenter: \"local\"},\n\t\t{hostId: tUUID(8), connectAddress: net.IPv4(10, 0, 0, 9), tokens: []string{\"45\"}, dataCenter: \"remote2\"},\n\t\t{hostId: tUUID(9), connectAddress: net.IPv4(10, 0, 0, 10), tokens: []string{\"50\"}, dataCenter: \"remote1\"},\n\t\t{hostId: tUUID(10), connectAddress: net.IPv4(10, 0, 0, 11), tokens: []string{\"55\"}, dataCenter: \"local\"},\n\t\t{hostId: tUUID(11), connectAddress: net.IPv4(10, 0, 0, 12), tokens: []string{\"60\"}, dataCenter: \"remote2\"},\n\t}\n\n\tpolicy.SetPartitioner(\"OrderedPartitioner\")\n\n\tpolicyInternal.getKeyspaceMetadata = func(keyspaceName string) (*KeyspaceMetadata, error) {\n\t\tif keyspaceName != \"myKeyspace\" {\n\t\t\treturn nil, 
fmt.Errorf(\"unknown keyspace: %s\", keyspaceName)\n\t\t}\n\t\treturn &KeyspaceMetadata{\n\t\t\tName:          \"myKeyspace\",\n\t\t\tStrategyClass: \"NetworkTopologyStrategy\",\n\t\t\tStrategyOptions: map[string]any{\n\t\t\t\t\"class\":   \"NetworkTopologyStrategy\",\n\t\t\t\t\"local\":   1,\n\t\t\t\t\"remote1\": 1,\n\t\t\t\t\"remote2\": 1,\n\t\t\t},\n\t\t}, nil\n\t}\n\tpolicy.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: \"myKeyspace\"})\n\n\tcancel := make(chan struct{})\n\n\t// now the token ring is configured\n\tfor _, host := range hosts {\n\t\thost := host\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-cancel:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tpolicy.AddHost(host)\n\t\t\t\t\tpolicy.RemoveHost(host)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\tclose(cancel)\n}\n\nfunc TestTokenAwarePolicyReset(t *testing.T) {\n\tt.Parallel()\n\n\tpolicy := TokenAwareHostPolicy(\n\t\tRackAwareRoundRobinPolicy(\"local\", \"b\"),\n\t\tNonLocalReplicasFallback(),\n\t)\n\tpolicyInternal := policy.(*tokenAwareHostPolicy)\n\n\tif policyInternal.fallback == nil {\n\t\tt.Fatal(\"fallback is nil\")\n\t}\n\tif !policyInternal.nonLocalReplicasFallback {\n\t\tt.Fatal(\"nonLocalReplicasFallback is false\")\n\t}\n\n\tpolicy.Init(&Session{logger: &defaultLogger{}})\n\tif policyInternal.getKeyspaceMetadata == nil {\n\t\tt.Fatal(\"keyspace metatadata fn is nil\")\n\t}\n\tif policyInternal.getKeyspaceName == nil {\n\t\tt.Fatal(\"keyspace name fn is nil\")\n\t}\n\tif policyInternal.logger == nil {\n\t\tt.Fatal(\"logger is nil\")\n\t}\n\n\t// Reset - should reset fields that were set in Init\n\tpolicy.Reset()\n\n\tif policyInternal.fallback == nil { // we don't touch fallback\n\t\tt.Fatal(\"fallback is nil\")\n\t}\n\tif !policyInternal.nonLocalReplicasFallback { // we don't touch nonLocalReplicasFallback\n\t\tt.Fatal(\"nonLocalReplicasFallback is false\")\n\t}\n\tif policyInternal.getKeyspaceMetadata != nil {\n\t\tt.Fatal(\"keyspace 
metatadata fn is not nil\")\n\t}\n\tif policyInternal.getKeyspaceName != nil {\n\t\tt.Fatal(\"keyspace name fn is not nil\")\n\t}\n\tif policyInternal.logger != nil {\n\t\tt.Fatal(\"logger is nil\")\n\t}\n}\n\nfunc TestTokenAwareHostPolicyTabletPath(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"HappyPath\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tconst keyspace = \"testks\"\n\t\tconst table = \"testtbl\"\n\n\t\tpolicy := TokenAwareHostPolicy(RoundRobinHostPolicy())\n\t\tpolicyInternal := policy.(*tokenAwareHostPolicy)\n\t\tpolicyInternal.getKeyspaceName = func() string { return keyspace }\n\t\tpolicyInternal.getKeyspaceMetadata = func(ks string) (*KeyspaceMetadata, error) {\n\t\t\treturn nil, errors.New(\"not initialized\")\n\t\t}\n\n\t\thost1 := &HostInfo{hostId: tUUID(1), connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{\"-6148914691236517206\"}}\n\t\thost2 := &HostInfo{hostId: tUUID(2), connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{\"0\"}}\n\t\thost3 := &HostInfo{hostId: tUUID(3), connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{\"6148914691236517206\"}}\n\n\t\tpolicy.AddHost(host1)\n\t\tpolicy.AddHost(host2)\n\t\tpolicy.AddHost(host3)\n\t\tpolicy.SetPartitioner(\"Murmur3Partitioner\")\n\n\t\tpolicyInternal.getKeyspaceMetadata = func(ks string) (*KeyspaceMetadata, error) {\n\t\t\treturn &KeyspaceMetadata{\n\t\t\t\tName:          keyspace,\n\t\t\t\tStrategyClass: \"SimpleStrategy\",\n\t\t\t\tStrategyOptions: map[string]any{\n\t\t\t\t\t\"class\":              \"SimpleStrategy\",\n\t\t\t\t\t\"replication_factor\": 1,\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t\tpolicy.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: keyspace})\n\n\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.isInitialized = true\n\t\ts.tabletsRoutingV1 = true\n\n\t\tt1, err := tablets.TabletInfoBuilder{\n\t\t\tKeyspaceName: keyspace,\n\t\t\tTableName:    table,\n\t\t\tFirstToken: 
  -9223372036854775808,\n\t\t\tLastToken:    0,\n\t\t\tReplicas:     [][]any{{host2.hostId, 0}},\n\t\t}.Build()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tt2, err := tablets.TabletInfoBuilder{\n\t\t\tKeyspaceName: keyspace,\n\t\t\tTableName:    table,\n\t\t\tFirstToken:   0,\n\t\t\tLastToken:    9223372036854775807,\n\t\t\tReplicas:     [][]any{{host3.hostId, 0}},\n\t\t}.Build()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\ts.metadataDescriber.AddTablet(t1)\n\t\ts.metadataDescriber.AddTablet(t2)\n\t\ts.metadataDescriber.metadata.tabletsMetadata.Flush()\n\n\t\tquery := &Query{\n\t\t\troutingInfo: &queryRoutingInfo{\n\t\t\t\tkeyspace:    keyspace,\n\t\t\t\ttable:       table,\n\t\t\t\tpartitioner: fixedInt64Partitioner(-42),\n\t\t\t},\n\t\t\tsession: s,\n\t\t}\n\t\tquery.getKeyspace = func() string { return keyspace }\n\t\tquery.routingKey = []byte(\"anything\")\n\n\t\titer := policy.Pick(query)\n\t\tfirst := iter()\n\t\tif first == nil || first.Info() == nil {\n\t\t\tt.Fatal(\"expected a host from tablet path, got nil\")\n\t\t}\n\t\tif first.Info().HostID() != tID(2) {\n\t\t\tt.Fatalf(\"expected host tUUID(2) from tablet path, got %s\", first.Info().HostID())\n\t\t}\n\n\t\tquery2 := &Query{\n\t\t\troutingInfo: &queryRoutingInfo{\n\t\t\t\tkeyspace:    keyspace,\n\t\t\t\ttable:       table,\n\t\t\t\tpartitioner: fixedInt64Partitioner(42),\n\t\t\t},\n\t\t\tsession: s,\n\t\t}\n\t\tquery2.getKeyspace = func() string { return keyspace }\n\t\tquery2.routingKey = []byte(\"anything\")\n\n\t\titer2 := policy.Pick(query2)\n\t\tfirst2 := iter2()\n\t\tif first2 == nil || first2.Info() == nil {\n\t\t\tt.Fatal(\"expected a host from tablet path, got nil\")\n\t\t}\n\t\tif first2.Info().HostID() != tID(3) {\n\t\t\tt.Fatalf(\"expected host tUUID(3) from tablet path, got %s\", first2.Info().HostID())\n\t\t}\n\t})\n}\n\ntype fixedInt64Partitioner int64\n\nfunc (f fixedInt64Partitioner) Name() string               { return \"FixedInt64Partitioner\" }\nfunc (f 
fixedInt64Partitioner) Hash([]byte) Token          { return int64Token(f) }\nfunc (f fixedInt64Partitioner) ParseString(s string) Token { return parseInt64Token(s) }\n\nfunc TestHostSetInline(t *testing.T) {\n\tvar s hostSet\n\thosts := make([]*HostInfo, 9)\n\tfor i := range hosts {\n\t\thosts[i] = &HostInfo{}\n\t\ts.add(hosts[i])\n\t}\n\t// All 9 should be tracked inline (no overflow map).\n\tif s.overflow != nil {\n\t\tt.Fatal(\"expected inline-only storage for 9 hosts\")\n\t}\n\tfor i, h := range hosts {\n\t\tif !s.contains(h) {\n\t\t\tt.Fatalf(\"host %d not found in inline set\", i)\n\t\t}\n\t}\n\t// Unknown host should not be found.\n\tif s.contains(&HostInfo{}) {\n\t\tt.Fatal(\"unexpected contains=true for unknown host\")\n\t}\n}\n\nfunc TestHostSetOverflow(t *testing.T) {\n\tvar s hostSet\n\thosts := make([]*HostInfo, 15) // exceeds inline capacity of 9\n\tfor i := range hosts {\n\t\thosts[i] = &HostInfo{}\n\t\ts.add(hosts[i])\n\t}\n\t// Should have spilled to map.\n\tif s.overflow == nil {\n\t\tt.Fatal(\"expected overflow map for 15 hosts\")\n\t}\n\t// Every host must be found, including those added before and after spill.\n\tfor i, h := range hosts {\n\t\tif !s.contains(h) {\n\t\t\tt.Fatalf(\"host %d not found after overflow\", i)\n\t\t}\n\t}\n\t// Unknown host should not be found.\n\tif s.contains(&HostInfo{}) {\n\t\tt.Fatal(\"unexpected contains=true for unknown host in overflow mode\")\n\t}\n}\n\nfunc TestHostSetOverflowPreservesInlineEntries(t *testing.T) {\n\tvar s hostSet\n\t// Fill inline storage exactly.\n\tinline := make([]*HostInfo, 9)\n\tfor i := range inline {\n\t\tinline[i] = &HostInfo{}\n\t\ts.add(inline[i])\n\t}\n\t// Add one more to trigger spill.\n\textra := &HostInfo{}\n\ts.add(extra)\n\n\tif s.overflow == nil {\n\t\tt.Fatal(\"expected overflow map after 10th add\")\n\t}\n\t// Inline entries must be findable via the map path.\n\tfor i, h := range inline {\n\t\tif !s.contains(h) {\n\t\t\tt.Fatalf(\"inline host %d lost after spill\", 
i)\n\t\t}\n\t}\n\tif !s.contains(extra) {\n\t\tt.Fatal(\"extra host not found after spill\")\n\t}\n}\n"
  },
  {
    "path": "prepared_cache.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"bytes\"\n\t\"sync\"\n\n\t\"github.com/gocql/gocql/internal/lru\"\n)\n\nconst defaultMaxPreparedStmts = 1000\n\n// stmtCacheKey is a composite key for the prepared statement cache.\n// Using a struct avoids the string concatenation allocation that occurred\n// on every query and fixes the theoretical key collision bug where\n// different (hostID, keyspace, statement) tuples could produce the same\n// concatenated string.\ntype stmtCacheKey struct {\n\thostID    string\n\tkeyspace  string\n\tstatement string\n}\n\n// preparedLRU is the prepared statement cache\ntype preparedLRU struct {\n\tlru *lru.Cache[stmtCacheKey]\n\tmu  sync.Mutex\n}\n\nfunc (p *preparedLRU) clear() {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tfor p.lru.Len() > 0 {\n\t\tp.lru.RemoveOldest()\n\t}\n}\n\nfunc (p *preparedLRU) add(key stmtCacheKey, val *inflightPrepare) {\n\tp.mu.Lock()\n\tdefer 
p.mu.Unlock()\n\tp.lru.Add(key, val)\n}\n\nfunc (p *preparedLRU) remove(key stmtCacheKey) bool {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\treturn p.lru.Remove(key)\n}\n\nfunc (p *preparedLRU) execIfMissing(key stmtCacheKey, fn func(cache *lru.Cache[stmtCacheKey]) *inflightPrepare) (*inflightPrepare, bool) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tval, ok := p.lru.Get(key)\n\tif ok {\n\t\treturn val.(*inflightPrepare), true\n\t}\n\n\treturn fn(p.lru), false\n}\n\n// keyFor constructs a zero-allocation composite cache key from the given\n// components. The returned struct references the original strings without\n// copying, so no heap allocation occurs.\nfunc (p *preparedLRU) keyFor(hostID, keyspace, statement string) stmtCacheKey {\n\treturn stmtCacheKey{\n\t\thostID:    hostID,\n\t\tkeyspace:  keyspace,\n\t\tstatement: statement,\n\t}\n}\n\nfunc (p *preparedLRU) evictPreparedID(key stmtCacheKey, id []byte) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tval, ok := p.lru.Get(key)\n\tif !ok {\n\t\treturn\n\t}\n\n\tifp, ok := val.(*inflightPrepare)\n\tif !ok {\n\t\treturn\n\t}\n\n\tselect {\n\tcase <-ifp.done:\n\t\tif bytes.Equal(id, ifp.preparedStatment.id) {\n\t\t\tp.lru.Remove(key)\n\t\t}\n\tdefault:\n\t}\n}\n"
  },
  {
    "path": "query_error_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage gocql\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestQueryError_PotentiallyExecuted(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname                string\n\t\tpotentiallyExecuted bool\n\t\texpected            bool\n\t}{\n\t\t{\n\t\t\tname:                \"potentially executed true\",\n\t\t\tpotentiallyExecuted: true,\n\t\t\texpected:            true,\n\t\t},\n\t\t{\n\t\t\tname:                \"potentially executed false\",\n\t\t\tpotentiallyExecuted: false,\n\t\t\texpected:            false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tqErr := &QueryError{\n\t\t\t\terr:                 errors.New(\"test error\"),\n\t\t\t\tpotentiallyExecuted: tt.potentiallyExecuted,\n\t\t\t}\n\n\t\t\tgot := qErr.PotentiallyExecuted()\n\t\t\tif got != tt.expected {\n\t\t\t\tt.Fatalf(\"QueryError.PotentiallyExecuted() = %v, expected %v\", got, tt.expected)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestQueryError_IsIdempotent(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname         string\n\t\tisIdempotent bool\n\t\texpected     bool\n\t}{\n\t\t{\n\t\t\tname:         \"idempotent true\",\n\t\t\tisIdempotent: true,\n\t\t\texpected:     true,\n\t\t},\n\t\t{\n\t\t\tname:         \"idempotent false\",\n\t\t\tisIdempotent: false,\n\t\t\texpected:     false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tqErr := &QueryError{\n\t\t\t\terr:          errors.New(\"test error\"),\n\t\t\t\tisIdempotent: tt.isIdempotent,\n\t\t\t}\n\n\t\t\tgot := qErr.IsIdempotent()\n\t\t\tif got != tt.expected {\n\t\t\t\tt.Errorf(\"QueryError.IsIdempotent() = %v, expected %v\", got, tt.expected)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestQueryError_Error(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname                string\n\t\terr                 error\n\t\tpotentiallyExecuted bool\n\t\ttimeout       
      time.Duration\n\t\tinFlight            int\n\t\texpected            string\n\t}{\n\t\t{\n\t\t\tname:                \"with potentially executed true\",\n\t\t\terr:                 errors.New(\"connection error\"),\n\t\t\tpotentiallyExecuted: true,\n\t\t\texpected:            \"connection error (potentially executed: true)\",\n\t\t},\n\t\t{\n\t\t\tname:                \"with potentially executed false\",\n\t\t\terr:                 errors.New(\"syntax error\"),\n\t\t\tpotentiallyExecuted: false,\n\t\t\texpected:            \"syntax error (potentially executed: false)\",\n\t\t},\n\t\t{\n\t\t\tname:                \"with timeout\",\n\t\t\terr:                 ErrTimeoutNoResponse,\n\t\t\tpotentiallyExecuted: true,\n\t\t\ttimeout:             11 * time.Second,\n\t\t\tinFlight:            42,\n\t\t\texpected:            \"gocql: no response received from cassandra within timeout period (timeout: 11s, in-flight: 42) (potentially executed: true)\",\n\t\t},\n\t\t{\n\t\t\tname:                \"with zero timeout omits timeout\",\n\t\t\terr:                 errors.New(\"some error\"),\n\t\t\tpotentiallyExecuted: false,\n\t\t\ttimeout:             0,\n\t\t\texpected:            \"some error (potentially executed: false)\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tqErr := &QueryError{\n\t\t\t\terr:                 tt.err,\n\t\t\t\tpotentiallyExecuted: tt.potentiallyExecuted,\n\t\t\t\ttimeout:             tt.timeout,\n\t\t\t\tinFlight:            tt.inFlight,\n\t\t\t}\n\n\t\t\tgot := qErr.Error()\n\t\t\tif got != tt.expected {\n\t\t\t\tt.Errorf(\"QueryError.Error() = %v, expected %v\", got, tt.expected)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "query_executor.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype ExecutableQuery interface {\n\tborrowForExecution()    // Used to ensure that the query stays alive for lifetime of a particular execution goroutine.\n\treleaseAfterExecution() // Used when a goroutine finishes its execution attempts, either with ok result or an error.\n\texecute(ctx context.Context, conn *Conn) *Iter\n\tattempt(keyspace string, end, start time.Time, iter *Iter, host *HostInfo)\n\tretryPolicy() RetryPolicy\n\tspeculativeExecutionPolicy() SpeculativeExecutionPolicy\n\tGetRoutingKey() ([]byte, error)\n\tKeyspace() string\n\tTable() string\n\tIsIdempotent() bool\n\tIsLWT() bool\n\tGetCustomPartitioner() Partitioner\n\tGetHostID() string\n\n\twithContext(context.Context) ExecutableQuery\n\n\tRetryableQuery\n\n\tGetSession() *Session\n}\n\ntype queryExecutor struct {\n\tpool   
*policyConnPool\n\tpolicy HostSelectionPolicy\n}\n\nfunc (q *queryExecutor) attemptQuery(ctx context.Context, qry ExecutableQuery, conn *Conn) *Iter {\n\tstart := time.Now()\n\titer := qry.execute(ctx, conn)\n\tend := time.Now()\n\n\tqry.attempt(q.pool.keyspace, end, start, iter, conn.host)\n\n\treturn iter\n}\n\nfunc (q *queryExecutor) speculate(ctx context.Context, qry ExecutableQuery, sp SpeculativeExecutionPolicy,\n\thostIter NextHost, results chan *Iter) *Iter {\n\tticker := time.NewTicker(sp.Delay())\n\tdefer ticker.Stop()\n\n\tfor i := 0; i < sp.Attempts(); i++ {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tqry.borrowForExecution() // ensure liveness in case of executing Query to prevent races with Query.Release().\n\t\t\tgo q.run(ctx, qry, hostIter, results)\n\t\tcase <-ctx.Done():\n\t\t\treturn &Iter{err: ctx.Err()}\n\t\tcase iter := <-results:\n\t\t\treturn iter\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (q *queryExecutor) executeQuery(qry ExecutableQuery) (*Iter, error) {\n\tvar hostIter NextHost\n\n\t// check if the hostID is specified for the query,\n\t// if true  - the query execute at the specified host.\n\t// if false - the query execute at the host picked by HostSelectionPolicy\n\tif hostID := qry.GetHostID(); hostID != \"\" {\n\t\tpool, ok := q.pool.getPoolByHostID(hostID)\n\t\tif !ok {\n\t\t\t// if the specified host ID have no connection pool we return error\n\t\t\treturn nil, fmt.Errorf(\"query is targeting unknown host id %s: %w\", hostID, ErrNoPool)\n\t\t} else if pool.Size() == 0 {\n\t\t\t// if the pool have no connection we return error\n\t\t\treturn nil, fmt.Errorf(\"query is targeting host id %s that driver is not connected to: %w\", hostID, ErrNoConnectionsInPool)\n\t\t}\n\t\thostIter = newSingleHost(pool.host, 5, 200*time.Millisecond).selectHost\n\t} else {\n\t\thostIter = q.policy.Pick(qry)\n\t}\n\n\t// check if the query is not marked as idempotent, if\n\t// it is, we force the policy to NonSpeculative\n\tsp := 
qry.speculativeExecutionPolicy()\n\tif qry.GetHostID() != \"\" || !qry.IsIdempotent() || sp.Attempts() == 0 {\n\t\treturn q.do(qry.Context(), qry, hostIter), nil\n\t}\n\n\t// When speculative execution is enabled, we could be accessing the host iterator from multiple goroutines below.\n\t// To ensure we don't call it concurrently, we wrap the returned NextHost function here to synchronize access to it.\n\tvar mu sync.Mutex\n\torigHostIter := hostIter\n\thostIter = func() SelectedHost {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\t\treturn origHostIter()\n\t}\n\n\tctx, cancel := context.WithCancel(qry.Context())\n\tdefer cancel()\n\n\tresults := make(chan *Iter, 1)\n\n\t// Launch the main execution\n\tqry.borrowForExecution() // ensure liveness in case of executing Query to prevent races with Query.Release().\n\tgo q.run(ctx, qry, hostIter, results)\n\n\t// The speculative executions are launched _in addition_ to the main\n\t// execution, on a timer. So Speculation{2} would make 3 executions running\n\t// in total.\n\tif iter := q.speculate(ctx, qry, sp, hostIter, results); iter != nil {\n\t\treturn iter, nil\n\t}\n\n\tselect {\n\tcase iter := <-results:\n\t\treturn iter, nil\n\tcase <-ctx.Done():\n\t\treturn &Iter{err: ctx.Err()}, nil\n\t}\n}\n\nfunc (q *queryExecutor) do(ctx context.Context, qry ExecutableQuery, hostIter NextHost) *Iter {\n\trt := qry.retryPolicy()\n\tif rt == nil {\n\t\trt = &SimpleRetryPolicy{NumRetries: 3}\n\t}\n\n\tlwtRT, isRTSupportsLWT := rt.(LWTRetryPolicy)\n\n\tvar getShouldRetry func(qry RetryableQuery) bool\n\tvar getRetryType func(error) RetryType\n\n\tif isRTSupportsLWT && qry.IsLWT() {\n\t\tgetShouldRetry = lwtRT.AttemptLWT\n\t\tgetRetryType = lwtRT.GetRetryTypeLWT\n\t} else {\n\t\tgetShouldRetry = rt.Attempt\n\t\tgetRetryType = rt.GetRetryType\n\t}\n\n\tvar potentiallyExecuted bool\n\n\texecute := func(qry ExecutableQuery, selectedHost SelectedHost) (iter *Iter, retry RetryType) {\n\t\thost := selectedHost.Info()\n\t\tif host == nil || 
!host.IsUp() {\n\t\t\treturn &Iter{\n\t\t\t\terr: &QueryError{\n\t\t\t\t\terr:                 ErrHostDown,\n\t\t\t\t\tpotentiallyExecuted: potentiallyExecuted,\n\t\t\t\t},\n\t\t\t}, RetryNextHost\n\t\t}\n\t\tpool, ok := q.pool.getPool(host)\n\t\tif !ok {\n\t\t\treturn &Iter{\n\t\t\t\terr: &QueryError{\n\t\t\t\t\terr:                 ErrNoPool,\n\t\t\t\t\tpotentiallyExecuted: potentiallyExecuted,\n\t\t\t\t},\n\t\t\t}, RetryNextHost\n\t\t}\n\t\tconn := pool.Pick(selectedHost.Token(), qry)\n\t\tif conn == nil {\n\t\t\treturn &Iter{\n\t\t\t\terr: &QueryError{\n\t\t\t\t\terr:                 ErrNoConnectionsInPool,\n\t\t\t\t\tpotentiallyExecuted: potentiallyExecuted,\n\t\t\t\t},\n\t\t\t}, RetryNextHost\n\t\t}\n\t\titer = q.attemptQuery(ctx, qry, conn)\n\t\titer.host = selectedHost.Info()\n\t\t// Update host\n\t\tif iter.err == nil {\n\t\t\treturn iter, RetryType(255)\n\t\t}\n\n\t\tswitch {\n\t\tcase errors.Is(iter.err, context.Canceled),\n\t\t\terrors.Is(iter.err, context.DeadlineExceeded):\n\t\t\tselectedHost.Mark(nil)\n\t\t\tpotentiallyExecuted = true\n\t\t\tretry = Rethrow\n\t\tdefault:\n\t\t\tselectedHost.Mark(iter.err)\n\t\t\tretry = RetryType(255) // Don't enforce retry and get it from retry policy\n\t\t}\n\n\t\tvar qErr *QueryError\n\t\tif errors.As(iter.err, &qErr) {\n\t\t\tpotentiallyExecuted = potentiallyExecuted || qErr.PotentiallyExecuted()\n\t\t\tqErr.potentiallyExecuted = potentiallyExecuted\n\t\t\tqErr.isIdempotent = qry.IsIdempotent()\n\t\t\titer.err = qErr\n\t\t} else {\n\t\t\titer.err = &QueryError{\n\t\t\t\terr:                 iter.err,\n\t\t\t\tpotentiallyExecuted: potentiallyExecuted,\n\t\t\t\tisIdempotent:        qry.IsIdempotent(),\n\t\t\t}\n\t\t}\n\t\treturn iter, retry\n\t}\n\n\tvar lastErr error\n\tselectedHost := hostIter()\n\tfor selectedHost != nil {\n\t\titer, retryType := execute(qry, selectedHost)\n\t\tif iter.err == nil {\n\t\t\treturn iter\n\t\t}\n\t\tlastErr = iter.err\n\n\t\t// Exit if retry policy decides to not retry 
anymore\n\t\tif retryType == RetryType(255) {\n\t\t\tif !getShouldRetry(qry) {\n\t\t\t\treturn iter\n\t\t\t}\n\t\t\tretryType = getRetryType(iter.err)\n\t\t}\n\n\t\t// If query is unsuccessful, check the error with RetryPolicy to retry\n\t\tswitch retryType {\n\t\tcase Retry:\n\t\t\titer.finalize(true)\n\t\t\t// retry on the same host\n\t\t\tcontinue\n\t\tcase Rethrow, Ignore:\n\t\t\treturn iter\n\t\tcase RetryNextHost:\n\t\t\titer.finalize(true)\n\t\t\t// retry on the next host\n\t\t\tselectedHost = hostIter()\n\t\t\tcontinue\n\t\tdefault:\n\t\t\t// Undefined? Return nil and error, this will panic in the requester\n\t\t\treturn &Iter{err: ErrUnknownRetryType}\n\t\t}\n\t}\n\tif lastErr != nil {\n\t\treturn &Iter{err: lastErr}\n\t}\n\treturn &Iter{err: ErrNoConnections}\n}\n\nfunc (q *queryExecutor) run(ctx context.Context, qry ExecutableQuery, hostIter NextHost, results chan<- *Iter) {\n\titer := q.do(ctx, qry, hostIter)\n\tselect {\n\tcase results <- iter:\n\tcase <-ctx.Done():\n\t\titer.discard()\n\t}\n\tqry.releaseAfterExecution()\n}\n"
  },
  {
    "path": "recreate.go",
    "content": "// Copyright (C) 2017 ScyllaDB\n\npackage gocql\n\nimport (\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text/template\"\n)\n\n// ToCQL returns a CQL query that ca be used to recreate keyspace with all\n// user defined types, tables, indexes, functions, aggregates and views associated\n// with this keyspace.\nfunc (ks *KeyspaceMetadata) ToCQL() (string, error) {\n\t// Be aware that `CreateStmts` is not only a cache for ToCQL,\n\t// but it also can be populated from response to `DESCRIBE KEYSPACE %s WITH INTERNALS`\n\tif len(ks.CreateStmts) != 0 {\n\t\treturn ks.CreateStmts, nil\n\t}\n\n\tvar sb strings.Builder\n\n\tif err := ks.keyspaceToCQL(&sb); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsortedTypes := ks.typesSortedTopologically()\n\tfor _, tm := range sortedTypes {\n\t\tif err := ks.userTypeToCQL(&sb, tm); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tfor _, tm := range ks.Tables {\n\t\tif err := ks.tableToCQL(&sb, ks.Name, tm); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tfor _, im := range ks.Indexes {\n\t\tif err := ks.indexToCQL(&sb, im); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tfor _, fm := range ks.Functions {\n\t\tif err := ks.functionToCQL(&sb, ks.Name, fm); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tfor _, am := range ks.Aggregates {\n\t\tif err := ks.aggregateToCQL(&sb, am); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tfor _, vm := range ks.Views {\n\t\tif err := ks.viewToCQL(&sb, vm); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tks.CreateStmts = sb.String()\n\treturn ks.CreateStmts, nil\n}\n\nfunc (ks *KeyspaceMetadata) typesSortedTopologically() []*TypeMetadata {\n\tsortedTypes := make([]*TypeMetadata, 0, len(ks.Types))\n\tfor _, tm := range ks.Types {\n\t\tsortedTypes = append(sortedTypes, tm)\n\t}\n\tsort.Slice(sortedTypes, func(i, j int) bool {\n\t\tfor _, ft := range sortedTypes[j].FieldTypes {\n\t\t\tif 
strings.Contains(ft, sortedTypes[i].Name) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t})\n\treturn sortedTypes\n}\n\nvar tableCQLTemplate = template.Must(template.New(\"table\").\n\tFuncs(map[string]any{\n\t\t\"escape\":               cqlHelpers.escape,\n\t\t\"tableColumnToCQL\":     cqlHelpers.tableColumnToCQL,\n\t\t\"tablePropertiesToCQL\": cqlHelpers.tablePropertiesToCQL,\n\t}).\n\tParse(`\nCREATE TABLE {{ .KeyspaceName }}.{{ .Tm.Name }} (\n    {{ tableColumnToCQL .Tm }}\n) WITH {{ tablePropertiesToCQL .Tm.ClusteringColumns .Tm.Options .Tm.Extensions }};\n`))\n\nfunc (ks *KeyspaceMetadata) tableToCQL(w io.Writer, kn string, tm *TableMetadata) error {\n\tif err := tableCQLTemplate.Execute(w, map[string]any{\n\t\t\"Tm\":           tm,\n\t\t\"KeyspaceName\": kn,\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nvar functionTemplate = template.Must(template.New(\"functions\").\n\tFuncs(map[string]any{\n\t\t\"escape\":      cqlHelpers.escape,\n\t\t\"zip\":         cqlHelpers.zip,\n\t\t\"stripFrozen\": cqlHelpers.stripFrozen,\n\t}).\n\tParse(`\nCREATE FUNCTION {{ .keyspaceName }}.{{ .fm.Name }} ( \n    {{- range $i, $args := zip .fm.ArgumentNames .fm.ArgumentTypes }}\n    {{- if ne $i 0 }}, {{ end }}\n    {{- (index $args 0) }}\n    {{ stripFrozen (index $args 1) }}\n    {{- end -}})\n    {{ if .fm.CalledOnNullInput }}CALLED{{ else }}RETURNS NULL{{ end }} ON NULL INPUT\n    RETURNS {{ .fm.ReturnType }}\n    LANGUAGE {{ .fm.Language }}\n    AS $${{ .fm.Body }}$$;\n`))\n\nfunc (ks *KeyspaceMetadata) functionToCQL(w io.Writer, keyspaceName string, fm *FunctionMetadata) error {\n\tif err := functionTemplate.Execute(w, map[string]any{\n\t\t\"fm\":           fm,\n\t\t\"keyspaceName\": keyspaceName,\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nvar viewTemplate = template.Must(template.New(\"views\").\n\tFuncs(map[string]any{\n\t\t\"zip\":                  cqlHelpers.zip,\n\t\t\"partitionKeyString\":   
cqlHelpers.partitionKeyString,\n\t\t\"tablePropertiesToCQL\": cqlHelpers.tablePropertiesToCQL,\n\t}).\n\tParse(`\nCREATE MATERIALIZED VIEW {{ .vm.KeyspaceName }}.{{ .vm.ViewName }} AS\n    SELECT {{ if .vm.IncludeAllColumns }}*{{ else }}\n    {{- range $i, $col := .vm.OrderedColumns }}\n    {{- if ne $i 0 }}, {{ end }}\n    {{ $col }}\n    {{- end }}\n    {{- end }}\n    FROM {{ .vm.KeyspaceName }}.{{ .vm.BaseTableName }}\n    WHERE {{ .vm.WhereClause }}\n    PRIMARY KEY ({{ partitionKeyString .vm.PartitionKey .vm.ClusteringColumns }})\n    WITH {{ tablePropertiesToCQL .vm.ClusteringColumns .vm.Options .vm.Extensions }};\n`))\n\nfunc (ks *KeyspaceMetadata) viewToCQL(w io.Writer, vm *ViewMetadata) error {\n\tif err := viewTemplate.Execute(w, map[string]any{\n\t\t\"vm\": vm,\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nvar aggregatesTemplate = template.Must(template.New(\"aggregate\").\n\tFuncs(map[string]any{\n\t\t\"stripFrozen\": cqlHelpers.stripFrozen,\n\t}).\n\tParse(`\nCREATE AGGREGATE {{ .Keyspace }}.{{ .Name }}( \n    {{- range $i, $arg := .ArgumentTypes }}\n    {{- if ne $i 0 }}, {{ end }}\n    {{ stripFrozen $arg }}\n    {{- end -}})\n    SFUNC {{ .StateFunc.Name }}\n    STYPE {{ stripFrozen .StateType }}\n    {{- if ne .FinalFunc.Name \"\" }}\n    FINALFUNC {{ .FinalFunc.Name }}\n    {{- end -}}\n    {{- if ne .InitCond \"\" }}\n    INITCOND {{ .InitCond }}\n    {{- end -}}\n;\n`))\n\nfunc (ks *KeyspaceMetadata) aggregateToCQL(w io.Writer, am *AggregateMetadata) error {\n\tif err := aggregatesTemplate.Execute(w, am); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nvar typeCQLTemplate = template.Must(template.New(\"types\").\n\tFuncs(map[string]any{\n\t\t\"zip\": cqlHelpers.zip,\n\t}).\n\tParse(`\nCREATE TYPE {{ .Keyspace }}.{{ .Name }} ( \n  {{- range $i, $fields := zip .FieldNames .FieldTypes }} {{- if ne $i 0 }},{{ end }}\n    {{ index $fields 0 }} {{ index $fields 1 }}\n  {{- end }}\n);\n`))\n\nfunc (ks *KeyspaceMetadata) 
userTypeToCQL(w io.Writer, tm *TypeMetadata) error {\n\tif err := typeCQLTemplate.Execute(w, tm); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (ks *KeyspaceMetadata) indexToCQL(w io.Writer, im *IndexMetadata) error {\n\t// Scylla doesn't support any custom indexes\n\tif im.Kind == IndexKindCustom {\n\t\treturn nil\n\t}\n\n\toptions := im.Options\n\tindexTarget := options[\"target\"]\n\n\t// secondary index\n\tsi := struct {\n\t\tClusteringKeys []string `json:\"ck\"`\n\t\tPartitionKeys  []string `json:\"pk\"`\n\t}{}\n\n\tif err := json.Unmarshal([]byte(indexTarget), &si); err == nil {\n\t\tindexTarget = fmt.Sprintf(\"(%s), %s\",\n\t\t\tstrings.Join(si.PartitionKeys, \",\"),\n\t\t\tstrings.Join(si.ClusteringKeys, \",\"),\n\t\t)\n\t}\n\n\t_, err := fmt.Fprintf(w, \"\\nCREATE INDEX %s ON %s.%s (%s);\\n\",\n\t\tim.Name,\n\t\tim.KeyspaceName,\n\t\tim.TableName,\n\t\tindexTarget,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nvar keyspaceCQLTemplate = template.Must(template.New(\"keyspace\").\n\tFuncs(map[string]any{\n\t\t\"escape\":      cqlHelpers.escape,\n\t\t\"fixStrategy\": cqlHelpers.fixStrategy,\n\t}).\n\tParse(`CREATE KEYSPACE {{ .Name }} WITH replication = {\n    'class': {{ escape ( fixStrategy .StrategyClass) }}\n    {{- range $key, $value := .StrategyOptions }},\n    {{ escape $key }}: {{ escape $value }}\n    {{- end }}\n}{{ if not .DurableWrites }} AND durable_writes = 'false'{{ end }};\n`))\n\nfunc (ks *KeyspaceMetadata) keyspaceToCQL(w io.Writer) error {\n\tif err := keyspaceCQLTemplate.Execute(w, ks); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc contains(in []string, v string) bool {\n\tfor _, e := range in {\n\t\tif e == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\ntype toCQLHelpers struct{}\n\nvar cqlHelpers = toCQLHelpers{}\n\nfunc (h toCQLHelpers) zip(a []string, b []string) [][]string {\n\tm := make([][]string, len(a))\n\tfor i := range a {\n\t\tm[i] = []string{a[i], b[i]}\n\t}\n\treturn 
m\n}\n\nfunc (h toCQLHelpers) escape(e any) string {\n\tswitch v := e.(type) {\n\tcase int, float64:\n\t\treturn fmt.Sprint(v)\n\tcase bool:\n\t\tif v {\n\t\t\treturn \"true\"\n\t\t}\n\t\treturn \"false\"\n\tcase string:\n\t\treturn \"'\" + strings.ReplaceAll(v, \"'\", \"''\") + \"'\"\n\tcase []byte:\n\t\treturn string(v)\n\t}\n\treturn \"\"\n}\n\nfunc (h toCQLHelpers) stripFrozen(v string) string {\n\treturn strings.TrimSuffix(strings.TrimPrefix(v, \"frozen<\"), \">\")\n}\nfunc (h toCQLHelpers) fixStrategy(v string) string {\n\treturn strings.TrimPrefix(v, \"org.apache.cassandra.locator.\")\n}\n\nfunc (h toCQLHelpers) fixQuote(v string) string {\n\treturn strings.ReplaceAll(v, `\"`, `'`)\n}\n\nfunc (h toCQLHelpers) tableOptionsToCQL(ops TableMetadataOptions) ([]string, error) {\n\topts := map[string]any{\n\t\t\"bloom_filter_fp_chance\":      ops.BloomFilterFpChance,\n\t\t\"comment\":                     ops.Comment,\n\t\t\"crc_check_chance\":            ops.CrcCheckChance,\n\t\t\"default_time_to_live\":        ops.DefaultTimeToLive,\n\t\t\"gc_grace_seconds\":            ops.GcGraceSeconds,\n\t\t\"max_index_interval\":          ops.MaxIndexInterval,\n\t\t\"memtable_flush_period_in_ms\": ops.MemtableFlushPeriodInMs,\n\t\t\"min_index_interval\":          ops.MinIndexInterval,\n\t\t\"speculative_retry\":           ops.SpeculativeRetry,\n\t}\n\n\tvar err error\n\topts[\"caching\"], err = json.Marshal(ops.Caching)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topts[\"compaction\"], err = json.Marshal(ops.Compaction)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topts[\"compression\"], err = json.Marshal(ops.Compression)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcdc, err := json.Marshal(ops.CDC)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif string(cdc) != \"null\" {\n\t\topts[\"cdc\"] = cdc\n\t}\n\n\tif ops.InMemory {\n\t\topts[\"in_memory\"] = ops.InMemory\n\t}\n\n\tout := make([]string, 0, len(opts))\n\tfor key, opt := range opts {\n\t\tout = 
append(out, fmt.Sprintf(\"%s = %s\", key, h.fixQuote(h.escape(opt))))\n\t}\n\n\tsort.Strings(out)\n\treturn out, nil\n}\n\nfunc (h toCQLHelpers) tableExtensionsToCQL(extensions map[string]any) ([]string, error) {\n\texts := map[string]any{}\n\n\tif blob, ok := extensions[\"scylla_encryption_options\"]; ok {\n\t\tencOpts := &scyllaEncryptionOptions{}\n\t\tif err := encOpts.UnmarshalBinary(blob.([]byte)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar err error\n\t\texts[\"scylla_encryption_options\"], err = json.Marshal(encOpts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t}\n\n\tout := make([]string, 0, len(exts))\n\tfor key, ext := range exts {\n\t\tout = append(out, fmt.Sprintf(\"%s = %s\", key, h.fixQuote(h.escape(ext))))\n\t}\n\n\tsort.Strings(out)\n\treturn out, nil\n}\n\nfunc (h toCQLHelpers) tablePropertiesToCQL(cks []*ColumnMetadata, opts TableMetadataOptions,\n\textensions map[string]any) (string, error) {\n\tvar sb strings.Builder\n\n\tvar properties []string\n\n\tif len(cks) > 0 {\n\t\tvar inner []string\n\t\tfor _, col := range cks {\n\t\t\tinner = append(inner, fmt.Sprintf(\"%s %s\", col.Name, col.ClusteringOrder))\n\t\t}\n\t\tproperties = append(properties, fmt.Sprintf(\"CLUSTERING ORDER BY (%s)\", strings.Join(inner, \", \")))\n\t}\n\n\toptions, err := h.tableOptionsToCQL(opts)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tproperties = append(properties, options...)\n\n\texts, err := h.tableExtensionsToCQL(extensions)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tproperties = append(properties, exts...)\n\n\tsb.WriteString(strings.Join(properties, \"\\n    AND \"))\n\treturn sb.String(), nil\n}\n\nfunc (h toCQLHelpers) tableColumnToCQL(tm *TableMetadata) string {\n\tvar sb strings.Builder\n\n\tvar columns []string\n\tfor _, cn := range tm.OrderedColumns {\n\t\tcm := tm.Columns[cn]\n\t\tcolumn := fmt.Sprintf(\"%s %s\", cn, cm.Type)\n\t\tif cm.Kind == ColumnStatic {\n\t\t\tcolumn += \" static\"\n\t\t}\n\t\tcolumns = 
append(columns, column)\n\t}\n\tif len(tm.PartitionKey) == 1 && len(tm.ClusteringColumns) == 0 && len(columns) > 0 {\n\t\tcolumns[0] += \" PRIMARY KEY\"\n\t}\n\n\tsb.WriteString(strings.Join(columns, \",\\n    \"))\n\n\tif len(tm.PartitionKey) > 1 || len(tm.ClusteringColumns) > 0 {\n\t\tsb.WriteString(\",\\n    PRIMARY KEY (\")\n\t\tsb.WriteString(h.partitionKeyString(tm.PartitionKey, tm.ClusteringColumns))\n\t\tsb.WriteRune(')')\n\t}\n\n\treturn sb.String()\n}\n\nfunc (h toCQLHelpers) partitionKeyString(pks, cks []*ColumnMetadata) string {\n\tvar sb strings.Builder\n\n\tif len(pks) > 1 {\n\t\tsb.WriteRune('(')\n\t\tfor i, pk := range pks {\n\t\t\tif i != 0 {\n\t\t\t\tsb.WriteString(\", \")\n\t\t\t}\n\t\t\tsb.WriteString(pk.Name)\n\t\t}\n\t\tsb.WriteRune(')')\n\t} else {\n\t\tsb.WriteString(pks[0].Name)\n\t}\n\n\tif len(cks) > 0 {\n\t\tsb.WriteString(\", \")\n\t\tfor i, ck := range cks {\n\t\t\tif i != 0 {\n\t\t\t\tsb.WriteString(\", \")\n\t\t\t}\n\t\t\tsb.WriteString(ck.Name)\n\t\t}\n\t}\n\n\treturn sb.String()\n}\n\ntype scyllaEncryptionOptions struct {\n\tCipherAlgorithm   string `json:\"cipher_algorithm\"`\n\tKeyProvider       string `json:\"key_provider\"`\n\tSecretKeyFile     string `json:\"secret_key_file\"`\n\tSecretKeyStrength int    `json:\"secret_key_strength\"`\n}\n\n// UnmarshalBinary deserializes blob into scyllaEncryptionOptions.\n// Format:\n//   - 4 bytes - size of KV map\n//     Size times:\n//   - 4 bytes - length of key\n//   - len_of_key bytes - key\n//   - 4 bytes - length of value\n//   - len_of_value bytes - value\nfunc (enc *scyllaEncryptionOptions) UnmarshalBinary(data []byte) error {\n\tsize := binary.LittleEndian.Uint32(data[0:4])\n\n\tm := make(map[string]string, size)\n\n\toff := uint32(4)\n\tfor i := uint32(0); i < size; i++ {\n\t\tkeyLen := binary.LittleEndian.Uint32(data[off : off+4])\n\t\toff += 4\n\n\t\tkey := string(data[off : off+keyLen])\n\t\toff += keyLen\n\n\t\tvalueLen := binary.LittleEndian.Uint32(data[off : 
off+4])\n\t\toff += 4\n\n\t\tvalue := string(data[off : off+valueLen])\n\t\toff += valueLen\n\n\t\tm[key] = value\n\t}\n\n\tenc.CipherAlgorithm = m[\"cipher_algorithm\"]\n\tenc.KeyProvider = m[\"key_provider\"]\n\tenc.SecretKeyFile = m[\"secret_key_file\"]\n\tif secretKeyStrength, ok := m[\"secret_key_strength\"]; ok {\n\t\tsks, err := strconv.Atoi(secretKeyStrength)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tenc.SecretKeyStrength = sks\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "recreate_test.go",
    "content": "//go:build integration\n// +build integration\n\n// Copyright (C) 2017 ScyllaDB\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\n\tfrm \"github.com/gocql/gocql/internal/frame\"\n)\n\nvar updateGolden = flag.Bool(\"update-golden\", false, \"update golden files\")\n\nfunc TestRecreateSchema(t *testing.T) {\n\tt.Parallel()\n\n\tfailsOnOldScylla := false\n\tif *flagDistribution == \"scylla\" && flagCassVersion.Before(2024, 0, 0) {\n\t\tfailsOnOldScylla = true\n\t}\n\n\tsession := createSessionFromClusterTabletsDisabled(createCluster(), t)\n\tdefer session.Close()\n\n\tgetStmtFromCluster := isDescribeKeyspaceSupported(t, session)\n\ttabletsAutoEnabled := isTabletsSupported() && isTabletsAutoEnabled()\n\n\ttcs := []struct {\n\t\tName            string\n\t\tFixedKeyspace   string // original keyspace name used in .cql files\n\t\tFailWithTablets bool\n\t\tInput           string\n\t\tGolden          string\n\t}{\n\t\t{\n\t\t\tName:          \"Keyspace\",\n\t\t\tFixedKeyspace: \"gocqlx_keyspace\",\n\t\t\tInput:         \"testdata/recreate/keyspace.cql\",\n\t\t\tGolden:        \"testdata/recreate/keyspace_golden.cql\",\n\t\t},\n\t\t{\n\t\t\tName:          \"Table\",\n\t\t\tFixedKeyspace: \"gocqlx_table\",\n\t\t\tInput:         \"testdata/recreate/table.cql\",\n\t\t\tGolden:        \"testdata/recreate/table_golden.cql\",\n\t\t},\n\t\t{\n\t\t\tName:            \"Materialized Views\",\n\t\t\tFixedKeyspace:   \"gocqlx_mv\",\n\t\t\tFailWithTablets: failsOnOldScylla,\n\t\t\tInput:           \"testdata/recreate/materialized_views.cql\",\n\t\t\tGolden:          \"testdata/recreate/materialized_views_golden.cql\",\n\t\t},\n\t\t{\n\t\t\tName:            \"Index\",\n\t\t\tFixedKeyspace:   \"gocqlx_idx\",\n\t\t\tFailWithTablets: failsOnOldScylla,\n\t\t\tInput:           \"testdata/recreate/index.cql\",\n\t\t\tGolden:          
\"testdata/recreate/index_golden.cql\",\n\t\t},\n\t\t{\n\t\t\tName:            \"Secondary Index\",\n\t\t\tFixedKeyspace:   \"gocqlx_sec_idx\",\n\t\t\tFailWithTablets: failsOnOldScylla,\n\t\t\tInput:           \"testdata/recreate/secondary_index.cql\",\n\t\t\tGolden:          \"testdata/recreate/secondary_index_golden.cql\",\n\t\t},\n\t\t{\n\t\t\tName:          \"UDT\",\n\t\t\tFixedKeyspace: \"gocqlx_udt\",\n\t\t\tInput:         \"testdata/recreate/udt.cql\",\n\t\t\tGolden:        \"testdata/recreate/udt_golden.cql\",\n\t\t},\n\t\t{\n\t\t\tName:          \"Aggregates\",\n\t\t\tFixedKeyspace: \"gocqlx_aggregates\",\n\t\t\tInput:         \"testdata/recreate/aggregates.cql\",\n\t\t\tGolden:        \"testdata/recreate/aggregates_golden.cql\",\n\t\t},\n\t}\n\n\tfor i := range tcs {\n\t\ttest := tcs[i]\n\t\tt.Run(test.Name, func(t *testing.T) {\n\t\t\tif test.Name == \"UDT\" && *flagDistribution == \"scylla\" && flagCassVersion.Major == 2024 && flagCassVersion.Minor == 1 {\n\t\t\t\tt.Skip(\"Doesn't work properly on Scylla 2024.1 due to https://github.com/scylladb/scylladb/issues/26761\")\n\t\t\t}\n\n\t\t\t// Generate a unique keyspace name to avoid collisions under parallel execution.\n\t\t\t// Replace the fixed keyspace name in CQL input/golden files with the unique name.\n\t\t\tks := testKeyspaceName(t)\n\t\t\tcleanup(t, session, ks)\n\n\t\t\tin, err := os.ReadFile(test.Input)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\t// Substitute the fixed keyspace name in the CQL input with the unique name.\n\t\t\tinStr := strings.ReplaceAll(string(in), test.FixedKeyspace, ks)\n\n\t\t\tqueries := trimQueries(strings.Split(inStr, \";\"))\n\t\t\tfor _, q := range queries {\n\t\t\t\tqr := session.Query(q, nil)\n\t\t\t\terr = qr.Exec()\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tqr.Release()\n\t\t\t}\n\n\t\t\terr = session.AwaitSchemaAgreement(context.Background())\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"failed to await for schema agreement\", 
err)\n\t\t\t}\n\t\t\terr = session.metadataDescriber.refreshKeyspaceSchema(ks)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"failed to read schema for keyspace\", err)\n\t\t\t}\n\n\t\t\tif tabletsAutoEnabled && test.FailWithTablets {\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Errorf(\"did not get expected error or tablets\")\n\t\t\t\t} else if strings.Contains(err.Error(), \"not supported\") && strings.Contains(err.Error(), \"tablets\") {\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\tt.Fatal(\"query failed with unexpected error\", err)\n\t\t\t\t}\n\t\t\t} else if err != nil {\n\t\t\t\tt.Fatal(\"invalid input query\", err)\n\t\t\t}\n\n\t\t\tkm, err := session.KeyspaceMetadata(ks)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"dump schema\", err)\n\t\t\t}\n\t\t\tdump, err := km.ToCQL()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"recreate schema\", err)\n\t\t\t}\n\n\t\t\tdump = trimSchema(dump)\n\n\t\t\t// Normalize the dump back to the fixed keyspace name for comparison with golden files.\n\t\t\tdump = strings.ReplaceAll(dump, ks, test.FixedKeyspace)\n\n\t\t\tvar golden []byte\n\t\t\tif getStmtFromCluster {\n\t\t\t\tgolden, err = getCreateStatements(session, ks)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tgolden = []byte(trimSchema(string(golden)))\n\t\t\t\t// Normalize from the cluster's unique keyspace name back to fixed name.\n\t\t\t\tgolden = []byte(strings.ReplaceAll(string(golden), ks, test.FixedKeyspace))\n\t\t\t} else {\n\t\t\t\tif *updateGolden {\n\t\t\t\t\tif err := os.WriteFile(test.Golden, []byte(dump), 0644); err != nil {\n\t\t\t\t\t\tt.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tgolden, err = os.ReadFile(test.Golden)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tgolden = []byte(trimSchema(string(golden)))\n\t\t\t}\n\n\t\t\tgoldenQueries := trimQueries(sortQueries(strings.Split(string(golden), \";\")))\n\t\t\tdumpQueries := trimQueries(sortQueries(strings.Split(dump, \";\")))\n\n\t\t\tif len(goldenQueries) != 
len(dumpQueries) {\n\t\t\t\tt.Fatalf(\"Expected len(dumpQueries) to be %d, got %d\", len(goldenQueries), len(dumpQueries))\n\t\t\t}\n\t\t\t// Compare with golden\n\t\t\tfor i, dq := range dumpQueries {\n\t\t\t\tgq := goldenQueries[i]\n\n\t\t\t\tif diff := cmp.Diff(gq, dq); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"dumpQueries[%d] diff\\n%s\", i, diff)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Exec dumped queries to check if they are CQL-correct.\n\t\t\t// Substitute the fixed keyspace name back to the unique name for execution.\n\t\t\tcleanup(t, session, ks)\n\t\t\tsession.metadataDescriber.invalidateKeyspaceSchema(ks)\n\n\t\t\tfor _, q := range trimQueries(strings.Split(strings.ReplaceAll(dump, test.FixedKeyspace, ks), \";\")) {\n\t\t\t\tqr := session.Query(q, nil)\n\t\t\t\tif err := qr.Exec(); err != nil {\n\t\t\t\t\tt.Fatal(\"invalid dump query\", q, err)\n\t\t\t\t}\n\t\t\t\tqr.Release()\n\t\t\t}\n\n\t\t\t// Check if new dump is the same as previous\n\t\t\terr = session.AwaitSchemaAgreement(context.Background())\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"failed to await for schema agreement\", err)\n\t\t\t}\n\t\t\terr = session.metadataDescriber.refreshKeyspaceSchema(ks)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"failed to read schema for keyspace\", err)\n\t\t\t}\n\t\t\tkm, err = session.KeyspaceMetadata(ks)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"dump schema\", err)\n\t\t\t}\n\t\t\tsecondDump, err := km.ToCQL()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"recreate schema\", err)\n\t\t\t}\n\n\t\t\tsecondDump = trimSchema(secondDump)\n\t\t\t// Normalize the second dump back to fixed keyspace name for comparison.\n\t\t\tsecondDump = strings.ReplaceAll(secondDump, ks, test.FixedKeyspace)\n\n\t\t\tsecondDumpQueries := trimQueries(sortQueries(strings.Split(secondDump, \";\")))\n\n\t\t\tif !cmp.Equal(secondDumpQueries, dumpQueries) {\n\t\t\t\tt.Errorf(\"first dump and second one differs: %s\", cmp.Diff(secondDumpQueries, dumpQueries))\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc 
isDescribeKeyspaceSupported(t *testing.T, s *Session) bool {\n\tt.Helper()\n\n\terr := s.control.query(fmt.Sprintf(`DESCRIBE KEYSPACE system WITH INTERNALS`)).Close()\n\tif err != nil {\n\t\tif errFrame, ok := err.(frm.ErrorFrame); ok && errFrame.Code == ErrCodeSyntax {\n\t\t\t// DESCRIBE KEYSPACE is not supported on older versions of Cassandra and Scylla\n\t\t\t// For such case schema statement is going to be recreated on the client side\n\t\t\treturn false\n\t\t}\n\t\tt.Fatalf(\"error querying keyspace schema: %v\", err)\n\t}\n\treturn true\n}\n\nfunc TestScyllaEncryptionOptionsUnmarshaller(t *testing.T) {\n\tt.Parallel()\n\n\tconst (\n\t\tinput  = \"testdata/recreate/scylla_encryption_options.bin\"\n\t\tgolden = \"testdata/recreate/scylla_encryption_options_golden.json\"\n\t)\n\n\tinputBuf, err := os.ReadFile(input)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tgoldenBuf, err := os.ReadFile(golden)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tgoldenOpts := &scyllaEncryptionOptions{}\n\tif err := json.Unmarshal(goldenBuf, goldenOpts); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\topts := &scyllaEncryptionOptions{}\n\tif err := opts.UnmarshalBinary(inputBuf); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif !cmp.Equal(goldenOpts, opts) {\n\t\tt.Error(cmp.Diff(goldenOpts, opts))\n\t}\n\n}\n\nfunc cleanup(t *testing.T, session *Session, keyspace string) {\n\tqr := session.Query(`DROP KEYSPACE IF EXISTS ` + keyspace)\n\tif err := qr.Exec(); err != nil {\n\t\tt.Fatalf(\"unable to drop keyspace: %v\", err)\n\t}\n\tqr.Release()\n}\n\nfunc sortQueries(in []string) []string {\n\tq := trimQueries(in)\n\tsort.Strings(q)\n\treturn q\n}\n\nfunc trimQueries(in []string) []string {\n\tqueries := in[:0]\n\tfor _, q := range in {\n\t\tq = strings.TrimSpace(q)\n\t\tif q == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif len(q) != 0 {\n\t\t\tqueries = append(queries, q)\n\t\t}\n\t}\n\treturn queries\n}\n\nvar schemaVersion = regexp.MustCompile(` WITH ID = [0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}[ 
\\t\\n]+AND`)\n\nfunc trimSchema(s string) string {\n\t// Remove temporary items from the scheme, in particular schema version:\n\t// ) WITH ID = cf0364d0-3b85-11ef-b79d-80a2ee1928c0\n\treturn strings.ReplaceAll(schemaVersion.ReplaceAllString(s, \" WITH\"), \"\\n\\n\", \"\\n\")\n}\n"
  },
  {
    "path": "renovate.json",
    "content": "{\n  \"$schema\": \"https://docs.renovatebot.com/renovate-schema.json\",\n  \"extends\": [\n    \"config:recommended\"\n  ],\n  \"prConcurrentLimit\": 2\n}\n"
  },
  {
    "path": "ring_describer.go",
    "content": "package gocql\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n)\n\n// Polls system.peers at a specific interval to find new hosts\ntype ringDescriber struct {\n\tcontrol controlConnection\n\tlogger  StdLogger\n\tcfg     *ClusterConfig\n\t// hosts are the set of all hosts in the cassandra ring that we know of.\n\t// key of map is host_id.\n\thosts map[string]*HostInfo\n\t// hostIPToUUID maps host native address to host_id.\n\thostIPToUUID    map[string]string\n\tprevPartitioner string\n\tprevHosts       []*HostInfo\n\tmu              sync.RWMutex\n}\n\nfunc (r *ringDescriber) setControlConn(c controlConnection) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tr.control = c\n}\n\n// Ask the control node for the local host info\nfunc (r *ringDescriber) getLocalHostInfo(conn ConnInterface) (*HostInfo, error) {\n\titer := conn.querySystem(context.TODO(), qrySystemLocal)\n\n\tif iter == nil {\n\t\treturn nil, errNoControl\n\t}\n\n\thost, err := hostInfoFromIter(iter, r.cfg.Port)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not retrieve local host info: %w\", err)\n\t}\n\treturn host, nil\n}\n\n// Ask the control node for host info on all it's known peers\nfunc (r *ringDescriber) getClusterPeerInfo(localHost *HostInfo, c ConnInterface) ([]*HostInfo, error) {\n\tvar iter *Iter\n\tif c.getIsSchemaV2() {\n\t\titer = c.querySystem(context.TODO(), qrySystemPeersV2)\n\t} else {\n\t\titer = c.querySystem(context.TODO(), qrySystemPeers)\n\t}\n\n\tif iter == nil {\n\t\treturn nil, errNoControl\n\t}\n\tdefer iter.Close()\n\n\trows, err := iter.SliceMap()\n\tif err != nil {\n\t\t// TODO(zariel): make typed error\n\t\treturn nil, fmt.Errorf(\"unable to fetch peer host info: %s\", err)\n\t}\n\n\treturn getPeersFromQuerySystemPeers(rows, r.cfg.Port, r.logger)\n}\n\nfunc getPeersFromQuerySystemPeers(querySystemPeerRows []map[string]any, defaultPort int, logger StdLogger) ([]*HostInfo, error) {\n\tvar peers []*HostInfo\n\n\tfor _, row := range querySystemPeerRows 
{\n\t\t// extract all available info about the peer\n\t\thost, err := hostInfoFromMap(row, defaultPort)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if !isValidPeer(host) {\n\t\t\t// If it's not a valid peer\n\t\t\tlogger.Printf(\"Found invalid peer '%s' \"+\n\t\t\t\t\"Likely due to a gossip or snitch issue, this host will be ignored\", host)\n\t\t\tcontinue\n\t\t} else if isZeroToken(host) {\n\t\t\tcontinue\n\t\t}\n\n\t\tpeers = append(peers, host)\n\t}\n\n\treturn peers, nil\n}\n\n// Return true if the host is a valid peer\nfunc isValidPeer(host *HostInfo) bool {\n\treturn !(len(host.RPCAddress()) == 0 ||\n\t\thost.hostId.IsEmpty() ||\n\t\thost.dataCenter == \"\" ||\n\t\thost.rack == \"\")\n}\n\nfunc isZeroToken(host *HostInfo) bool {\n\treturn len(host.tokens) == 0\n}\n\n// GetHostsFromSystem returns a list of hosts found via queries to system.local and system.peers\nfunc (r *ringDescriber) GetHostsFromSystem() ([]*HostInfo, string, error) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.control == nil {\n\t\treturn r.prevHosts, r.prevPartitioner, errNoControl\n\t}\n\n\tch := r.control.getConn()\n\tlocalHost, err := r.getLocalHostInfo(ch.conn)\n\tif err != nil {\n\t\treturn r.prevHosts, r.prevPartitioner, err\n\t}\n\n\tpeerHosts, err := r.getClusterPeerInfo(localHost, ch.conn)\n\tif err != nil {\n\t\treturn r.prevHosts, r.prevPartitioner, err\n\t}\n\n\tvar hosts []*HostInfo\n\tif !isZeroToken(localHost) {\n\t\thosts = []*HostInfo{localHost}\n\t}\n\thosts = append(hosts, peerHosts...)\n\n\tvar partitioner string\n\tif len(hosts) > 0 {\n\t\tpartitioner = hosts[0].Partitioner()\n\t}\n\n\tr.prevHosts = hosts\n\tr.prevPartitioner = partitioner\n\n\treturn hosts, partitioner, nil\n}\n\nfunc (r *ringDescriber) getHostByIP(ip string) (*HostInfo, bool) {\n\tr.mu.RLock()\n\tdefer r.mu.RUnlock()\n\thi, ok := r.hostIPToUUID[ip]\n\treturn r.hosts[hi], ok\n}\n\nfunc (r *ringDescriber) getHost(hostID string) *HostInfo {\n\tr.mu.RLock()\n\thost := 
r.hosts[hostID]\n\tr.mu.RUnlock()\n\treturn host\n}\n\nfunc (r *ringDescriber) getHostsList() []*HostInfo {\n\tr.mu.RLock()\n\thosts := make([]*HostInfo, 0, len(r.hosts))\n\tfor _, host := range r.hosts {\n\t\thosts = append(hosts, host)\n\t}\n\tr.mu.RUnlock()\n\treturn hosts\n}\n\nfunc (r *ringDescriber) getHostsMap() map[string]*HostInfo {\n\tr.mu.RLock()\n\thosts := make(map[string]*HostInfo, len(r.hosts))\n\tfor k, v := range r.hosts {\n\t\thosts[k] = v\n\t}\n\tr.mu.RUnlock()\n\treturn hosts\n}\n\nfunc (r *ringDescriber) addOrUpdate(host *HostInfo) *HostInfo {\n\tif existingHost, ok := r.addHostIfMissing(host); ok {\n\t\texistingHost.update(host)\n\t\thost = existingHost\n\t}\n\treturn host\n}\n\nfunc (r *ringDescriber) addHostIfMissing(host *HostInfo) (*HostInfo, bool) {\n\tif !validIpAddr(host.ConnectAddress()) {\n\t\tpanic(fmt.Sprintf(\"invalid host: %v\", host))\n\t}\n\thostID := host.HostID()\n\n\tr.mu.Lock()\n\tif r.hosts == nil {\n\t\tr.hosts = make(map[string]*HostInfo)\n\t}\n\tif r.hostIPToUUID == nil {\n\t\tr.hostIPToUUID = make(map[string]string)\n\t}\n\n\texisting, ok := r.hosts[hostID]\n\tif !ok {\n\t\tr.hosts[hostID] = host\n\t\tr.hostIPToUUID[host.nodeToNodeAddress().String()] = hostID\n\t\texisting = host\n\t}\n\tr.mu.Unlock()\n\treturn existing, ok\n}\n\nfunc (r *ringDescriber) removeHost(hostID string) bool {\n\tr.mu.Lock()\n\tif r.hosts == nil {\n\t\tr.hosts = make(map[string]*HostInfo)\n\t}\n\tif r.hostIPToUUID == nil {\n\t\tr.hostIPToUUID = make(map[string]string)\n\t}\n\n\th, ok := r.hosts[hostID]\n\tif ok {\n\t\tdelete(r.hostIPToUUID, h.nodeToNodeAddress().String())\n\t}\n\tdelete(r.hosts, hostID)\n\tr.mu.Unlock()\n\treturn ok\n}\n"
  },
  {
    "path": "ring_describer_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/internal/tests\"\n\n\t\"github.com/gocql/gocql/internal/tests/mock\"\n)\n\nfunc TestGetClusterPeerInfoZeroToken(t *testing.T) {\n\tt.Parallel()\n\n\tschema_version1 := ParseUUIDMust(\"af810386-a694-11ef-81fa-3aea73156247\")\n\n\tpeersRows := []map[string]any{\n\t\t{\n\t\t\t\"data_center\":     \"datacenter1\",\n\t\t\t\"host_id\":         ParseUUIDMust(\"b2035fd9-e0ca-4857-8c45-e63c00fb7c43\"),\n\t\t\t\"peer\":            \"127.0.0.3\",\n\t\t\t\"preferred_ip\":    \"127.0.0.3\",\n\t\t\t\"rack\":            \"rack1\",\n\t\t\t\"release_version\": \"3.0.8\",\n\t\t\t\"rpc_address\":     \"127.0.0.3\",\n\t\t\t\"schema_version\":  schema_version1,\n\t\t\t\"tokens\":          []string{\"-1296227678594315580994457470329811265\"},\n\t\t},\n\t\t{\n\t\t\t\"data_center\":     \"datacenter1\",\n\t\t\t\"host_id\":         ParseUUIDMust(\"4b21ee4c-acea-4267-8e20-aaed5361a0dd\"),\n\t\t\t\"peer\":            \"127.0.0.2\",\n\t\t\t\"preferred_ip\":    \"127.0.0.2\",\n\t\t\t\"rack\":            \"rack1\",\n\t\t\t\"release_version\": \"3.0.8\",\n\t\t\t\"rpc_address\":     \"127.0.0.2\",\n\t\t\t\"schema_version\":  schema_version1,\n\t\t\t\"tokens\":          []string{\"-1129762924682054333\"},\n\t\t},\n\t\t{\n\t\t\t\"data_center\":     \"datacenter2\",\n\t\t\t\"host_id\":         ParseUUIDMust(\"dfef4a22-b8d8-47e9-aee5-8c19d4b7a9e3\"),\n\t\t\t\"peer\":            \"127.0.0.5\",\n\t\t\t\"preferred_ip\":    \"127.0.0.5\",\n\t\t\t\"rack\":            \"rack1\",\n\t\t\t\"release_version\": \"3.0.8\",\n\t\t\t\"rpc_address\":     \"127.0.0.5\",\n\t\t\t\"schema_version\":  schema_version1,\n\t\t\t\"tokens\":          []string{},\n\t\t},\n\t}\n\n\tvar logger StdLogger\n\tt.Run(\"OmitOneZeroTokenNode\", func(t *testing.T) {\n\t\tpeers, err := 
getPeersFromQuerySystemPeers(\n\t\t\tpeersRows,\n\t\t\t9042,\n\t\t\tlogger,\n\t\t)\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unable to get peers: %v\", err)\n\t\t}\n\t\ttests.AssertEqual(t, \"peers length\", 2, len(peers))\n\t})\n\n\tt.Run(\"NoZeroTokenNodes\", func(t *testing.T) {\n\t\tpeersRows[2][\"tokens\"] = []string{\"-1129762924682054333\"}\n\t\tpeers, err := getPeersFromQuerySystemPeers(\n\t\t\tpeersRows,\n\t\t\t9042,\n\t\t\tlogger,\n\t\t)\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unable to get peers: %v\", err)\n\t\t}\n\t\ttests.AssertEqual(t, \"peers length\", 3, len(peers))\n\t})\n}\n\ntype mockConnection struct{}\n\nfunc (*mockConnection) Close() {}\nfunc (*mockConnection) exec(ctx context.Context, req frameBuilder, tracer Tracer, requestTimeout time.Duration) (*framer, error) {\n\treturn nil, nil\n}\nfunc (*mockConnection) awaitSchemaAgreement(ctx context.Context) error     { return nil }\nfunc (*mockConnection) executeQuery(ctx context.Context, qry *Query) *Iter { return nil }\n\nvar systemLocalResultMetadata = resultMetadata{\n\tflags:          0,\n\tpagingState:    []byte{},\n\tactualColCount: 18,\n\tcolumns: []ColumnInfo{{\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"key\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"bootstrapped\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"broadcast_address\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeInet},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"cluster_name\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"cql_version\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    
\"local\",\n\t\tName:     \"data_center\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"gossip_generation\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeInt},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"host_id\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeUUID},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"listen_address\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeInet},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"native_protocol_version\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"partitioner\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"rack\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"release_version\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"rpc_address\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeInet},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"schema_version\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeUUID},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"supported_features\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"tokens\",\n\t\tTypeInfo: CollectionType{\n\t\t\tNativeType: NativeType{proto: protoVersion4, typ: TypeSet},\n\t\t\tElem:       NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t\t},\n\t}, 
{\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"truncated_at\",\n\t\tTypeInfo: CollectionType{\n\t\t\tNativeType: NativeType{proto: protoVersion4, typ: TypeMap},\n\n\t\t\tKey:  NativeType{proto: protoVersion4, typ: TypeUUID},\n\t\t\tElem: NativeType{proto: protoVersion4, typ: TypeBlob},\n\t\t},\n\t}},\n}\n\nvar systemPeersResultMetadata = resultMetadata{\n\tflags:          0,\n\tpagingState:    []byte{},\n\tactualColCount: 10,\n\tcolumns: []ColumnInfo{{\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"peer\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeInet},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"data_center\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"host_id\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeUUID},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"preferred_ip\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeInet},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"rack\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"release_version\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"rpc_address\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeInet},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"schema_version\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeUUID},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"supported_features\",\n\t\tTypeInfo: NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t}, {\n\t\tKeyspace: \"system\",\n\t\tTable:    \"local\",\n\t\tName:     \"tokens\",\n\t\tTypeInfo: 
CollectionType{\n\t\t\tNativeType: NativeType{proto: protoVersion4, typ: TypeSet},\n\t\t\tElem:       NativeType{proto: protoVersion4, typ: TypeVarchar},\n\t\t},\n\t}},\n}\n\nfunc (*mockConnection) querySystem(ctx context.Context, query string, values ...any) *Iter {\n\tlocalData := []any{\"local\", \"COMPLETED\", net.IPv4(192, 168, 100, 12), \"\", \"3.3.1\", \"datacenter1\", 1733834239, ParseUUIDMust(\"045859a7-6b9f-4efd-a5e7-acd64a295e13\"), net.IPv4(192, 168, 100, 12), \"4\", \"org.apache.cassandra.dht.Murmur3Partitioner\", \"rack1\", \"3.0.8\", net.IPv4(192, 168, 100, 12), ParseUUIDMust(\"daf4df2c-b708-11ef-5c25-3004361afd71\"), \"\", []string{}, map[UUID]byte{}}\n\tpeerData1 := []any{net.IPv4(192, 168, 100, 13), \"datacenter1\", ParseUUIDMust(\"b953309f-6e68-41f2-baf5-0e60da317a9c\"), net.IP{}, \"rack1\", \"3.0.8\", net.IPv4(192, 168, 100, 13), ParseUUIDMust(\"b6ed5bde-b318-11ef-8f58-aeba19e31273\"), \"\", []string{\"-1032311531684407545\", \"-1112089412567859825\"}}\n\tpeerData2 := []any{net.IPv4(192, 168, 100, 14), \"datacenter1\", ParseUUIDMust(\"8269e111-ea38-44bd-a73f-9d3d12cfaf78\"), net.IP{}, \"rack1\", \"3.0.8\", net.IPv4(192, 168, 100, 14), ParseUUIDMust(\"b6ed5bde-b318-11ef-8f58-aeba19e31273\"), \"\", []string{}}\n\n\tif query == \"SELECT * FROM system.local WHERE key='local'\" {\n\t\treturn &Iter{\n\t\t\tmeta:    systemLocalResultMetadata,\n\t\t\tframer:  &mock.MockFramer{Data: marshalMetadataMust(systemLocalResultMetadata, localData)},\n\t\t\tnumRows: 1,\n\t\t\tnext:    nil,\n\t\t}\n\t} else if query == \"SELECT * FROM system.peers\" {\n\t\treturn &Iter{\n\t\t\tmeta:    systemPeersResultMetadata,\n\t\t\tframer:  &mock.MockFramer{Data: append(marshalMetadataMust(systemPeersResultMetadata, peerData1), marshalMetadataMust(systemPeersResultMetadata, peerData2)...)},\n\t\t\tnumRows: 2,\n\t\t\tnext:    nil,\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (*mockConnection) getIsSchemaV2() bool { return false }\nfunc (*mockConnection) setSchemaV2(s bool)  {}\nfunc 
(*mockConnection) getScyllaSupported() ScyllaConnectionFeatures {\n\treturn ScyllaConnectionFeatures{}\n}\n\ntype mockControlConn struct{}\n\nfunc (m *mockControlConn) querySystem(statement string, values ...any) (iter *Iter) {\n\treturn nil\n}\n\nfunc (m *mockControlConn) reconnect() error {\n\treturn nil\n}\n\nfunc (m *mockControlConn) getConn() *connHost {\n\treturn &connHost{\n\t\tconn: &mockConnection{},\n\t\thost: &HostInfo{},\n\t}\n}\n\nfunc (m *mockControlConn) awaitSchemaAgreement() error                        { return nil }\nfunc (m *mockControlConn) query(statement string, values ...any) (iter *Iter) { return nil }\nfunc (m *mockControlConn) discoverProtocol(hosts []*HostInfo) (int, error)    { return 0, nil }\nfunc (m *mockControlConn) connect(hosts []*HostInfo) error                    { return nil }\nfunc (m *mockControlConn) close()                                             {}\nfunc (m *mockControlConn) getSession() *Session                               { return nil }\n\nfunc marshalMetadataMust(metadata resultMetadata, data []any) [][]byte {\n\tif len(metadata.columns) != len(data) {\n\t\tpanic(\"metadata length mismatch\")\n\t}\n\tres := make([][]byte, len(metadata.columns))\n\tfor id, col := range metadata.columns {\n\t\tvar err error\n\t\tvalue := data[id]\n\t\tres[id], err = Marshal(col.TypeInfo, value)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"unable to marshal column %d: %v\", id, err))\n\t\t}\n\t}\n\treturn res\n}\n\ntype trackingRingConnection struct {\n\titer     *Iter\n\tschemaV2 bool\n}\n\nfunc (*trackingRingConnection) Close() {}\nfunc (*trackingRingConnection) exec(context.Context, frameBuilder, Tracer, time.Duration) (*framer, error) {\n\treturn nil, nil\n}\nfunc (*trackingRingConnection) awaitSchemaAgreement(context.Context) error { return nil }\nfunc (*trackingRingConnection) executeQuery(context.Context, *Query) *Iter { return nil }\nfunc (c *trackingRingConnection) querySystem(context.Context, string, ...any) *Iter 
{\n\treturn c.iter\n}\nfunc (c *trackingRingConnection) getIsSchemaV2() bool { return c.schemaV2 }\nfunc (*trackingRingConnection) setSchemaV2(bool)      {}\nfunc (*trackingRingConnection) getScyllaSupported() ScyllaConnectionFeatures {\n\treturn ScyllaConnectionFeatures{}\n}\n\nfunc TestMockGetHostsFromSystem(t *testing.T) {\n\tt.Parallel()\n\n\tr := &ringDescriber{control: &mockControlConn{}, cfg: &ClusterConfig{}}\n\n\thosts, _, err := r.GetHostsFromSystem()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to get hosts: %v\", err)\n\t}\n\n\t// local host and one of the peers are zero token so only one peer should be returned with 2 tokens\n\ttests.AssertEqual(t, \"hosts length\", 1, len(hosts))\n\ttests.AssertEqual(t, \"host token length\", 2, len(hosts[0].tokens))\n}\n\nfunc TestRingDescriberGetClusterPeerInfoClosesIter(t *testing.T) {\n\tt.Parallel()\n\n\trow := []any{\n\t\tnet.IPv4(192, 168, 100, 13),\n\t\t\"datacenter1\",\n\t\tParseUUIDMust(\"b953309f-6e68-41f2-baf5-0e60da317a9c\"),\n\t\tnet.IP{},\n\t\t\"rack1\",\n\t\t\"3.0.8\",\n\t\tnet.IPv4(192, 168, 100, 13),\n\t\tParseUUIDMust(\"b6ed5bde-b318-11ef-8f58-aeba19e31273\"),\n\t\t\"\",\n\t\t[]string{\"-1032311531684407545\"},\n\t}\n\tframer := &trackingMockFramer{\n\t\tMockFramer: mock.MockFramer{Data: marshalMetadataMust(systemPeersResultMetadata, row)},\n\t}\n\tr := &ringDescriber{cfg: &ClusterConfig{}}\n\n\tpeers, err := r.getClusterPeerInfo(&HostInfo{}, &trackingRingConnection{\n\t\titer: &Iter{\n\t\t\tmeta:    systemPeersResultMetadata,\n\t\t\tframer:  framer,\n\t\t\tnumRows: 1,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif len(peers) != 1 {\n\t\tt.Fatalf(\"expected 1 peer, got %d\", len(peers))\n\t}\n\tif !framer.released {\n\t\tt.Fatal(\"expected iterator framer to be released\")\n\t}\n}\n\nfunc TestRing_AddHostIfMissing_Missing(t *testing.T) {\n\tt.Parallel()\n\n\tring := &ringDescriber{}\n\n\thost := &HostInfo{hostId: MustRandomUUID(), connectAddress: net.IPv4(1, 1, 
1, 1)}\n\th1, ok := ring.addHostIfMissing(host)\n\tif ok {\n\t\tt.Fatal(\"host was reported as already existing\")\n\t} else if !h1.Equal(host) {\n\t\tt.Fatalf(\"hosts not equal that are returned %v != %v\", h1, host)\n\t} else if h1 != host {\n\t\tt.Fatalf(\"returned host same pointer: %p != %p\", h1, host)\n\t}\n}\n\nfunc TestRing_AddHostIfMissing_Existing(t *testing.T) {\n\tt.Parallel()\n\n\tring := &ringDescriber{}\n\n\thost := &HostInfo{hostId: MustRandomUUID(), connectAddress: net.IPv4(1, 1, 1, 1)}\n\tring.addHostIfMissing(host)\n\n\th2 := &HostInfo{hostId: host.hostId, connectAddress: net.IPv4(2, 2, 2, 2)}\n\n\th1, ok := ring.addHostIfMissing(h2)\n\tif !ok {\n\t\tt.Fatal(\"host was not reported as already existing\")\n\t} else if !h1.Equal(host) {\n\t\tt.Fatalf(\"hosts not equal that are returned %v != %v\", h1, host)\n\t} else if h1 != host {\n\t\tt.Fatalf(\"returned host same pointer: %p != %p\", h1, host)\n\t}\n}\n"
  },
  {
    "path": "schema_queries_test.go",
    "content": "//go:build integration\n// +build integration\n\npackage gocql\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gocql/gocql/internal/tests\"\n)\n\nfunc TestSchemaQueries(t *testing.T) {\n\tt.Parallel()\n\n\tcluster := createCluster()\n\n\tfallback := RoundRobinHostPolicy()\n\tcluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(fallback)\n\n\tsession := createSessionFromCluster(cluster, t)\n\tdefer session.Close()\n\n\tkeyspaceMetadata, err := session.metadataDescriber.GetKeyspace(\"gocql_test\")\n\tif err != nil {\n\t\tt.Fatal(\"unable to get keyspace metadata for keyspace: \", err)\n\t}\n\ttests.AssertTrue(t, \"keyspace present in metadataDescriber\", keyspaceMetadata.Name == \"gocql_test\")\n}\n"
  },
  {
    "path": "scylla.go",
    "content": "package gocql\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/internal/debug\"\n)\n\n// ScyllaFeatures represents Scylla connection options as sent in SUPPORTED\n// frame.\n// FIXME: Should also follow `cqlProtocolExtension` interface.\ntype ScyllaConnectionFeatures struct {\n\tScyllaHostFeatures\n\t// Comes from SCYLLA_SHARD\n\tshard int\n}\n\nfunc (f ScyllaConnectionFeatures) Shard() int {\n\treturn f.shard\n}\n\ntype ScyllaHostFeatures struct {\n\t// Comes from SCYLLA_PARTITIONER\n\tpartitioner string\n\t// Comes from SCYLLA_SHARDING_ALGORITHM\n\tshardingAlgorithm string\n\t// Comes from SCYLLA_NR_SHARDS\n\tnrShards int\n\t// Comes from SCYLLA_SHARDING_IGNORE_MSB\n\tmsbIgnore uint64\n\t// Comes from SCYLLA_LWT_ADD_METADATA_MARK.LWT_OPTIMIZATION_META_BIT_MASK\n\tlwtFlagMask int\n\t// Comes from SCYLLA_RATE_LIMIT_ERROR.ERROR_CODE\n\trateLimitErrorCode int\n\t// Comes from SCYLLA_SHARD_AWARE_PORT\n\tshardAwarePort uint16\n\t// Comes from SCYLLA_SHARD_AWARE_PORT_SSL\n\tshardAwarePortTLS uint16\n\t// Comes from SCYLLA_USE_METADATA_ID\n\t// Signals that host supports proper prepared statement metadata invalidation read more at:\n\t// https://github.com/scylladb/scylladb/issues/20860\n\t// https://github.com/scylladb/scylladb/pull/23292\n\tisMetadataIDSupported bool\n}\n\nfunc (f ScyllaHostFeatures) IsPresent() bool {\n\treturn f.nrShards != 0\n}\n\nfunc (f ScyllaHostFeatures) Partitioner() string {\n\treturn f.partitioner\n}\n\nfunc (f ScyllaHostFeatures) ShardingAlgorithm() string {\n\treturn f.shardingAlgorithm\n}\n\nfunc (f ScyllaHostFeatures) ShardsCount() int {\n\treturn f.nrShards\n}\n\nfunc (f ScyllaHostFeatures) MSBIgnore() uint64 {\n\treturn f.msbIgnore\n}\n\nfunc (f ScyllaHostFeatures) LWTFlagMask() int {\n\treturn f.lwtFlagMask\n}\n\nfunc (f ScyllaHostFeatures) ShardAwarePort() uint16 
{\n\treturn f.shardAwarePort\n}\n\nfunc (f ScyllaHostFeatures) ShardAwarePortTLS() uint16 {\n\treturn f.shardAwarePortTLS\n}\n\nfunc (f ScyllaHostFeatures) RateLimitErrorCode() int {\n\treturn f.rateLimitErrorCode\n}\n\nfunc (f ScyllaHostFeatures) IsMetadataIDSupported() bool {\n\treturn f.isMetadataIDSupported\n}\n\n// CQL Protocol extension interface for Scylla.\n// Each extension is identified by a name and defines a way to serialize itself\n// in STARTUP message payload.\ntype cqlProtocolExtension interface {\n\tname() string\n\tserialize() map[string]string\n}\n\nfunc findCQLProtoExtByName(exts []cqlProtocolExtension, name string) cqlProtocolExtension {\n\tfor i := range exts {\n\t\tif exts[i].name() == name {\n\t\t\treturn exts[i]\n\t\t}\n\t}\n\treturn nil\n}\n\n// Top-level keys used for serialization/deserialization of CQL protocol\n// extensions in SUPPORTED/STARTUP messages.\n// Each key identifies a single extension.\nconst (\n\tlwtAddMetadataMarkKey = \"SCYLLA_LWT_ADD_METADATA_MARK\"\n\trateLimitError        = \"SCYLLA_RATE_LIMIT_ERROR\"\n\ttabletsRoutingV1      = \"TABLETS_ROUTING_V1\"\n)\n\n// \"tabletsRoutingV1\" CQL Protocol Extension.\n// This extension, if enabled (properly negotiated), allows Scylla server\n// to send a tablet information in `custom_payload`.\n//\n// Implements cqlProtocolExtension interface.\ntype tabletsRoutingV1Ext struct {\n}\n\nvar _ cqlProtocolExtension = &tabletsRoutingV1Ext{}\n\n// Factory function to deserialize and create an `tabletsRoutingV1Ext` instance\n// from SUPPORTED message payload.\nfunc newTabletsRoutingV1Ext(supported map[string][]string) *tabletsRoutingV1Ext {\n\tif _, found := supported[tabletsRoutingV1]; found {\n\t\treturn &tabletsRoutingV1Ext{}\n\t}\n\treturn nil\n}\n\nfunc (ext *tabletsRoutingV1Ext) serialize() map[string]string {\n\treturn map[string]string{\n\t\ttabletsRoutingV1: \"\",\n\t}\n}\n\nfunc (ext *tabletsRoutingV1Ext) name() string {\n\treturn tabletsRoutingV1\n}\n\n// \"Rate limit\" CQL 
Protocol Extension.\n// This extension, if enabled (properly negotiated), allows Scylla server\n// to send a special kind of error.\n//\n// Implements cqlProtocolExtension interface.\ntype rateLimitExt struct {\n\trateLimitErrorCode int\n}\n\nvar _ cqlProtocolExtension = &rateLimitExt{}\n\n// Factory function to deserialize and create an `rateLimitExt` instance\n// from SUPPORTED message payload.\nfunc newRateLimitExt(supported map[string][]string, logger StdLogger) *rateLimitExt {\n\tconst rateLimitErrorCode = \"ERROR_CODE\"\n\n\tif v, found := supported[rateLimitError]; found {\n\t\tfor i := range v {\n\t\t\tsplitVal := strings.Split(v[i], \"=\")\n\t\t\tif splitVal[0] == rateLimitErrorCode {\n\t\t\t\tvar (\n\t\t\t\t\terr       error\n\t\t\t\t\terrorCode int\n\t\t\t\t)\n\t\t\t\tif errorCode, err = strconv.Atoi(splitVal[1]); err != nil {\n\t\t\t\t\tif debug.Enabled {\n\t\t\t\t\t\tlogger.Printf(\"scylla: failed to parse %s value %v: %s\", rateLimitErrorCode, splitVal[1], err)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn &rateLimitExt{\n\t\t\t\t\trateLimitErrorCode: errorCode,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (ext *rateLimitExt) serialize() map[string]string {\n\treturn map[string]string{\n\t\trateLimitError: \"\",\n\t}\n}\n\nfunc (ext *rateLimitExt) name() string {\n\treturn rateLimitError\n}\n\n// \"LWT prepared statements metadata mark\" CQL Protocol Extension.\n// This extension, if enabled (properly negotiated), allows Scylla server\n// to set a special bit in prepared statements metadata, which would indicate\n// whether the statement at hand is LWT statement or not.\n//\n// This is further used to consistently choose primary replicas in a predefined\n// order for these queries, which can reduce contention over hot keys and thus\n// increase LWT performance.\n//\n// Implements cqlProtocolExtension interface.\ntype lwtAddMetadataMarkExt struct {\n\tlwtOptMetaBitMask int\n}\n\nvar _ cqlProtocolExtension = 
&lwtAddMetadataMarkExt{}\n\n// Factory function to deserialize and create an `lwtAddMetadataMarkExt` instance\n// from SUPPORTED message payload.\nfunc newLwtAddMetaMarkExt(supported map[string][]string, logger StdLogger) *lwtAddMetadataMarkExt {\n\tconst lwtOptMetaBitMaskKey = \"LWT_OPTIMIZATION_META_BIT_MASK\"\n\n\tif v, found := supported[lwtAddMetadataMarkKey]; found {\n\t\tfor i := range v {\n\t\t\tsplitVal := strings.Split(v[i], \"=\")\n\t\t\tif splitVal[0] == lwtOptMetaBitMaskKey {\n\t\t\t\tvar (\n\t\t\t\t\terr     error\n\t\t\t\t\tbitMask int\n\t\t\t\t)\n\t\t\t\tif bitMask, err = strconv.Atoi(splitVal[1]); err != nil {\n\t\t\t\t\tif debug.Enabled {\n\t\t\t\t\t\tlogger.Printf(\"scylla: failed to parse %s value %v: %s\", lwtOptMetaBitMaskKey, splitVal[1], err)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn &lwtAddMetadataMarkExt{\n\t\t\t\t\tlwtOptMetaBitMask: bitMask,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (ext *lwtAddMetadataMarkExt) serialize() map[string]string {\n\treturn map[string]string{\n\t\tlwtAddMetadataMarkKey: fmt.Sprintf(\"LWT_OPTIMIZATION_META_BIT_MASK=%d\", ext.lwtOptMetaBitMask),\n\t}\n}\n\nfunc (ext *lwtAddMetadataMarkExt) name() string {\n\treturn lwtAddMetadataMarkKey\n}\n\nfunc parseSupported(supported map[string][]string, logger StdLogger) ScyllaConnectionFeatures {\n\tconst (\n\t\tscyllaShard             = \"SCYLLA_SHARD\"\n\t\tscyllaNrShards          = \"SCYLLA_NR_SHARDS\"\n\t\tscyllaPartitioner       = \"SCYLLA_PARTITIONER\"\n\t\tscyllaShardingAlgorithm = \"SCYLLA_SHARDING_ALGORITHM\"\n\t\tscyllaShardingIgnoreMSB = \"SCYLLA_SHARDING_IGNORE_MSB\"\n\t\tscyllaShardAwarePort    = \"SCYLLA_SHARD_AWARE_PORT\"\n\t\tscyllaShardAwarePortSSL = \"SCYLLA_SHARD_AWARE_PORT_SSL\"\n\t\tscyllaUseMetadataID     = \"SCYLLA_USE_METADATA_ID\"\n\t)\n\n\tvar (\n\t\tsi  ScyllaConnectionFeatures\n\t\terr error\n\t)\n\n\tif s, ok := supported[scyllaShard]; ok {\n\t\tif si.shard, err = strconv.Atoi(s[0]); err != nil {\n\t\t\tif 
debug.Enabled {\n\t\t\t\tlogger.Printf(\"scylla: failed to parse %s value %v: %s\", scyllaShard, s, err)\n\t\t\t}\n\t\t}\n\t}\n\tif s, ok := supported[scyllaNrShards]; ok {\n\t\tif si.nrShards, err = strconv.Atoi(s[0]); err != nil {\n\t\t\tif debug.Enabled {\n\t\t\t\tlogger.Printf(\"scylla: failed to parse %s value %v: %s\", scyllaNrShards, s, err)\n\t\t\t}\n\t\t}\n\t}\n\tif s, ok := supported[scyllaShardingIgnoreMSB]; ok {\n\t\tif si.msbIgnore, err = strconv.ParseUint(s[0], 10, 64); err != nil {\n\t\t\tif debug.Enabled {\n\t\t\t\tlogger.Printf(\"scylla: failed to parse %s value %v: %s\", scyllaShardingIgnoreMSB, s, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif s, ok := supported[scyllaPartitioner]; ok {\n\t\tsi.partitioner = s[0]\n\t}\n\tif s, ok := supported[scyllaShardingAlgorithm]; ok {\n\t\tsi.shardingAlgorithm = s[0]\n\t}\n\tif s, ok := supported[scyllaShardAwarePort]; ok {\n\t\tif shardAwarePort, err := strconv.ParseUint(s[0], 10, 16); err != nil {\n\t\t\tif debug.Enabled {\n\t\t\t\tlogger.Printf(\"scylla: failed to parse %s value %v: %s\", scyllaShardAwarePort, s, err)\n\t\t\t}\n\t\t} else {\n\t\t\tsi.shardAwarePort = uint16(shardAwarePort)\n\t\t}\n\t}\n\tif s, ok := supported[scyllaShardAwarePortSSL]; ok {\n\t\tif shardAwarePortTLS, err := strconv.ParseUint(s[0], 10, 16); err != nil {\n\t\t\tif debug.Enabled {\n\t\t\t\tlogger.Printf(\"scylla: failed to parse %s value %v: %s\", scyllaShardAwarePortSSL, s, err)\n\t\t\t}\n\t\t} else {\n\t\t\tsi.shardAwarePortTLS = uint16(shardAwarePortTLS)\n\t\t}\n\t}\n\n\tif lwtInfo := newLwtAddMetaMarkExt(supported, logger); lwtInfo != nil {\n\t\tsi.lwtFlagMask = lwtInfo.lwtOptMetaBitMask\n\t}\n\n\tif rateLimitInfo := newRateLimitExt(supported, logger); rateLimitInfo != nil {\n\t\tsi.rateLimitErrorCode = rateLimitInfo.rateLimitErrorCode\n\t}\n\n\tif _, ok := supported[scyllaUseMetadataID]; ok {\n\t\tsi.isMetadataIDSupported = true\n\t}\n\n\tif si.partitioner != \"org.apache.cassandra.dht.Murmur3Partitioner\" || si.shardingAlgorithm != 
\"biased-token-round-robin\" || si.nrShards == 0 || si.msbIgnore == 0 {\n\t\tif debug.Enabled {\n\t\t\tlogger.Printf(\"scylla: unsupported sharding configuration, partitioner=%s, algorithm=%s, no_shards=%d, msb_ignore=%d\",\n\t\t\t\tsi.partitioner, si.shardingAlgorithm, si.nrShards, si.msbIgnore)\n\t\t}\n\t\treturn ScyllaConnectionFeatures{}\n\t}\n\n\treturn si\n}\n\nfunc parseCQLProtocolExtensions(supported map[string][]string, logger StdLogger) []cqlProtocolExtension {\n\texts := []cqlProtocolExtension{}\n\n\tlwtExt := newLwtAddMetaMarkExt(supported, logger)\n\tif lwtExt != nil {\n\t\texts = append(exts, lwtExt)\n\t}\n\n\trateLimitExt := newRateLimitExt(supported, logger)\n\tif rateLimitExt != nil {\n\t\texts = append(exts, rateLimitExt)\n\t}\n\n\ttabletsExt := newTabletsRoutingV1Ext(supported)\n\tif tabletsExt != nil {\n\t\texts = append(exts, tabletsExt)\n\t}\n\n\treturn exts\n}\n\n// isScyllaConn checks if conn is suitable for scyllaConnPicker.\nfunc (c *Conn) isScyllaConn() bool {\n\treturn c.getScyllaSupported().nrShards != 0\n}\n\n// scyllaConnPicker is a specialised ConnPicker that selects connections based\n// on token trying to get connection to a shard containing the given token.\n// A list of excess connections is maintained to allow for lazy closing of\n// connections to already opened shards. Keeping excess connections open helps\n// reaching equilibrium faster since the likelihood of hitting the same shard\n// decreases with the number of connections to the shard.\n//\n// scyllaConnPicker keeps track of the details about the shard-aware port.\n// When used as a Dialer, it connects to the shard-aware port instead of the\n// regular port (if the node supports it). 
For each subsequent connection\n// it tries to make, the shard that it aims to connect to is chosen\n// in a round-robin fashion.\ntype scyllaConnPicker struct {\n\tlogger StdLogger\n\t// disableShardAwarePortUntil is used to temporarily disable new connections to the shard-aware port temporarily\n\tdisableShardAwarePortUntil *atomic.Value\n\taddress                    string\n\texcessConns                []*Conn\n\tconns                      []*Conn\n\tnrShards                   int\n\tpos                        uint64\n\tlastAttemptedShard         int\n\tmsbIgnore                  uint64\n\tnrConns                    int\n\texcessConnsLimitRate       float32\n\thostId                     UUID\n\tshardAwarePortDisabled     bool\n}\n\nfunc newScyllaConnPicker(conn *Conn, logger StdLogger) *scyllaConnPicker {\n\taddr := conn.Address()\n\n\tif conn.scyllaSupported.nrShards == 0 {\n\t\tpanic(fmt.Sprintf(\"scylla: %s not a sharded connection\", addr))\n\t}\n\n\tif debug.Enabled {\n\t\tlogger.Printf(\"scylla: %s new conn picker sharding options %+v\", addr, conn.scyllaSupported)\n\t}\n\n\treturn &scyllaConnPicker{\n\t\taddress:                addr,\n\t\thostId:                 conn.host.hostId,\n\t\tnrShards:               conn.scyllaSupported.nrShards,\n\t\tmsbIgnore:              conn.scyllaSupported.msbIgnore,\n\t\tlastAttemptedShard:     0,\n\t\tshardAwarePortDisabled: conn.session.cfg.DisableShardAwarePort,\n\t\tlogger:                 logger,\n\t\texcessConnsLimitRate:   conn.session.cfg.MaxExcessShardConnectionsRate,\n\n\t\tdisableShardAwarePortUntil: new(atomic.Value),\n\t}\n}\n\nfunc (p *scyllaConnPicker) Pick(t Token, qry ExecutableQuery) *Conn {\n\tif len(p.conns) == 0 {\n\t\treturn nil\n\t}\n\n\tif t == nil {\n\t\treturn p.leastBusyConn()\n\t}\n\n\tmmt, ok := t.(int64Token)\n\t// double check if that's murmur3 token\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tidx := -1\n\nouter:\n\tfor _, conn := range p.conns {\n\t\tif conn == nil 
{\n\t\t\tcontinue\n\t\t}\n\n\t\tif qry != nil && conn.isTabletSupported() {\n\t\t\tfor _, replica := range conn.session.findTabletReplicasUnsafeForToken(qry.Keyspace(), qry.Table(), int64(mmt)) {\n\t\t\t\tif UUID(replica.HostUUIDValue()) == p.hostId {\n\t\t\t\t\tidx = replica.ShardID()\n\t\t\t\t\tbreak outer\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tbreak\n\t}\n\n\tif idx == -1 {\n\t\tidx = p.shardOf(mmt)\n\t}\n\n\tif c := p.conns[idx]; c != nil {\n\t\t// We have this shard's connection\n\t\t// so let's give it to the caller.\n\t\t// But only if it's not loaded too much and load is well distributed.\n\t\tif qry != nil && qry.IsLWT() {\n\t\t\treturn c\n\t\t}\n\t\treturn p.maybeReplaceWithLessBusyConnection(c)\n\t}\n\treturn p.leastBusyConn()\n}\n\nfunc (p *scyllaConnPicker) maybeReplaceWithLessBusyConnection(c *Conn) *Conn {\n\tif !isHeavyLoaded(c) {\n\t\treturn c\n\t}\n\talternative := p.leastBusyConn()\n\tif alternative == nil || alternative.AvailableStreams()*120 > c.AvailableStreams()*100 {\n\t\treturn c\n\t} else {\n\t\treturn alternative\n\t}\n}\n\nfunc isHeavyLoaded(c *Conn) bool {\n\treturn c.streams.NumStreams/2 > c.AvailableStreams()\n}\n\nfunc (p *scyllaConnPicker) leastBusyConn() *Conn {\n\tvar (\n\t\tleastBusyConn    *Conn\n\t\tstreamsAvailable int\n\t)\n\tidx := int(atomic.AddUint64(&p.pos, 1))\n\t// find the conn which has the most available streams, this is racy\n\tfor i := range p.conns {\n\t\tif conn := p.conns[(idx+i)%len(p.conns)]; conn != nil {\n\t\t\tif streams := conn.AvailableStreams(); streams > streamsAvailable {\n\t\t\t\tleastBusyConn = conn\n\t\t\t\tstreamsAvailable = streams\n\t\t\t}\n\t\t}\n\t}\n\treturn leastBusyConn\n}\n\nfunc (p *scyllaConnPicker) shardOf(token int64Token) int {\n\tshards := uint64(p.nrShards)\n\tz := uint64(token+math.MinInt64) << p.msbIgnore\n\tlo := z & 0xffffffff\n\thi := (z >> 32) & 0xffffffff\n\tmul1 := lo * shards\n\tmul2 := hi * shards\n\tsum := (mul1 >> 32) + mul2\n\treturn int(sum >> 32)\n}\n\nfunc (p 
*scyllaConnPicker) Put(conn *Conn) error {\n\tvar (\n\t\tnrShards = conn.scyllaSupported.nrShards\n\t\tshard    = conn.scyllaSupported.shard\n\t)\n\n\tif nrShards == 0 {\n\t\treturn errors.New(\"server reported that it has no shards\")\n\t}\n\n\tif nrShards != p.nrShards {\n\t\tif debug.Enabled {\n\t\t\tp.logger.Printf(\"scylla: %s shard count changed from %d to %d, rebuilding connection pool\",\n\t\t\t\tp.address, p.nrShards, nrShards)\n\t\t}\n\t\tp.handleShardCountChange(conn, nrShards)\n\t} else if nrShards != len(p.conns) {\n\t\tconns := p.conns\n\t\tp.conns = make([]*Conn, nrShards)\n\t\tcopy(p.conns, conns)\n\t}\n\n\tif c := p.conns[shard]; c != nil {\n\t\tif conn.isShardAware {\n\t\t\t// A connection made to the shard-aware port resulted in duplicate\n\t\t\t// connection to the same shard being made. Because this is never\n\t\t\t// intentional, it suggests that a NAT or AddressTranslator\n\t\t\t// changes the source port along the way, therefore we can't trust\n\t\t\t// the shard-aware port to return connection to the shard\n\t\t\t// that we requested. 
Fall back to non-shard-aware port for some time.\n\t\t\tp.logger.Printf(\n\t\t\t\t\"scylla: connection to shard-aware address %s resulted in wrong shard being assigned; please check that you are not behind a NAT or AddressTranslator which changes source ports; falling back to non-shard-aware port for %v\",\n\t\t\t\tp.address,\n\t\t\t\tscyllaShardAwarePortFallbackDuration,\n\t\t\t)\n\t\t\tuntil := time.Now().Add(scyllaShardAwarePortFallbackDuration)\n\t\t\tp.disableShardAwarePortUntil.Store(until)\n\n\t\t\treturn fmt.Errorf(\"connection landed on shard %d that already has a connection\", shard)\n\t\t} else {\n\t\t\tp.excessConns = append(p.excessConns, conn)\n\t\t\tif debug.Enabled {\n\t\t\t\tp.logger.Printf(\"scylla: %s put shard %d excess connection total: %d missing: %d excess: %d\", p.address, shard, p.nrConns, p.nrShards-p.nrConns, len(p.excessConns))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tp.conns[shard] = conn\n\t\tp.nrConns++\n\t\tif debug.Enabled {\n\t\t\tp.logger.Printf(\"scylla: %s put shard %d connection total: %d missing: %d\", p.address, shard, p.nrConns, p.nrShards-p.nrConns)\n\t\t}\n\t}\n\n\tif p.shouldCloseExcessConns() {\n\t\tp.closeExcessConns()\n\t}\n\n\treturn nil\n}\n\nfunc (p *scyllaConnPicker) handleShardCountChange(newConn *Conn, newShardCount int) {\n\toldShardCount := p.nrShards\n\toldConns := make([]*Conn, len(p.conns))\n\tcopy(oldConns, p.conns)\n\n\tif debug.Enabled {\n\t\tp.logger.Printf(\"scylla: %s handling shard topology change from %d to %d\", p.address, oldShardCount, newShardCount)\n\t}\n\n\tnewConns := make([]*Conn, newShardCount)\n\tvar toClose []*Conn\n\tmigratedCount := 0\n\n\tfor i, conn := range oldConns {\n\t\tif conn == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif i < newShardCount {\n\t\t\tnewConns[i] = conn\n\t\t\tmigratedCount++\n\t\t} else {\n\t\t\ttoClose = append(toClose, conn)\n\t\t}\n\t}\n\n\tp.nrShards = newShardCount\n\tp.msbIgnore = newConn.scyllaSupported.msbIgnore\n\tp.conns = newConns\n\tp.nrConns = 
migratedCount\n\tp.lastAttemptedShard = 0\n\n\tif len(toClose) > 0 {\n\t\tgo closeConns(toClose...)\n\t}\n\n\tif debug.Enabled {\n\t\tp.logger.Printf(\"scylla: %s migrated %d/%d connections to new shard topology, closing %d excess connections\", p.address, migratedCount, len(oldConns), len(toClose))\n\t}\n}\n\nfunc (p *scyllaConnPicker) shouldCloseExcessConns() bool {\n\tif p.nrConns >= p.nrShards {\n\t\treturn true\n\t}\n\treturn len(p.excessConns) > int(p.excessConnsLimitRate*float32(p.nrShards))\n}\n\nfunc (p *scyllaConnPicker) GetConnectionCount() int {\n\treturn p.nrConns\n}\n\nfunc (p *scyllaConnPicker) GetExcessConnectionCount() int {\n\treturn len(p.excessConns)\n}\n\nfunc (p *scyllaConnPicker) GetShardCount() int {\n\treturn p.nrShards\n}\n\nfunc (p *scyllaConnPicker) Remove(conn *Conn) {\n\tshard := conn.scyllaSupported.shard\n\n\tif conn.scyllaSupported.nrShards == 0 {\n\t\t// It is possible for Remove to be called before the connection is added to the pool.\n\t\t// Ignoring these connections here is safe.\n\t\tif debug.Enabled {\n\t\t\tp.logger.Printf(\"scylla: %s has unknown sharding state, ignoring it\", p.address)\n\t\t}\n\t\treturn\n\t}\n\tif debug.Enabled {\n\t\tp.logger.Printf(\"scylla: %s remove shard %d connection\", p.address, shard)\n\t}\n\n\tif p.conns[shard] != nil {\n\t\tp.conns[shard] = nil\n\t\tp.nrConns--\n\t}\n}\n\nfunc (p *scyllaConnPicker) InFlight() int {\n\tresult := 0\n\tfor _, conn := range p.conns {\n\t\tif conn != nil {\n\t\t\tresult = result + (conn.streams.InUse())\n\t\t}\n\t}\n\treturn result\n}\n\nfunc (p *scyllaConnPicker) Size() (int, int) {\n\treturn p.nrConns, p.nrShards - p.nrConns\n}\n\nfunc (p *scyllaConnPicker) Close() {\n\tp.closeConns()\n\tp.closeExcessConns()\n}\n\nfunc (p *scyllaConnPicker) closeConns() {\n\tif len(p.conns) == 0 {\n\t\tif debug.Enabled {\n\t\t\tp.logger.Printf(\"scylla: %s no connections to close\", p.address)\n\t\t}\n\t\treturn\n\t}\n\n\tconns := p.conns\n\tp.conns = nil\n\tp.nrConns = 0\n\n\tif 
debug.Enabled {\n\t\tp.logger.Printf(\"scylla: %s closing %d connections\", p.address, len(conns))\n\t}\n\tgo closeConns(conns...)\n}\n\nfunc (p *scyllaConnPicker) closeExcessConns() {\n\tif len(p.excessConns) == 0 {\n\t\tif debug.Enabled {\n\t\t\tp.logger.Printf(\"scylla: %s no excess connections to close\", p.address)\n\t\t}\n\t\treturn\n\t}\n\n\tconns := p.excessConns\n\tp.excessConns = nil\n\n\tif debug.Enabled {\n\t\tp.logger.Printf(\"scylla: %s closing %d excess connections\", p.address, len(conns))\n\t}\n\tgo closeConns(conns...)\n}\n\n// Closing must be done outside of hostConnPool lock. If holding a lock\n// a deadlock can occur when closing one of the connections returns error on close.\n// See scylladb/gocql#53.\nfunc closeConns(conns ...*Conn) {\n\tfor _, conn := range conns {\n\t\tif conn != nil {\n\t\t\tconn.Close()\n\t\t}\n\t}\n}\n\n// NextShard returns the shardID to connect to.\n// nrShard specifies how many shards the host has.\n// If nrShards is zero, the caller shouldn't use shard-aware port.\nfunc (p *scyllaConnPicker) NextShard() (shardID, nrShards int) {\n\tif p.shardAwarePortDisabled {\n\t\treturn 0, 0\n\t}\n\n\tdisableUntil, _ := p.disableShardAwarePortUntil.Load().(time.Time)\n\tif time.Now().Before(disableUntil) {\n\t\t// There is suspicion that the shard-aware-port is not reachable\n\t\t// or misconfigured, fall back to the non-shard-aware port\n\t\treturn 0, 0\n\t}\n\n\t// Find the shard without a connection\n\t// It's important to start counting from 1 here because we want\n\t// to consider the next shard after the previously attempted one\n\tfor i := 1; i <= p.nrShards; i++ {\n\t\tshardID := (p.lastAttemptedShard + i) % p.nrShards\n\t\tif p.conns == nil || p.conns[shardID] == nil {\n\t\t\tp.lastAttemptedShard = shardID\n\t\t\treturn shardID, p.nrShards\n\t\t}\n\t}\n\n\t// We did not find an unallocated shard\n\t// We will dial the non-shard-aware port\n\treturn 0, 0\n}\n\n// ShardDialer is like HostDialer but is shard-aware.\n// If 
the driver wants to connect to a specific shard, it will call DialShard,\n// otherwise it will call DialHost.\ntype ShardDialer interface {\n\tHostDialer\n\n\t// DialShard establishes a connection to the specified shard ID out of nrShards.\n\t// The returned connection must be directly usable for CQL protocol,\n\t// specifically DialShard is responsible also for setting up the TLS session if needed.\n\tDialShard(ctx context.Context, host *HostInfo, shardID, nrShards int) (*DialedHost, error)\n}\n\n// A dialer which dials a particular shard\ntype scyllaDialer struct {\n\tdialer    Dialer\n\tlogger    StdLogger\n\ttlsConfig *tls.Config\n\tcfg       *ClusterConfig\n}\n\nconst scyllaShardAwarePortFallbackDuration time.Duration = 5 * time.Minute\n\nfunc (sd *scyllaDialer) DialHost(ctx context.Context, host *HostInfo) (*DialedHost, error) {\n\tip := host.ConnectAddress()\n\tport := host.Port()\n\n\tif !validIpAddr(ip) {\n\t\treturn nil, fmt.Errorf(\"host missing connect ip address: %v\", ip)\n\t} else if port == 0 {\n\t\treturn nil, fmt.Errorf(\"host missing port: %v\", port)\n\t}\n\n\taddr := net.JoinHostPort(ip.String(), strconv.Itoa(port))\n\ttranslatedInfo := host.getTranslatedConnectionInfo()\n\tif translatedInfo != nil {\n\t\taddr = translatedInfo.CQL.ToNetAddr()\n\t}\n\n\tconn, err := sd.dialer.DialContext(ctx, \"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn WrapTLS(ctx, conn, addr, sd.tlsConfig)\n}\n\nfunc (sd *scyllaDialer) DialShard(ctx context.Context, host *HostInfo, shardID, nrShards int) (*DialedHost, error) {\n\tip := host.ConnectAddress()\n\tport := host.Port()\n\n\tif !validIpAddr(ip) {\n\t\treturn nil, fmt.Errorf(\"host missing connect ip address: %v\", ip)\n\t} else if port == 0 {\n\t\treturn nil, fmt.Errorf(\"host missing port: %v\", port)\n\t}\n\n\titer := newScyllaPortIterator(shardID, nrShards)\n\taddr := net.JoinHostPort(ip.String(), strconv.Itoa(port))\n\tshardAwareAddr := \"\"\n\ttranslatedInfo := 
host.getTranslatedConnectionInfo()\n\tif translatedInfo != nil {\n\t\taddr = translatedInfo.CQL.ToNetAddr()\n\t\tif sd.tlsConfig != nil {\n\t\t\tif translatedInfo.ShardAwareTLS.IsValid() {\n\t\t\t\tshardAwareAddr = translatedInfo.ShardAwareTLS.ToNetAddr()\n\t\t\t}\n\t\t} else if translatedInfo.ShardAware.IsValid() {\n\t\t\tshardAwareAddr = translatedInfo.ShardAware.ToNetAddr()\n\t\t}\n\t}\n\n\tif debug.Enabled {\n\t\tsd.logger.Printf(\"scylla: connecting to shard %d\", shardID)\n\t}\n\n\tconn, err := sd.dialShardAware(ctx, addr, shardAwareAddr, iter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn WrapTLS(ctx, conn, addr, sd.tlsConfig)\n}\n\nfunc (sd *scyllaDialer) dialShardAware(ctx context.Context, addr, shardAwareAddr string, iter *scyllaPortIterator) (net.Conn, error) {\n\tfor {\n\t\tport, ok := iter.Next()\n\t\tif !ok {\n\t\t\t// We exhausted ports to connect from. Try the non-shard-aware port.\n\t\t\treturn sd.dialer.DialContext(ctx, \"tcp\", addr)\n\t\t}\n\n\t\tctxWithPort := context.WithValue(ctx, scyllaSourcePortCtx{}, port)\n\t\tconn, err := sd.dialer.DialContext(ctxWithPort, \"tcp\", shardAwareAddr)\n\n\t\tif isLocalAddrInUseErr(err) {\n\t\t\t// This indicates that the source port is already in use\n\t\t\t// We can immediately retry with another source port for this shard\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\tconn, err := sd.dialer.DialContext(ctx, \"tcp\", addr)\n\t\t\tif err == nil {\n\t\t\t\t// We failed to connect to the shard-aware port, but succeeded\n\t\t\t\t// in connecting to the non-shard-aware port. 
This might\n\t\t\t\t// indicate that the shard-aware port is just not reachable,\n\t\t\t\t// but we may also be unlucky and the node became reachable\n\t\t\t\t// just after we tried the first connection.\n\t\t\t\t// We can't avoid false positives here, so I'm putting it\n\t\t\t\t// behind a debug flag.\n\t\t\t\tif debug.Enabled {\n\t\t\t\t\tsd.logger.Printf(\n\t\t\t\t\t\t\"scylla: %s couldn't connect to shard-aware address %s while the non-shard-aware address is available; this might be an issue with the shard-aware port configuration\",\n\t\t\t\t\t\taddr,\n\t\t\t\t\t\tshardAwareAddr,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn conn, err\n\t\t}\n\t\treturn conn, err\n\t}\n}\n\n// ErrScyllaSourcePortAlreadyInUse An error value which can be returned from\n// a custom dialer implementation to indicate that the requested source port\n// to dial from is already in use\nvar ErrScyllaSourcePortAlreadyInUse = errors.New(\"scylla: source port is already in use\")\n\nfunc isLocalAddrInUseErr(err error) bool {\n\treturn errors.Is(err, syscall.EADDRINUSE) || errors.Is(err, ErrScyllaSourcePortAlreadyInUse)\n}\n\n// ScyllaShardAwareDialer wraps a net.Dialer, but uses a source port specified by gocql when connecting.\n//\n// Unlike in the case of standard native transport ports, gocql can choose which shard will handle\n// a new connection by connecting from a specific source port. 
If you are using your own net.Dialer\n// in ClusterConfig, you can use ScyllaShardAwareDialer to \"upgrade\" it so that it connects\n// from the source port chosen by gocql.\n//\n// Please note that ScyllaShardAwareDialer overwrites the LocalAddr field in order to choose\n// the right source port for connection.\ntype ScyllaShardAwareDialer struct {\n\tnet.Dialer\n}\n\nfunc (d *ScyllaShardAwareDialer) DialContext(ctx context.Context, network, addr string) (conn net.Conn, err error) {\n\tsourcePort := ScyllaGetSourcePort(ctx)\n\tif sourcePort == 0 {\n\t\treturn d.Dialer.DialContext(ctx, network, addr)\n\t}\n\tdialerWithLocalAddr := d.Dialer\n\tdialerWithLocalAddr.LocalAddr, err = net.ResolveTCPAddr(network, fmt.Sprintf(\":%d\", sourcePort))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn dialerWithLocalAddr.DialContext(ctx, network, addr)\n}\n\ntype scyllaPortIterator struct {\n\tcurrentPort int\n\tshardCount  int\n}\n\nconst (\n\tscyllaPortBasedBalancingMin = 0x8000\n\tscyllaPortBasedBalancingMax = 0xFFFF\n)\n\nfunc newScyllaPortIterator(shardID, shardCount int) *scyllaPortIterator {\n\tif shardCount == 0 {\n\t\tpanic(\"shardCount cannot be 0\")\n\t}\n\n\t// Find the smallest port p such that p >= min and p % shardCount == shardID\n\tport := scyllaPortBasedBalancingMin - scyllaShardForSourcePort(scyllaPortBasedBalancingMin, shardCount) + shardID\n\tif port < scyllaPortBasedBalancingMin {\n\t\tport += shardCount\n\t}\n\n\treturn &scyllaPortIterator{\n\t\tcurrentPort: port,\n\t\tshardCount:  shardCount,\n\t}\n}\n\nfunc (spi *scyllaPortIterator) Next() (uint16, bool) {\n\tif spi == nil {\n\t\treturn 0, false\n\t}\n\n\tp := spi.currentPort\n\n\tif p > scyllaPortBasedBalancingMax {\n\t\treturn 0, false\n\t}\n\n\tspi.currentPort += spi.shardCount\n\treturn uint16(p), true\n}\n\nfunc scyllaShardForSourcePort(sourcePort uint16, shardCount int) int {\n\treturn int(sourcePort) % shardCount\n}\n\ntype scyllaSourcePortCtx struct{}\n\n// ScyllaGetSourcePort returns the 
source port that should be used when connecting to a node.\n//\n// Unlike in the case standard native transport ports, gocql can choose which shard will handle\n// a new connection at the shard-aware port by connecting from a specific source port. Therefore,\n// if you are using a custom Dialer and your nodes expose shard-aware ports, your dialer should\n// use the source port specified by gocql.\n//\n// If this function returns 0, then your dialer can use any source port.\n//\n// If you aren't using a custom dialer, gocql will use a default one which uses appropriate source port.\n// If you are using net.Dialer, consider wrapping it in a gocql.ScyllaShardAwareDialer.\nfunc ScyllaGetSourcePort(ctx context.Context) uint16 {\n\tsourcePort, _ := ctx.Value(scyllaSourcePortCtx{}).(uint16)\n\treturn sourcePort\n}\n\n// Returns a partitioner specific to the table, or \"nil\"\n// if the cluster-global partitioner should be used\nfunc scyllaGetTablePartitioner(session *Session, keyspaceName, tableName string) (Partitioner, error) {\n\tisCdc, err := scyllaIsCdcTable(session, keyspaceName, tableName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif isCdc {\n\t\treturn scyllaCDCPartitioner{logger: &defaultLogger{}}, nil\n\t}\n\n\treturn nil, nil\n}\n"
  },
  {
    "path": "scylla_cdc.go",
    "content": "package gocql\n\nimport (\n\t\"encoding/binary\"\n\t\"math\"\n\t\"strings\"\n\n\t\"github.com/gocql/gocql/internal/debug\"\n)\n\n// cdc partitioner\n\nconst (\n\tscyllaCDCPartitionerName     = \"CDCPartitioner\"\n\tscyllaCDCPartitionerFullName = \"com.scylladb.dht.CDCPartitioner\"\n\n\tscyllaCDCPartitionKeyLength  = 16\n\tscyllaCDCVersionMask         = 0x0F\n\tscyllaCDCMinSupportedVersion = 1\n\tscyllaCDCMaxSupportedVersion = 1\n\n\tscyllaCDCMinToken           = int64Token(math.MinInt64)\n\tscyllaCDCLogTableNameSuffix = \"_scylla_cdc_log\"\n\tscyllaCDCExtensionName      = \"cdc\"\n)\n\ntype scyllaCDCPartitioner struct {\n\tlogger StdLogger\n}\n\nvar _ Partitioner = scyllaCDCPartitioner{logger: &defaultLogger{}}\n\nfunc (p scyllaCDCPartitioner) Name() string {\n\treturn scyllaCDCPartitionerName\n}\n\nfunc (p scyllaCDCPartitioner) Hash(partitionKey []byte) Token {\n\tif len(partitionKey) < 8 {\n\t\t// The key is too short to extract any sensible token,\n\t\t// so return the min token instead\n\t\tif debug.Enabled {\n\t\t\tp.logger.Printf(\"scylla: cdc partition key too short: %d < 8\", len(partitionKey))\n\t\t}\n\t\treturn scyllaCDCMinToken\n\t}\n\n\tupperQword := binary.BigEndian.Uint64(partitionKey[0:])\n\n\tif debug.Enabled {\n\t\t// In debug mode, do some more checks\n\n\t\tif len(partitionKey) != scyllaCDCPartitionKeyLength {\n\t\t\t// The token has unrecognized format, but the first quadword\n\t\t\t// should be the token value that we want\n\t\t\tp.logger.Printf(\"scylla: wrong size of cdc partition key: %d\", len(partitionKey))\n\t\t}\n\n\t\tlowerQword := binary.BigEndian.Uint64(partitionKey[8:])\n\t\tversion := lowerQword & scyllaCDCVersionMask\n\t\tif version < scyllaCDCMinSupportedVersion || version > scyllaCDCMaxSupportedVersion {\n\t\t\t// We don't support this version yet,\n\t\t\t// the token may be wrong\n\t\t\tp.logger.Printf(\n\t\t\t\t\"scylla: unsupported version: %d is not in range [%d, 
%d]\",\n\t\t\t\tversion,\n\t\t\t\tscyllaCDCMinSupportedVersion,\n\t\t\t\tscyllaCDCMaxSupportedVersion,\n\t\t\t)\n\t\t}\n\t}\n\n\treturn int64Token(upperQword)\n}\n\nfunc (p scyllaCDCPartitioner) ParseString(str string) Token {\n\treturn parseInt64Token(str)\n}\n\nfunc scyllaIsCdcTable(session *Session, keyspaceName, tableName string) (bool, error) {\n\tif !strings.HasSuffix(tableName, scyllaCDCLogTableNameSuffix) {\n\t\t// Not a CDC table, use the default partitioner\n\t\treturn false, nil\n\t}\n\n\t// Check if the table has the CDC partitioner set.\n\ttableMeta, err := session.TableMetadata(keyspaceName, tableName)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn tableMeta.Options.Partitioner == scyllaCDCPartitionerFullName, nil\n}\n"
  },
  {
    "path": "scylla_shard_aware_port_common_test.go",
    "content": "//go:build integration || unit\n// +build integration unit\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\t\"strconv\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\ntype makeClusterTestFunc func() *ClusterConfig\n\nfunc testShardAwarePortNoReconnections(t *testing.T, makeCluster makeClusterTestFunc) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\twg := &sync.WaitGroup{}\n\n\t// Initialize 10 sessions in parallel.\n\t// If shard-aware port is used and configured properly, we should get\n\t// a connection to each shard without any retries.\n\t// For each host, there should be N-1 connections to the special port.\n\tvar errs []error\n\tvar errLock sync.Mutex\n\n\tpushErr := func(err error) {\n\t\terrLock.Lock()\n\t\terrs = append(errs, err)\n\t\terrLock.Unlock()\n\t}\n\n\t// Run 10 sessions in parallel\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t// Each session gets a separate configuration, because we need to have\n\t\t\t// separate connection listeners - we need to differentiate connections\n\t\t\t// made for each session separately\n\t\t\tdialer := newLoggingTestDialer()\n\t\t\tcluster := makeCluster()\n\t\t\tcluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())\n\t\t\tcluster.Dialer = dialer\n\n\t\t\tuseTLS := cluster.SslOpts != nil\n\n\t\t\tsess, err := cluster.CreateSession()\n\t\t\tif err != nil {\n\t\t\t\tcancel()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer sess.Close()\n\n\t\t\tif err := waitUntilPoolsStopFilling(ctx, sess, 10*time.Second); err != nil {\n\t\t\t\tcancel()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\thosts := sess.hostSource.getHostsList()\n\t\t\tfor _, host := range hosts {\n\t\t\t\tt.Logf(\"checking host %s:%d hostID: %q\", host.ConnectAddress(), host.Port(), host.hostId)\n\t\t\t\thostPool, ok := sess.pool.getPool(host)\n\t\t\t\tif !ok {\n\t\t\t\t\tpushErr(fmt.Errorf(\"host %q hostID not found\", 
host.hostname))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tshardAwarePort := getShardAwarePort(hostPool, useTLS)\n\t\t\t\tif shardAwarePort == 0 {\n\t\t\t\t\t// Shard aware port was not exposed by the host\n\t\t\t\t\tt.Log(\"the host does not expose a shard-aware port, skipping\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Verify that we have a sharded connPicker\n\t\t\t\tshardedPicker, ok := hostPool.connPicker.(*scyllaConnPicker)\n\t\t\t\tif !ok {\n\t\t\t\t\tt.Errorf(\"not a sharded connection\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tnumberOfShards := shardedPicker.nrShards\n\n\t\t\t\t// Verify that there were no duplicate connections to the same shard\n\t\t\t\t// Make sure that we didn't connect to the same shard twice\n\t\t\t\t// There should be numberOfShards-1 connections to the new port\n\t\t\t\tevents := dialer.events[host.ConnectAddress().String()]\n\t\t\t\tshardAwareConnectionCount := 0\n\t\t\t\tshardsConnected := make(map[int]testConnectionEvent)\n\t\t\t\tfor _, evt := range events {\n\t\t\t\t\tif evt.destinationPort != shardAwarePort {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tshardAwareConnectionCount++\n\n\t\t\t\t\tshard := scyllaShardForSourcePort(evt.sourcePort, numberOfShards)\n\t\t\t\t\tif oldEvt, hasShard := shardsConnected[shard]; hasShard {\n\t\t\t\t\t\tt.Errorf(\"there was more than one connection to the shard aware port from the same shard (shard %d, port %d and %d)\",\n\t\t\t\t\t\t\tshard, oldEvt.sourcePort, evt.sourcePort)\n\t\t\t\t\t}\n\t\t\t\t\tshardsConnected[shard] = evt\n\t\t\t\t}\n\n\t\t\t\tif shardAwareConnectionCount != numberOfShards-1 {\n\t\t\t\t\tt.Errorf(\"expected %d connections to the shard aware port, but got %d\", numberOfShards-1, shardAwareConnectionCount)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn\n\t\t}()\n\t}\n\n\twg.Wait()\n\tfor _, err := range errs {\n\t\tt.Error(err.Error())\n\t}\n}\n\nfunc testShardAwarePortMaliciousNAT(t *testing.T, makeCluster makeClusterTestFunc) {\n\tcluster := 
makeCluster()\n\tcluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())\n\tcluster.Dialer = &sourcePortOffByOneTestDialer{}\n\n\tsess, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"an error occurred while creating a session: %s\", err)\n\t}\n\tdefer sess.Close()\n\n\t// In this situation we are guaranteed that the connection will miss one\n\t// shard at this point. The first connection receives a random shard,\n\t// then we establish N-1 connections, targeting remaining shards.\n\t// Because the malicious port translator shifts the port by one,\n\t// one shard will be missed (if the host has more than one shard).\n\n\t// Retry until we establish one connection per shard\n\n\tfor {\n\t\tif err := waitUntilPoolsStopFilling(context.Background(), sess, 10*time.Second); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif checkIfPoolsAreFull(sess) {\n\t\t\tbreak\n\t\t}\n\n\t\ttriggerPoolsRefill(sess)\n\t}\n}\n\nfunc testShardAwarePortUnreachable(t *testing.T, makeCluster makeClusterTestFunc) {\n\tcluster := makeCluster()\n\tcluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())\n\tcluster.Dialer = &allowOnlyNonShardAwarePortDialer{allowedPort: getClusterPort(cluster)}\n\n\tsess, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"an error occurred while creating a session: %s\", err)\n\t}\n\tdefer sess.Close()\n\n\t// In this situation, the connecting to the shard-aware port will fail,\n\t// but connections to the non-shard-aware port will succeed. 
This test\n\t// checks that we detect that the shard-aware-port is unreachable and\n\t// we fall back to the old port.\n\n\t// Retry until we establish one connection per shard\n\n\tfor {\n\t\tif err := waitUntilPoolsStopFilling(context.Background(), sess, 10*time.Second); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif checkIfPoolsAreFull(sess) {\n\t\t\tbreak\n\t\t}\n\n\t\ttriggerPoolsRefill(sess)\n\t}\n}\n\nfunc testShardAwarePortUnusedIfNotEnabled(t *testing.T, makeCluster makeClusterTestFunc) {\n\tdialer := newLoggingTestDialer()\n\tcluster := makeCluster()\n\tcluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())\n\n\t// Explicitly disable the shard aware port\n\tcluster.DisableShardAwarePort = true\n\tcluster.Dialer = dialer\n\n\tuseTLS := cluster.SslOpts != nil\n\n\tsess, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"an error occurred while creating a session: %s\", err)\n\t}\n\tdefer sess.Close()\n\n\tif err := waitUntilPoolsStopFilling(context.Background(), sess, 10*time.Second); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\thosts := sess.hostSource.getHostsList()\n\tfor _, host := range hosts {\n\t\tt.Logf(\"checking host %s\", host.hostname)\n\t\thostPool, _ := sess.pool.getPool(host)\n\n\t\tshardAwarePort := getShardAwarePort(hostPool, useTLS)\n\t\tif shardAwarePort == 0 {\n\t\t\t// Shard aware port was not exposed by the host\n\t\t\tt.Log(\"the host does not expose a shard-aware port, skipping\")\n\t\t\tcontinue\n\t\t}\n\n\t\tevents, _ := dialer.events[host.ConnectAddress().String()]\n\n\t\tfor _, evt := range events {\n\t\t\tif evt.destinationPort == shardAwarePort {\n\t\t\t\tt.Error(\"there was an attempt to connect to a shard aware port, but the configuration does not allow that\")\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc getShardAwarePort(pool *hostConnPool, useTLS bool) uint16 {\n\tif useTLS {\n\t\treturn pool.Host().ScyllaShardAwarePortTLS()\n\t}\n\treturn pool.Host().ScyllaShardAwarePort()\n}\n\nfunc 
triggerPoolsRefill(sess *Session) {\n\thosts := sess.hostSource.getHostsList()\n\tfor _, host := range hosts {\n\t\thostPool, _ := sess.pool.getPool(host)\n\t\tgo hostPool.fill_debounce()\n\t}\n}\n\nfunc waitUntilPoolsStopFilling(ctx context.Context, sess *Session, timeout time.Duration) error {\n\tdeadline := time.After(timeout)\n\tfor !checkIfPoolsStoppedFilling(sess) {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\n\t\tcase <-deadline:\n\t\t\treturn fmt.Errorf(\"failed to fill all connection pools in %s\", timeout)\n\n\t\tcase <-time.After(250 * time.Millisecond):\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc checkIfPoolsStoppedFilling(sess *Session) bool {\n\thosts := sess.hostSource.getHostsList()\n\tfor _, host := range hosts {\n\t\thostPool, _ := sess.pool.getPool(host)\n\n\t\thostPool.mu.Lock()\n\t\tisFilling := hostPool.filling\n\t\thostPool.mu.Unlock()\n\n\t\tif isFilling {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc checkIfPoolsAreFull(sess *Session) bool {\n\thosts := sess.hostSource.getHostsList()\n\tfor _, host := range hosts {\n\t\thostPool, _ := sess.pool.getPool(host)\n\n\t\thostPool.mu.Lock()\n\t\t_, remaining := hostPool.connPicker.Size()\n\t\thostPool.mu.Unlock()\n\n\t\tif remaining > 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc getClusterPort(cluster *ClusterConfig) uint16 {\n\t_, portStr, _ := net.SplitHostPort(cluster.Hosts[0])\n\tport, _ := strconv.Atoi(portStr)\n\n\tif port == 0 {\n\t\t// Assume default if it's not explicitly specified\n\t\treturn 9042\n\t}\n\treturn uint16(port)\n}\n\ntype sourcePortOffByOneTestDialer struct {\n\tScyllaShardAwareDialer\n}\n\nfunc (spobo *sourcePortOffByOneTestDialer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {\n\t// Simulate a NAT that always increases the source port by 1\n\t// This should always result in wrong shard being assigned if host\n\t// has more than one shard.\n\n\tsourcePort := 
ScyllaGetSourcePort(ctx)\n\tif sourcePort > 0 {\n\t\tsourcePort++\n\t}\n\tnewCtx := context.WithValue(ctx, scyllaSourcePortCtx{}, sourcePort)\n\treturn spobo.ScyllaShardAwareDialer.DialContext(newCtx, network, addr)\n}\n\ntype allowOnlyNonShardAwarePortDialer struct {\n\tnet.Dialer\n\n\tallowedPort uint16\n}\n\nfunc (aonsa *allowOnlyNonShardAwarePortDialer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {\n\t// Simulate a network configuration which allows connections to the\n\t// non-shard-aware port, but not to the shard-aware one.\n\n\t_, targetPort, _ := net.SplitHostPort(addr)\n\tif targetPort != strconv.Itoa(int(aonsa.allowedPort)) {\n\t\treturn nil, fmt.Errorf(\"allowOnlyNonShardAwarePortDialer: tried to connect to port %s, but only %d is allowed\", targetPort, aonsa.allowedPort)\n\t}\n\n\treturn aonsa.Dialer.DialContext(ctx, network, addr)\n}\n\ntype loggingTestDialer struct {\n\tScyllaShardAwareDialer\n\n\tevents map[string][]testConnectionEvent\n\tmu     sync.Mutex\n}\n\ntype testConnectionEvent struct {\n\tsourcePort, destinationPort uint16\n}\n\nfunc newLoggingTestDialer() *loggingTestDialer {\n\treturn &loggingTestDialer{\n\t\tevents: make(map[string][]testConnectionEvent),\n\t}\n}\n\nfunc (ltd *loggingTestDialer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {\n\tsourcePort := ScyllaGetSourcePort(ctx)\n\n\tipaddr, destinationPortStr, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdestinationPort, err := strconv.ParseUint(destinationPortStr, 10, 16)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn, err := ltd.ScyllaShardAwareDialer.DialContext(ctx, network, addr)\n\n\tif err == nil {\n\t\tltd.mu.Lock()\n\t\tdefer ltd.mu.Unlock()\n\n\t\tevt := testConnectionEvent{\n\t\t\tsourcePort:      sourcePort,\n\t\t\tdestinationPort: uint16(destinationPort),\n\t\t}\n\t\tltd.events[ipaddr] = append(ltd.events[ipaddr], evt)\n\t}\n\n\treturn conn, err\n}\n"
  },
  {
    "path": "scylla_shard_aware_port_integration_test.go",
    "content": "//go:build integration\n// +build integration\n\npackage gocql\n\nimport \"testing\"\n\nfunc TestShardAwarePortIntegrationNoReconnections(t *testing.T) {\n\tt.Parallel()\n\n\ttestShardAwarePortNoReconnections(t, func() *ClusterConfig {\n\t\tc := createCluster()\n\t\tc.Port = 9042\n\t\treturn c\n\t})\n}\n\nfunc TestShardAwarePortIntegrationMaliciousNAT(t *testing.T) {\n\tt.Parallel()\n\n\ttestShardAwarePortMaliciousNAT(t, func() *ClusterConfig {\n\t\tc := createCluster()\n\t\tc.Port = 9042\n\t\treturn c\n\t})\n}\n\nfunc TestShardAwarePortIntegrationUnreachable(t *testing.T) {\n\tt.Parallel()\n\n\ttestShardAwarePortUnreachable(t, func() *ClusterConfig {\n\t\tc := createCluster()\n\t\tc.Port = 9042\n\t\treturn c\n\t})\n}\n\nfunc TestShardAwarePortIntegrationUnusedIfNotEnabled(t *testing.T) {\n\tt.Parallel()\n\n\ttestShardAwarePortUnusedIfNotEnabled(t, func() *ClusterConfig {\n\t\tc := createCluster()\n\t\tc.Port = 9042\n\t\treturn c\n\t})\n}\n"
  },
  {
    "path": "scylla_shard_aware_port_mocked_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"math/rand\"\n\t\"net\"\n\t\"strconv\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst testShardCount = 3\n\nfunc TestShardAwarePortMockedNoReconnections(t *testing.T) {\n\tt.Parallel()\n\n\ttestWithAndWithoutTLS(t, testShardAwarePortNoReconnections)\n}\n\nfunc TestShardAwarePortMockedMaliciousNAT(t *testing.T) {\n\tt.Parallel()\n\n\ttestWithAndWithoutTLS(t, testShardAwarePortMaliciousNAT)\n}\n\nfunc TestShardAwarePortMockedUnreachable(t *testing.T) {\n\tt.Parallel()\n\n\ttestWithAndWithoutTLS(t, testShardAwarePortUnreachable)\n}\n\nfunc TestShardAwarePortMockedUnusedIfNotEnabled(t *testing.T) {\n\tt.Parallel()\n\n\ttestWithAndWithoutTLS(t, testShardAwarePortUnusedIfNotEnabled)\n}\n\nfunc testWithAndWithoutTLS(t *testing.T, test func(t *testing.T, makeCluster makeClusterTestFunc)) {\n\tt.Run(\"without TLS\", func(t *testing.T) {\n\t\tmakeCluster, stop := startServerWithShardAwarePort(t, false)\n\t\tdefer stop()\n\t\ttest(t, makeCluster)\n\t})\n\n\tt.Run(\"with TLS\", func(t *testing.T) {\n\t\tmakeCluster, stop := startServerWithShardAwarePort(t, true)\n\t\tdefer stop()\n\t\ttest(t, makeCluster)\n\t})\n}\n\nfunc startServerWithShardAwarePort(t testing.TB, useTLS bool) (makeCluster func() *ClusterConfig, stop func()) {\n\tvar shardAwarePort uint32\n\n\tshardAwarePortKey := \"SCYLLA_SHARD_AWARE_PORT\"\n\tif useTLS {\n\t\tshardAwarePortKey = \"SCYLLA_SHARD_AWARE_PORT_SSL\"\n\t}\n\n\tregularSupportedFactory := func(conn net.Conn) map[string][]string {\n\t\t// Assign a random shard. 
Although Scylla uses a deterministic algorithm\n\t\t// for assigning shards, the driver doesn't have enough information\n\t\t// to determine which shard will be assigned - therefore, from its\n\t\t// perspective, it's practically random.\n\t\tsaPort := int(atomic.LoadUint32(&shardAwarePort))\n\n\t\tt.Log(\"Connecting to the regular port\")\n\n\t\tshardID := rand.Intn(testShardCount)\n\n\t\tsupported := getStandardScyllaExtensions(shardID, testShardCount)\n\t\tsupported[shardAwarePortKey] = []string{strconv.Itoa(saPort)}\n\t\treturn supported\n\t}\n\n\tshardAwareSupportedFactory := func(conn net.Conn) map[string][]string {\n\t\t// Shard ID depends on the source port.\n\t\tsaPort := int(atomic.LoadUint32(&shardAwarePort))\n\n\t\tt.Log(\"Connecting to the shard-aware port\")\n\n\t\tport := mustParsePortFromAddr(conn.RemoteAddr().String())\n\t\tshardID := scyllaShardForSourcePort(port, testShardCount)\n\n\t\tsupported := getStandardScyllaExtensions(shardID, testShardCount)\n\t\tsupported[shardAwarePortKey] = []string{strconv.Itoa(saPort)}\n\t\treturn supported\n\t}\n\n\tmakeServer := func(factory testSupportedFactory) *TestServer {\n\t\tif useTLS {\n\t\t\treturn NewSSLTestServerWithSupportedFactory(t,\n\t\t\t\tdefaultProto, context.Background(), factory)\n\t\t}\n\t\treturn NewTestServerWithAddressAndSupportedFactory(\"127.0.0.1:0\", t,\n\t\t\tdefaultProto, context.Background(), factory)\n\t}\n\n\tsrvRegular := makeServer(regularSupportedFactory)\n\tsrvShardAware := makeServer(shardAwareSupportedFactory)\n\n\tsaPort := mustParsePortFromAddr(srvShardAware.Address)\n\tatomic.StoreUint32(&shardAwarePort, uint32(saPort))\n\n\tt.Logf(\"regular port address: %s, shard aware port address: %s\",\n\t\tsrvRegular.Address, srvShardAware.Address)\n\n\tmakeCluster = func() *ClusterConfig {\n\t\tvar cluster *ClusterConfig\n\t\tif useTLS {\n\t\t\tcluster = createTestSslCluster(srvRegular.Address, defaultProto, false)\n\n\t\t\t// Give a long timeout. 
For some reason, closing tls connections\n\t\t\t// result in an i/o timeout error, and this mitigates this problem.\n\t\t\tcluster.Timeout = 1 * time.Minute\n\t\t} else {\n\t\t\tcluster = testCluster(defaultProto, srvRegular.Address)\n\t\t}\n\t\treturn cluster\n\t}\n\n\tstop = func() {\n\t\tsrvRegular.Stop()\n\t\tsrvShardAware.Stop()\n\t}\n\n\treturn makeCluster, stop\n}\n\nfunc mustParsePortFromAddr(addr string) uint16 {\n\t_, portStr, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tport, err := strconv.ParseUint(portStr, 10, 16)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn uint16(port)\n}\n\nfunc getStandardScyllaExtensions(shardID, shardCount int) map[string][]string {\n\treturn map[string][]string{\n\t\t\"SCYLLA_SHARD\":               []string{strconv.Itoa(shardID)},\n\t\t\"SCYLLA_NR_SHARDS\":           []string{strconv.Itoa(shardCount)},\n\t\t\"SCYLLA_PARTITIONER\":         []string{\"org.apache.cassandra.dht.Murmur3Partitioner\"},\n\t\t\"SCYLLA_SHARDING_ALGORITHM\":  []string{\"biased-token-round-robin\"},\n\t\t\"SCYLLA_SHARDING_IGNORE_MSB\": []string{\"12\"},\n\t}\n}\n"
  },
  {
    "path": "scylla_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\"\n\t\"net\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/gocql/gocql/internal/streams\"\n)\n\nfunc TestScyllaConnPickerPickNilToken(t *testing.T) {\n\tt.Parallel()\n\n\ts := scyllaConnPicker{\n\t\tnrShards:  4,\n\t\tmsbIgnore: 12,\n\t}\n\n\tt.Run(\"no conns\", func(t *testing.T) {\n\t\ts.conns = []*Conn{{\n\t\t\tstreams: streams.New(),\n\t\t}}\n\t\tif s.Pick(Token(nil), nil) != s.conns[0] {\n\t\t\tt.Fatal(\"expected connection\")\n\t\t}\n\t})\n\n\tt.Run(\"one shard\", func(t *testing.T) {\n\t\ts.conns = []*Conn{{\n\t\t\tstreams: streams.New(),\n\t\t}}\n\t\tif s.Pick(Token(nil), nil) != s.conns[0] {\n\t\t\tt.Fatal(\"expected connection\")\n\t\t}\n\t})\n\n\tt.Run(\"multiple shards\", func(t *testing.T) {\n\t\ts.conns = []*Conn{nil, {\n\t\t\tstreams: streams.New(),\n\t\t}}\n\t\tif s.Pick(Token(nil), nil) != s.conns[1] {\n\t\t\tt.Fatal(\"expected connection\")\n\t\t}\n\t\tif s.Pick(Token(nil), nil) != s.conns[1] {\n\t\t\tt.Fatal(\"expected connection\")\n\t\t}\n\t})\n\n\tt.Run(\"multiple shards no conns\", func(t *testing.T) {\n\t\ts.conns = []*Conn{nil, nil}\n\t\tif s.Pick(Token(nil), nil) != nil {\n\t\t\tt.Fatal(\"expected nil\")\n\t\t}\n\t\tif s.Pick(Token(nil), nil) != nil {\n\t\t\tt.Fatal(\"expected nil\")\n\t\t}\n\t})\n}\n\nfunc hammerConnPicker(t *testing.T, wg *sync.WaitGroup, s *scyllaConnPicker, loops int) {\n\tt.Helper()\n\tfor i := 0; i < loops; i++ {\n\t\tif c := s.Pick(nil, nil); c == nil {\n\t\t\tt.Error(\"unexpected nil\")\n\t\t}\n\t}\n\twg.Done()\n}\n\nfunc TestScyllaConnPickerHammerPickNilToken(t *testing.T) {\n\tt.Parallel()\n\n\ts := scyllaConnPicker{\n\t\tnrShards:  4,\n\t\tmsbIgnore: 12,\n\t}\n\ts.conns = make([]*Conn, 100)\n\tfor i := range s.conns {\n\t\tif i%7 == 0 
{\n\t\t\tcontinue\n\t\t}\n\t\ts.conns[i] = &Conn{\n\t\t\tstreams: streams.New(),\n\t\t}\n\t}\n\n\tn := runtime.GOMAXPROCS(0)\n\tloops := 10000 / n\n\n\tvar wg sync.WaitGroup\n\twg.Add(n)\n\tfor i := 0; i < n; i++ {\n\t\tgo hammerConnPicker(t, &wg, &s, loops)\n\t}\n\twg.Wait()\n}\n\nfunc TestScyllaConnPickerRemove(t *testing.T) {\n\tt.Parallel()\n\n\ts := scyllaConnPicker{\n\t\tnrShards:  4,\n\t\tmsbIgnore: 12,\n\t}\n\n\tconn := mockConn(0)\n\ts.Put(conn)\n\ts.Put(mockConn(1))\n\n\tif s.nrConns != 2 {\n\t\tt.Error(\"added 2 connections, expected connection count to be 2\")\n\t}\n\n\ts.Remove(conn)\n\tif s.nrConns != 1 {\n\t\tt.Errorf(\"removed 1 connection, expected connection count to be 1 but was %d\", s.nrConns)\n\t}\n\n\tif s.conns[0] != nil {\n\t\tt.Errorf(\"Expected %v to be removed from it's position\", conn)\n\t}\n}\n\nfunc TestScyllaConnPickerShardOf(t *testing.T) {\n\tt.Parallel()\n\n\ts := scyllaConnPicker{\n\t\tnrShards:  4,\n\t\tmsbIgnore: 12,\n\t}\n\tfor _, test := range scyllaShardOfTests {\n\t\tif shard := s.shardOf(int64Token(test.token)); shard != test.shard {\n\t\t\tt.Errorf(\"wrong scylla shard calculated for token %d, expected %d, got %d\", test.token, test.shard, shard)\n\t\t}\n\t}\n}\n\nfunc TestScyllaRandomConnPicker(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"max iterations\", func(t *testing.T) {\n\t\ts := &scyllaConnPicker{\n\t\t\tnrShards:  4,\n\t\t\tmsbIgnore: 12,\n\t\t\tpos:       math.MaxUint64,\n\t\t\tconns:     []*Conn{nil, mockConn(1)},\n\t\t}\n\n\t\tif s.Pick(Token(nil), nil) == nil {\n\t\t\tt.Fatal(\"expected connection\")\n\t\t}\n\t})\n\n\tt.Run(\"async access of max iterations\", func(t *testing.T) {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\t\tdefer cancel()\n\n\t\ts := &scyllaConnPicker{\n\t\t\tnrShards:  4,\n\t\t\tmsbIgnore: 12,\n\t\t\tpos:       math.MaxUint64,\n\t\t\tconns:     []*Conn{nil, mockConn(1)},\n\t\t}\n\n\t\tvar wg sync.WaitGroup\n\t\tconnCh := make(chan *Conn, 9)\n\t\tfor i 
:= 0; i < 3; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfor i := 0; i < 3; i++ {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase connCh <- s.Pick(Token(nil), nil):\n\t\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\t\tclose(connCh)\n\n\t\tif s.pos != 8 {\n\t\t\tt.Fatalf(\"expected position to be 8 | actual %d\", s.pos)\n\t\t}\n\t\tif len(connCh) != 9 {\n\t\t\tt.Fatalf(\"expected 9 connection picks, got %d\", len(connCh))\n\t\t}\n\t\tfor conn := range connCh {\n\t\t\tif conn == nil {\n\t\t\t\tt.Fatal(\"expected connection, got nil\")\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc TestScyllaRateLimitingExtParsing(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"init framer without cql extensions\", func(t *testing.T) {\n\t\t// mock connection without cql extensions, expected to have the `rateLimitingErrorCode`\n\t\t// field set to 0 (default, signifying no code)\n\t\tconn := mockConn(0)\n\t\tf := newFramerWithExts(conn.compressor, conn.version, conn.cqlProtoExts, conn.logger)\n\t\tif f.rateLimitingErrorCode != 0 {\n\t\t\tt.Error(\"expected to have rateLimitingErrorCode set to 0 (no code) after framer init\")\n\t\t}\n\t})\n\n\tconst mockCode = 42\n\tt.Run(\"init framer with cql extensions\", func(t *testing.T) {\n\t\t// create a mock connection, add `lwt` cql protocol extension to it,\n\t\t// ensure that framer recognizes this extension and adjusts appropriately\n\t\tconn := mockConn(0)\n\t\tconn.cqlProtoExts = []cqlProtocolExtension{\n\t\t\t&rateLimitExt{\n\t\t\t\trateLimitErrorCode: mockCode,\n\t\t\t},\n\t\t}\n\t\tframerWithRateLimitExt := newFramerWithExts(conn.compressor, conn.version, conn.cqlProtoExts, conn.logger)\n\t\tif framerWithRateLimitExt.rateLimitingErrorCode != mockCode {\n\t\t\tt.Error(\"expected to have rateLimitingErrorCode set to mockCode after framer init\")\n\t\t}\n\t})\n}\n\nfunc TestScyllaLWTExtParsing(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"init framer without cql extensions\", func(t *testing.T) 
{\n\t\t// mock connection without cql extensions, expected not to have\n\t\t// the `flagLWT` field being set in the framer created out of it\n\t\tconn := mockConn(0)\n\t\tf := newFramerWithExts(conn.compressor, conn.version, conn.cqlProtoExts, conn.logger)\n\t\tif f.flagLWT != 0 {\n\t\t\tt.Error(\"expected to have LWT flag uninitialized after framer init\")\n\t\t}\n\t})\n\n\tt.Run(\"init framer with cql extensions\", func(t *testing.T) {\n\t\t// create a mock connection, add `lwt` cql protocol extension to it,\n\t\t// ensure that framer recognizes this extension and adjusts appropriately\n\t\tconn := mockConn(0)\n\t\tconn.cqlProtoExts = []cqlProtocolExtension{\n\t\t\t&lwtAddMetadataMarkExt{\n\t\t\t\tlwtOptMetaBitMask: 1,\n\t\t\t},\n\t\t}\n\t\tframerWithLwtExt := newFramerWithExts(conn.compressor, conn.version, conn.cqlProtoExts, conn.logger)\n\t\tif framerWithLwtExt.flagLWT == 0 {\n\t\t\tt.Error(\"expected to have LWT flag to be set after framer init\")\n\t\t}\n\t})\n}\n\nfunc TestScyllaPortIterator(t *testing.T) {\n\tt.Parallel()\n\n\tfor _shardCount := 1; _shardCount <= 64; _shardCount++ {\n\t\tshardCount := _shardCount\n\t\tt.Run(fmt.Sprintf(\"shard count %d\", shardCount), func(t *testing.T) {\n\t\t\tfor shardID := 0; shardID < shardCount; shardID++ {\n\t\t\t\t// Count by brute force ports that can be used to connect to requested shard\n\t\t\t\texpectedPortCount := 0\n\t\t\t\tfor i := scyllaPortBasedBalancingMin; i <= scyllaPortBasedBalancingMax; i++ {\n\t\t\t\t\tif i%shardCount == shardID {\n\t\t\t\t\t\texpectedPortCount++\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Enumerate all ports using the port iterator and assert various things\n\t\t\t\titerator := newScyllaPortIterator(shardID, shardCount)\n\t\t\t\tactualPortCount := 0\n\t\t\t\tpreviousPort := 0\n\n\t\t\t\tfor {\n\t\t\t\t\tportU16, ok := iterator.Next()\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\n\t\t\t\t\tport := int(portU16)\n\n\t\t\t\t\tif port < scyllaPortBasedBalancingMin || port > 
scyllaPortBasedBalancingMax {\n\t\t\t\t\t\tt.Errorf(\"expected port %d generated from iterator to be in range [%d..%d]\",\n\t\t\t\t\t\t\tport, scyllaPortBasedBalancingMin, scyllaPortBasedBalancingMax)\n\t\t\t\t\t}\n\n\t\t\t\t\tif port <= previousPort {\n\t\t\t\t\t\tt.Errorf(\"expected port %d generated from iterator to be larger than the previous generated port %d\",\n\t\t\t\t\t\t\tport, previousPort)\n\t\t\t\t\t}\n\n\t\t\t\t\tactualShardOfPort := scyllaShardForSourcePort(portU16, shardCount)\n\t\t\t\t\tif actualShardOfPort != shardID {\n\t\t\t\t\t\tt.Errorf(\"expected port %d returned from iterator to belong to shard %d, but belongs to %d\",\n\t\t\t\t\t\t\tport, shardID, actualShardOfPort)\n\t\t\t\t\t}\n\n\t\t\t\t\tpreviousPort = port\n\t\t\t\t\tactualPortCount++\n\t\t\t\t}\n\n\t\t\t\tif expectedPortCount != actualPortCount {\n\t\t\t\t\tt.Errorf(\"expected port iterator to generate %d ports, but got %d\",\n\t\t\t\t\t\texpectedPortCount, actualPortCount)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestScyllaConnPickerHandleShardCountChange(t *testing.T) {\n\ttests := []struct {\n\t\tname             string\n\t\tinitialShards    int\n\t\tnewShards        int\n\t\tinitialConns     []int // shard IDs of initial connections\n\t\texpectedMigrated int\n\t\texpectedClosed   int\n\t}{\n\t\t{\n\t\t\tname:             \"shard increase from 4 to 8\",\n\t\t\tinitialShards:    4,\n\t\t\tnewShards:        8,\n\t\t\tinitialConns:     []int{0, 2, 3},\n\t\t\texpectedMigrated: 3, // All initial connections survive\n\t\t\texpectedClosed:   0,\n\t\t},\n\t\t{\n\t\t\tname:             \"shard decrease from 8 to 4\",\n\t\t\tinitialShards:    8,\n\t\t\tnewShards:        4,\n\t\t\tinitialConns:     []int{0, 2, 5, 7},\n\t\t\texpectedMigrated: 2, // Only shards 0, 2 survive\n\t\t\texpectedClosed:   2, // Shards 5, 7 get closed\n\t\t},\n\t\t{\n\t\t\tname:             \"no change same count\",\n\t\t\tinitialShards:    8,\n\t\t\tnewShards:        8,\n\t\t\tinitialConns:     []int{1, 3, 
5},\n\t\t\texpectedMigrated: 4, // All initial connections survive + new one\n\t\t\texpectedClosed:   0,\n\t\t},\n\t\t{\n\t\t\tname:             \"massive decrease from 16 to 2\",\n\t\t\tinitialShards:    16,\n\t\t\tnewShards:        2,\n\t\t\tinitialConns:     []int{0, 1, 5, 8, 12, 15},\n\t\t\texpectedMigrated: 2, // Only shards 0, 1 survive\n\t\t\texpectedClosed:   4, // Shards 5, 8, 12, 15 get closed\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tlogger := &testLogger{}\n\t\t\tpicker := &scyllaConnPicker{\n\t\t\t\tlogger:                     logger,\n\t\t\t\tdisableShardAwarePortUntil: new(atomic.Value),\n\t\t\t\thostId:                     tUUID(99),\n\t\t\t\taddress:                    \"192.168.1.1:9042\", // Regular port\n\t\t\t\tconns:                      make([]*Conn, tt.initialShards),\n\t\t\t\texcessConns:                make([]*Conn, 0),\n\t\t\t\tnrShards:                   tt.initialShards,\n\t\t\t\tmsbIgnore:                  12,\n\t\t\t\tnrConns:                    0,\n\t\t\t\tpos:                        0,\n\t\t\t\tlastAttemptedShard:         0,\n\t\t\t\tshardAwarePortDisabled:     false,\n\t\t\t\texcessConnsLimitRate:       0.1,\n\t\t\t}\n\t\t\tpicker.disableShardAwarePortUntil.Store(time.Time{})\n\n\t\t\tvar connectionsToCheck []*Conn\n\n\t\t\t// Add initial connections\n\t\t\tfor _, shardID := range tt.initialConns {\n\t\t\t\tconn := mockConnForPicker(shardID, tt.initialShards)\n\t\t\t\terr := picker.Put(conn)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tif shardID >= tt.newShards {\n\t\t\t\t\tconnectionsToCheck = append(connectionsToCheck, conn)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Verify initial state\n\t\t\tassert.Equal(t, tt.initialShards, picker.nrShards)\n\t\t\tassert.Equal(t, len(tt.initialConns), picker.nrConns)\n\n\t\t\t// Execute topology change\n\t\t\tnewConn := mockConnForPicker(0, tt.newShards)\n\t\t\terr := picker.Put(newConn)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// 
Allow background goroutine to complete\n\t\t\ttime.Sleep(50 * time.Millisecond)\n\n\t\t\t// Verify new topology\n\t\t\tassert.Equal(t, tt.newShards, picker.nrShards)\n\t\t\tassert.Equal(t, len(picker.conns), tt.newShards)\n\n\t\t\t// Count migrated connections\n\t\t\tmigratedCount := 0\n\t\t\tfor _, conn := range picker.conns {\n\t\t\t\tif conn != nil {\n\t\t\t\t\tmigratedCount++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tassert.Equal(t, tt.expectedMigrated, migratedCount)\n\n\t\t\t// Verify connections that should be closed are actually closed\n\t\t\tclosedCount := 0\n\t\t\tfor _, conn := range connectionsToCheck {\n\t\t\t\tif conn.Closed() {\n\t\t\t\t\tclosedCount++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tassert.Equal(t, tt.expectedClosed, closedCount,\n\t\t\t\t\"Expected %d connections to be closed, but %d were closed\",\n\t\t\t\ttt.expectedClosed, closedCount)\n\t\t})\n\t}\n}\n\nfunc mockConn(shard int) *Conn {\n\treturn &Conn{\n\t\tstreams: streams.New(),\n\t\tscyllaSupported: ScyllaConnectionFeatures{\n\t\t\tshard: shard,\n\t\t\tScyllaHostFeatures: ScyllaHostFeatures{\n\t\t\t\tnrShards:          4,\n\t\t\t\tmsbIgnore:         12,\n\t\t\t\tpartitioner:       \"org.apache.cassandra.dht.Murmur3Partitioner\",\n\t\t\t\tshardingAlgorithm: \"biased-token-round-robin\",\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc mockConnForPicker(shard, nrShards int) *Conn {\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tconn1, conn2 := net.Pipe()\n\t_ = conn2.Close()\n\n\treturn &Conn{\n\t\tscyllaSupported: ScyllaConnectionFeatures{\n\t\t\tshard: shard,\n\t\t\tScyllaHostFeatures: ScyllaHostFeatures{\n\t\t\t\tnrShards:  nrShards,\n\t\t\t\tmsbIgnore: 12,\n\t\t\t},\n\t\t},\n\t\tconn:    conn1,\n\t\taddr:    fmt.Sprintf(\"192.168.1.%d:9042\", shard+1),\n\t\tclosed:  false,\n\t\tmu:      sync.Mutex{},\n\t\tlogger:  &testLogger{},\n\t\tctx:     ctx,\n\t\tcancel:  cancel,\n\t\tcalls:   make(map[int]*callReq),\n\t\tstreams: streams.New(),\n\t}\n}\n"
  },
  {
    "path": "scylla_tokens_test.go",
    "content": "package gocql\n\nvar scyllaShardOfTests = []struct {\n\ttoken int64\n\tshard int\n}{\n\t{-9219783007514621794, 3},\n\t{-9218910161940551519, 3},\n\t{-9217148724365525195, 1},\n\t{-9215501124608928187, 2},\n\t{-9212118992469829075, 1},\n\t{-9212030144112870790, 2},\n\t{-9211258281348643746, 2},\n\t{-9210074338140524409, 3},\n\t{-9209452951616763088, 0},\n\t{-9209003334210078074, 0},\n\t{-9208107013070640916, 1},\n\t{-9206869989664156496, 2},\n\t{-9203470406378964547, 1},\n\t{-9202339907670320874, 2},\n\t{-9202275438383790249, 2},\n\t{-9200541087111340967, 0},\n\t{-9198480270279608398, 2},\n\t{-9196697686091266284, 3},\n\t{-9196174198992597486, 0},\n\t{-9194615357326961625, 1},\n\t{-9193113491748958345, 2},\n\t{-9192459871981514371, 3},\n\t{-9190526603941545655, 1},\n\t{-9189326718574265489, 2},\n\t{-9187895885180493903, 3},\n\t{-9186904031908562648, 0},\n\t{-9185241105530461414, 1},\n\t{-9185031262400478274, 2},\n\t{-9184265387969214261, 2},\n\t{-9180945271818496043, 1},\n\t{-9177781200259901224, 0},\n\t{-9175479359685319229, 2},\n\t{-9174998497001176631, 2},\n\t{-9172370230482781505, 1},\n\t{-9168648833848204743, 0},\n\t{-9167865830883491911, 1},\n\t{-9165003895616112063, 3},\n\t{-9163092042366917964, 1},\n\t{-9159429206437382746, 0},\n\t{-9156628358613672159, 3},\n\t{-9145990555828476571, 0},\n\t{-9145984070346097828, 0},\n\t{-9145653916006550014, 1},\n\t{-9145227631306399119, 1},\n\t{-9144242661317651188, 2},\n\t{-9142801888302425882, 3},\n\t{-9141024881712510010, 1},\n\t{-9135654074943507558, 1},\n\t{-9134471177307058759, 2},\n\t{-9133450519921711534, 3},\n\t{-9132179582023424885, 0},\n\t{-9128845389688502876, 3},\n\t{-9126855626742569135, 1},\n\t{-9126468196331425497, 2},\n\t{-9125869882319112283, 2},\n\t{-9122205444002044894, 1},\n\t{-9122124704753907281, 1},\n\t{-9119852417382055127, 3},\n\t{-9108996115070661611, 1},\n\t{-9105962620655078102, 0},\n\t{-9104488397628566176, 1},\n\t{-9102490257548627547, 3},\n\t{-9100044309397920724, 
1},\n\t{-9097647687687441987, 3},\n\t{-9096130690687366280, 1},\n\t{-9094167670298826740, 2},\n\t{-9092647747436307833, 0},\n\t{-9090605585241100926, 1},\n\t{-9090460421272311919, 2},\n\t{-9089682013175894585, 2},\n\t{-9086591152376149795, 1},\n\t{-9086156579028515398, 1},\n\t{-9085700796937931307, 2},\n\t{-9084840568084277047, 3},\n\t{-9083314180003896592, 0},\n\t{-9083218787485222603, 0},\n\t{-9079203666276610372, 0},\n\t{-9077619300681531189, 1},\n\t{-9073899858137616676, 0},\n\t{-9072429865350033657, 2},\n\t{-9070732684693488973, 3},\n\t{-9068162850065914690, 1},\n\t{-9066106911671959188, 3},\n\t{-9065794362167854621, 3},\n\t{-9060720433166640362, 0},\n\t{-9059058429610502260, 1},\n\t{-9057041435787997979, 3},\n\t{-9055571171737281638, 1},\n\t{-9053550620084185626, 2},\n\t{-9051961176178065334, 0},\n\t{-9050949256565904929, 1},\n\t{-9044743511154156117, 2},\n\t{-9034146976767032545, 0},\n\t{-9032837933556123687, 1},\n\t{-9030843123924115008, 3},\n\t{-9028318934664121759, 1},\n\t{-9027195046628036333, 2},\n\t{-9025877019451973201, 3},\n\t{-9021845321518921582, 2},\n\t{-9021684946663807180, 3},\n\t{-9021391864262786543, 3},\n\t{-9018654448679225135, 1},\n\t{-9017910576289779427, 2},\n\t{-9011578186521064224, 0},\n\t{-9010196012589019855, 1},\n\t{-9008364600031197426, 2},\n\t{-9007249462924898062, 3},\n\t{-9001909576140939925, 0},\n\t{-9001857208489190644, 0},\n\t{-9001835025464037958, 0},\n\t{-9001169953992264486, 1},\n\t{-9000459115829895779, 1},\n\t{-8997727458472373131, 0},\n\t{-8997672619639292323, 0},\n\t{-8996600295949411295, 1},\n\t{-8995689744426462869, 2},\n\t{-8994008005758461826, 3},\n\t{-8992681528412498651, 0},\n\t{-8991768123515142274, 1},\n\t{-8991467277747639021, 1},\n\t{-8990822821394141367, 2},\n\t{-8989729945845236311, 3},\n\t{-8988845307229810281, 0},\n\t{-8988427316811599555, 0},\n\t{-8987983753017434450, 1},\n\t{-8983115793326873493, 1},\n\t{-8983086844895242739, 1},\n\t{-8981890906573758340, 2},\n\t{-8980395476532088132, 
3},\n\t{-8975145032863711407, 0},\n\t{-8974746983465009593, 0},\n\t{-8974164694278947968, 1},\n\t{-8973772633339379555, 1},\n\t{-8972366641844733980, 2},\n\t{-8970803836946395296, 0},\n\t{-8970535559841294174, 0},\n\t{-8968974983101949757, 1},\n\t{-8968892069542755690, 2},\n\t{-8960650631469933104, 1},\n\t{-8959255750778599523, 2},\n\t{-8953396776625136216, 3},\n\t{-8951615565843131215, 1},\n\t{-8951102612976094316, 1},\n\t{-8949963041853689354, 2},\n\t{-8949810590745335399, 2},\n\t{-8949784640726681190, 2},\n\t{-8949334117415806646, 3},\n\t{-8946966673935406601, 1},\n\t{-8946767485005137880, 1},\n\t{-8945963928449701913, 2},\n\t{-8945371869540957686, 2},\n\t{-8945239009942620356, 3},\n\t{-8945116731660105709, 3},\n\t{-8944475579543785027, 3},\n\t{-8943692667418059849, 0},\n\t{-8942901061978944084, 1},\n\t{-8939748931550317608, 3},\n\t{-8938679124732642340, 0},\n\t{-8937971959266143183, 1},\n\t{-8937104296003173554, 2},\n\t{-8934283921670948718, 0},\n\t{-8933054975554443806, 1},\n\t{-8931482356358984226, 3},\n\t{-8928959646490609172, 1},\n\t{-8928387227904079120, 1},\n\t{-8927549807600016124, 2},\n\t{-8926697058461788553, 3},\n\t{-8925747283359504181, 0},\n\t{-8923380316857811053, 2},\n\t{-8922582611698436293, 3},\n\t{-8918171797824175287, 3},\n\t{-8915814954952854897, 1},\n\t{-8912752475837100265, 3},\n\t{-8912540227104816085, 0},\n\t{-8911066216677875757, 1},\n\t{-8910551173422668769, 1},\n\t{-8908802930504231128, 3},\n\t{-8904966686288057759, 2},\n\t{-8904372279862096262, 3},\n\t{-8903824986915019470, 3},\n\t{-8903360949336071968, 0},\n\t{-8898438179411912073, 0},\n\t{-8896914538224349415, 1},\n\t{-8896360492911583677, 2},\n\t{-8895591046743457495, 3},\n\t{-8891175580066418563, 3},\n\t{-8890180610327932722, 3},\n\t{-8888744564811465701, 1},\n\t{-8888564657618562037, 1},\n\t{-8886682084755450990, 3},\n\t{-8882606779991583263, 2},\n\t{-8882062035164834986, 3},\n\t{-8877728006448550298, 2},\n\t{-8876462831121647619, 0},\n\t{-8873585190160663864, 
2},\n\t{-8869785478182536414, 2},\n\t{-8868029989929923849, 3},\n\t{-8865831648690043653, 1},\n\t{-8860825946126019428, 2},\n\t{-8859003956954505643, 3},\n\t{-8858001676873636536, 0},\n\t{-8854951543355195101, 3},\n\t{-8849938990466738405, 3},\n\t{-8849704314664442652, 3},\n\t{-8847866554259590809, 1},\n\t{-8846974795410482266, 2},\n\t{-8845685019202984672, 3},\n\t{-8844882533362836650, 0},\n\t{-8844659956697746942, 0},\n\t{-8844257545131977123, 0},\n\t{-8843034454487328123, 1},\n\t{-8842601616960620517, 2},\n\t{-8839039456611521855, 1},\n\t{-8838859987399394830, 1},\n\t{-8833124622276138633, 2},\n\t{-8830032519419637331, 1},\n\t{-8822439709392298791, 0},\n\t{-8821427899115331156, 0},\n\t{-8819951120330048172, 2},\n\t{-8815270273792620404, 2},\n\t{-8813833124794343910, 3},\n\t{-8812664555648833579, 0},\n\t{-8811727670611115180, 1},\n\t{-8808672535964284764, 0},\n\t{-8808601300750787698, 0},\n\t{-8808031375963918595, 0},\n\t{-8807968659296814722, 0},\n\t{-8806675296706545492, 2},\n\t{-8806101794329977984, 2},\n\t{-8801970776860053540, 2},\n\t{-8798628438609891883, 1},\n\t{-8789553083015140754, 1},\n\t{-8789494705227999890, 1},\n\t{-8787034000357658753, 3},\n\t{-8775385252264980252, 1},\n\t{-8772832906275798096, 0},\n\t{-8772495734273833210, 0},\n\t{-8772174643258493699, 0},\n\t{-8771653192088662075, 1},\n\t{-8769354943203848641, 3},\n\t{-8768569886866708171, 3},\n\t{-8766495561833593642, 1},\n\t{-8765537918602003531, 2},\n\t{-8764303545778534735, 3},\n\t{-8761220513767182658, 2},\n\t{-8761164659607525373, 2},\n\t{-8758942732260072898, 0},\n\t{-8756882226765508222, 2},\n\t{-8755213741187638266, 3},\n\t{-8755016822125610320, 3},\n\t{-8753980144030598881, 0},\n\t{-8751047520592856827, 3},\n\t{-8750693121820192812, 3},\n\t{-8749876757270066354, 0},\n\t{-8749082587872467859, 1},\n\t{-8747183699980377729, 2},\n\t{-8746837533061351316, 3},\n\t{-8745902267294206290, 0},\n\t{-8744901735518734872, 0},\n\t{-8744279366062525256, 1},\n\t{-8744242917179719505, 
1},\n\t{-8742589525335533309, 3},\n\t{-8738918780723998982, 2},\n\t{-8738640371071776390, 2},\n\t{-8738320438179690851, 2},\n\t{-8737913853561915873, 3},\n\t{-8737654491999137154, 3},\n\t{-8736425500288678266, 0},\n\t{-8735511428551356372, 1},\n\t{-8734226567915098893, 2},\n\t{-8732656541702377155, 3},\n\t{-8730780887984762083, 1},\n\t{-8730032953181465675, 2},\n\t{-8729728248134888651, 2},\n\t{-8729537205320247895, 2},\n\t{-8726631609258350604, 1},\n\t{-8719673675991649792, 3},\n\t{-8715942829879108231, 2},\n\t{-8713817866637997581, 0},\n\t{-8713235141188388676, 1},\n\t{-8710026033658477134, 3},\n\t{-8709455175357331570, 0},\n\t{-8708478959256766071, 1},\n\t{-8705845965690512004, 3},\n\t{-8702764679476038089, 2},\n\t{-8702294914617418886, 2},\n\t{-8698219858997879183, 2},\n\t{-8695842967577853796, 0},\n\t{-8694289930557945258, 1},\n\t{-8693343273833565723, 2},\n\t{-8691799375689065662, 0},\n\t{-8690008299103932739, 1},\n\t{-8686822069518232256, 0},\n\t{-8686553803468375282, 0},\n\t{-8684269390224206829, 2},\n\t{-8681804423163734411, 1},\n\t{-8677003794839760523, 1},\n\t{-8672349742562912316, 1},\n\t{-8669333074300031356, 0},\n\t{-8668870770756856653, 0},\n\t{-8668772057914638282, 0},\n\t{-8661513751550755684, 3},\n\t{-8660407108660952693, 0},\n\t{-8654356242011138044, 1},\n\t{-8653688090610516442, 1},\n\t{-8650756654208074358, 0},\n\t{-8649474919300467994, 1},\n\t{-8643829250806418031, 2},\n\t{-8642446847437341342, 3},\n\t{-8640485967118393069, 1},\n\t{-8638398587362786176, 3},\n\t{-8634498893535145030, 3},\n\t{-8631793409250861568, 1},\n\t{-8630678691436498925, 2},\n\t{-8629806435278869430, 3},\n\t{-8628844787156112881, 0},\n\t{-8628451325709326965, 0},\n\t{-8627445306253582442, 1},\n\t{-8625210573491404264, 3},\n\t{-8624038236276653656, 0},\n\t{-8623365200431438942, 0},\n\t{-8622885554884634024, 1},\n\t{-8622176728713857632, 1},\n\t{-8622087440407858116, 2},\n\t{-8620819812742745793, 3},\n\t{-8619341306075018491, 0},\n\t{-8618367675718400367, 
1},\n\t{-8617788353003901138, 1},\n\t{-8616669637118265485, 2},\n\t{-8613485937573063009, 1},\n\t{-8613196177022776082, 1},\n\t{-8609180518710886544, 1},\n\t{-8604283912744699815, 1},\n\t{-8601757538878082634, 0},\n\t{-8599932959975673411, 1},\n\t{-8598683624458050255, 2},\n\t{-8597972294187660525, 3},\n\t{-8590473406867427513, 2},\n\t{-8583864590204372642, 3},\n\t{-8582803546476954912, 0},\n\t{-8582588166754404233, 1},\n\t{-8582515637205675089, 1},\n\t{-8579055303790739373, 0},\n\t{-8577547012485896127, 1},\n\t{-8573937138034522235, 0},\n\t{-8573655228765444718, 1},\n\t{-8573379270252509593, 1},\n\t{-8572086417016232524, 2},\n\t{-8568585774495497786, 1},\n\t{-8568438303436956086, 1},\n\t{-8567484190854003453, 2},\n\t{-8566370499098902907, 3},\n\t{-8565099365536511691, 0},\n\t{-8564295532138797541, 1},\n\t{-8562432275188785766, 3},\n\t{-8561569892018217133, 3},\n\t{-8560590287561398043, 0},\n\t{-8560347848166038849, 0},\n\t{-8559319822564321006, 1},\n\t{-8557058741648472552, 3},\n\t{-8555874984190908568, 0},\n\t{-8554443619930992800, 2},\n\t{-8553004642340115293, 3},\n\t{-8552467776436301709, 3},\n\t{-8551711348753821057, 0},\n\t{-8551649181508365590, 0},\n\t{-8549496230554455523, 2},\n\t{-8549167782466536956, 2},\n\t{-8548665128497669648, 3},\n\t{-8547692886936802752, 0},\n\t{-8545970590581923360, 1},\n\t{-8542036410629753097, 1},\n\t{-8540465467042529887, 2},\n\t{-8539889868690973983, 3},\n\t{-8539791775427497845, 3},\n\t{-8539582955068356044, 3},\n\t{-8539073847134826047, 3},\n\t{-8533287058408907839, 0},\n\t{-8533083406846856996, 1},\n\t{-8532946484795827615, 1},\n\t{-8532203920559643847, 1},\n\t{-8530587345278308289, 3},\n\t{-8524837578608761942, 0},\n\t{-8521118904139218716, 3},\n\t{-8520455435863883396, 0},\n\t{-8519137813822130633, 1},\n\t{-8514806405479319244, 1},\n\t{-8514796186603935400, 1},\n\t{-8514050721846210610, 2},\n\t{-8513853483467452066, 2},\n\t{-8512439189184951872, 3},\n\t{-8509523127630632020, 2},\n\t{-8506436841734745781, 
0},\n\t{-8505035946016406028, 2},\n\t{-8504262047033039069, 2},\n\t{-8502506709139732540, 0},\n\t{-8501410389981439153, 1},\n\t{-8500949573468944775, 1},\n\t{-8499332519511648440, 3},\n\t{-8498559063575674910, 3},\n\t{-8497974368011647622, 0},\n\t{-8496648776118834308, 1},\n\t{-8495045114587314715, 2},\n\t{-8490103713184203144, 3},\n\t{-8485461275243533430, 3},\n\t{-8484091542298091675, 0},\n\t{-8483698769767406782, 0},\n\t{-8482007217409229133, 2},\n\t{-8481522505374571945, 2},\n\t{-8480329620741332569, 3},\n\t{-8479540725713627483, 0},\n\t{-8475279295355581427, 0},\n\t{-8473828066928014798, 1},\n\t{-8473751669843379927, 1},\n\t{-8471514019531691804, 3},\n\t{-8470694603663626540, 0},\n\t{-8469899105534108482, 1},\n\t{-8467346370116500732, 3},\n\t{-8465046400200259596, 1},\n\t{-8464728709759645268, 1},\n\t{-8463572192211102153, 2},\n\t{-8459795533268608644, 2},\n\t{-8459785543572838456, 2},\n\t{-8459280090157509505, 2},\n\t{-8458458385762615566, 3},\n\t{-8455325930966454216, 2},\n\t{-8454960839471478619, 2},\n\t{-8449043774552522263, 3},\n\t{-8445506221730907556, 2},\n\t{-8441400010740778171, 2},\n\t{-8440771700544956085, 3},\n\t{-8440688495120230302, 3},\n\t{-8435070196955213200, 0},\n\t{-8434907410219775733, 0},\n\t{-8432371535523876839, 2},\n\t{-8432363831625304734, 2},\n\t{-8431798484932836316, 3},\n\t{-8428774134953824393, 1},\n\t{-8421099371731028715, 0},\n\t{-8419354185222706444, 2},\n\t{-8417120049028743807, 0},\n\t{-8416497296990146903, 0},\n\t{-8415683328128979423, 1},\n\t{-8415127921297226828, 1},\n\t{-8413083173323803051, 3},\n\t{-8412069940792355445, 0},\n\t{-8411382930996905407, 1},\n\t{-8409784240205182299, 2},\n\t{-8408490719920727576, 3},\n\t{-8405009540046335716, 2},\n\t{-8403634009763728108, 0},\n\t{-8402123526929444170, 1},\n\t{-8401598368850656636, 1},\n\t{-8399748517516631169, 3},\n\t{-8398145746429616858, 0},\n\t{-8395999254791712562, 2},\n\t{-8395260530766916784, 3},\n\t{-8394478841990517560, 0},\n\t{-8388413583856715057, 
1},\n\t{-8387672882787740834, 2},\n\t{-8387168430284168819, 2},\n\t{-8387094803912313492, 2},\n\t{-8387074512141759261, 2},\n\t{-8384893706871858624, 0},\n\t{-8383985495630390913, 1},\n\t{-8383894456009207223, 1},\n\t{-8382573019622317730, 2},\n\t{-8377619854364117982, 3},\n\t{-8376488801924003757, 0},\n\t{-8373913399574645799, 2},\n\t{-8373419214044244059, 2},\n\t{-8369032425975313013, 2},\n\t{-8368549845846954257, 3},\n\t{-8367858159285207899, 3},\n\t{-8367610095872028000, 0},\n\t{-8367549184173066531, 0},\n\t{-8367120991245542759, 0},\n\t{-8364321713298490471, 2},\n\t{-8361884287202140375, 1},\n\t{-8358860115010827712, 3},\n\t{-8357430617580290143, 1},\n\t{-8353180418110558552, 0},\n\t{-8352763372689441011, 1},\n\t{-8351537537669063019, 2},\n\t{-8351006652984738107, 2},\n\t{-8350024552130708141, 3},\n\t{-8346545786916311365, 2},\n\t{-8345540821347024540, 3},\n\t{-8342112292946974880, 2},\n\t{-8339040728487871104, 1},\n\t{-8339005856507062736, 1},\n\t{-8338854521362905181, 1},\n\t{-8338094180190166070, 2},\n\t{-8336580678769266918, 3},\n\t{-8336299032910419213, 3},\n\t{-8336051024809654132, 0},\n\t{-8334422891159978946, 1},\n\t{-8326290797705283744, 0},\n\t{-8325342960557061012, 1},\n\t{-8323042734129659215, 3},\n\t{-8322671511628216748, 3},\n\t{-8320255622010060895, 2},\n\t{-8319596953646915440, 2},\n\t{-8317456214049413758, 0},\n\t{-8316690175436868029, 1},\n\t{-8316652874708819614, 1},\n\t{-8316316880915081047, 1},\n\t{-8316197264741883055, 1},\n\t{-8315477624831318816, 2},\n\t{-8315169655343619250, 2},\n\t{-8313298616084598611, 0},\n\t{-8312195898846365046, 1},\n\t{-8309808854855876136, 3},\n\t{-8308355491273903660, 0},\n\t{-8307566336394387808, 1},\n\t{-8305759486226031547, 3},\n\t{-8305220119003647557, 3},\n\t{-8304589256831767464, 0},\n\t{-8303614282647025208, 0},\n\t{-8303110787469750596, 1},\n\t{-8301969265617118971, 2},\n\t{-8301431163645948800, 2},\n\t{-8298736404649928953, 1},\n\t{-8296627456093501180, 3},\n\t{-8296449349543571877, 
3},\n\t{-8294875504603859921, 0},\n\t{-8294723474216249353, 0},\n\t{-8290965418788732930, 0},\n\t{-8290644590637034734, 0},\n\t{-8289033544838143166, 1},\n\t{-8288998612620410954, 1},\n\t{-8287215240968950425, 3},\n\t{-8283449255035247719, 2},\n\t{-8282375366710057449, 3},\n\t{-8278828103680226284, 2},\n\t{-8278552617328622388, 3},\n\t{-8275995731875641701, 1},\n\t{-8273111408424141355, 0},\n\t{-8273052696125238590, 0},\n\t{-8270766545201551571, 2},\n\t{-8270259233929505281, 2},\n\t{-8266783678487798757, 1},\n\t{-8265748113882696357, 2},\n\t{-8265640029804085770, 2},\n\t{-8264960494676468461, 3},\n\t{-8264586923374603149, 3},\n\t{-8262958171224427210, 1},\n\t{-8256103142222743001, 3},\n\t{-8255461772141702609, 3},\n\t{-8247061437346180334, 3},\n\t{-8243938201190679083, 1},\n\t{-8239704930383216851, 1},\n\t{-8237233854167132587, 3},\n\t{-8234515644965821638, 2},\n\t{-8233213657486196085, 3},\n\t{-8231943422673463045, 0},\n\t{-8223303916461868061, 0},\n\t{-8222108409741375147, 1},\n\t{-8221546357842636370, 1},\n\t{-8221528922157970495, 1},\n\t{-8220999818478773849, 2},\n\t{-8220598297367534642, 2},\n\t{-8220195410468954025, 2},\n\t{-8218810134276172915, 0},\n\t{-8218424901606405428, 0},\n\t{-8215586791933333747, 3},\n\t{-8211197630521909247, 2},\n\t{-8209093341571548698, 0},\n\t{-8207766192162305292, 2},\n\t{-8205119319121463289, 0},\n\t{-8203698733632199750, 1},\n\t{-8203628927328766437, 1},\n\t{-8201925780717530320, 3},\n\t{-8200136513863707575, 0},\n\t{-8194683581531803428, 1},\n\t{-8193982101216933743, 2},\n\t{-8193667489132494997, 2},\n\t{-8193572047985582129, 2},\n\t{-8192911466999677781, 3},\n\t{-8192185008131646249, 3},\n\t{-8189951415467077951, 1},\n\t{-8187130076508936924, 0},\n\t{-8186116272708412686, 1},\n\t{-8183440858276076728, 3},\n\t{-8180952284103274279, 1},\n\t{-8179380470230486498, 3},\n\t{-8173807306347547330, 0},\n\t{-8172171197867775285, 1},\n\t{-8169425467099585043, 0},\n\t{-8169203381757641145, 0},\n\t{-8167003379306200572, 
2},\n\t{-8166168691558738770, 2},\n\t{-8166007710971503662, 3},\n\t{-8165476724478035500, 3},\n\t{-8163128774244699711, 1},\n\t{-8161078427841652309, 3},\n\t{-8160686924256522793, 3},\n\t{-8160514661736768058, 0},\n\t{-8158364274569363963, 1},\n\t{-8157976470872947127, 2},\n\t{-8157736753134290423, 2},\n\t{-8157491123616025879, 2},\n\t{-8150186264403995132, 1},\n\t{-8149758591835234130, 1},\n\t{-8149619838857454522, 1},\n\t{-8149479037624232307, 1},\n\t{-8149128021065051280, 2},\n\t{-8145324027774362378, 1},\n\t{-8144644007658534241, 2},\n\t{-8143188420527983546, 3},\n\t{-8143018037088803741, 3},\n\t{-8142247589362881634, 0},\n\t{-8138337625745378051, 3},\n\t{-8136807372149818542, 1},\n\t{-8129864702238010737, 3},\n\t{-8126755579614410814, 1},\n\t{-8126246140550618846, 2},\n\t{-8124207955818292845, 0},\n\t{-8120287786937763361, 3},\n\t{-8118807516152413619, 1},\n\t{-8114065141929769781, 1},\n\t{-8113731542609340577, 1},\n\t{-8110437254484803155, 0},\n\t{-8106964496292752676, 3},\n\t{-8104651571181294011, 1},\n\t{-8100584857928968960, 1},\n\t{-8099009219653496977, 2},\n\t{-8098588230467470125, 3},\n\t{-8095667600947641688, 1},\n\t{-8095646378979037890, 1},\n\t{-8092954289066200413, 0},\n\t{-8092390129237554639, 0},\n\t{-8088876253718557598, 3},\n\t{-8088315559490257673, 0},\n\t{-8087702294524216978, 0},\n\t{-8083355931455517436, 0},\n\t{-8082489000653926150, 1},\n\t{-8082402818656404057, 1},\n\t{-8081170769789281576, 2},\n\t{-8078553735343112288, 0},\n\t{-8077785987856351995, 1},\n\t{-8070500606084551350, 3},\n\t{-8070472479818134131, 3},\n\t{-8066238707937535916, 3},\n\t{-8065037361862400737, 0},\n\t{-8064375597626074579, 1},\n\t{-8061820775890735860, 3},\n\t{-8059336090042517652, 1},\n\t{-8059141974754427524, 2},\n\t{-8053099187153857152, 3},\n\t{-8046902469358790329, 0},\n\t{-8040120546131891062, 2},\n\t{-8039724830444517412, 3},\n\t{-8034405218402590186, 0},\n\t{-8032449261037029458, 1},\n\t{-8032110605872969129, 2},\n\t{-8029781628709824769, 
0},\n\t{-8027126256806052396, 2},\n\t{-8026293319067632665, 3},\n\t{-8025484020483209259, 3},\n\t{-8024352892742505070, 0},\n\t{-8024334150394593621, 0},\n\t{-8023957809019968725, 1},\n\t{-8023821807599215005, 1},\n\t{-8021355074387850438, 3},\n\t{-8018626820857744053, 2},\n\t{-8017799262965255787, 2},\n\t{-8013498413404766176, 2},\n\t{-8012823788799540889, 3},\n\t{-8010064929144775679, 1},\n\t{-8006140495059837586, 1},\n\t{-8004190488731998497, 2},\n\t{-8002689860111014626, 0},\n\t{-8002248753638091732, 0},\n\t{-8000716191578504492, 1},\n\t{-7998073064202605212, 0},\n\t{-7997327973990558837, 0},\n\t{-7995870780743388171, 2},\n\t{-7992635613610837962, 1},\n\t{-7988744458505091489, 0},\n\t{-7987324815915822515, 1},\n\t{-7985447422447494243, 3},\n\t{-7985345534775578535, 3},\n\t{-7981832206554147257, 2},\n\t{-7980324694072893774, 0},\n\t{-7976693287690656996, 3},\n\t{-7975542422893311089, 0},\n\t{-7973687030757825828, 1},\n\t{-7973325363128557171, 2},\n\t{-7970852764926200285, 0},\n\t{-7970779282316506155, 0},\n\t{-7968119469071268318, 2},\n\t{-7965989310863216311, 0},\n\t{-7962496574859087105, 3},\n\t{-7961483364245672667, 0},\n\t{-7960817023828069909, 1},\n\t{-7960506093750069850, 1},\n\t{-7955570704870779330, 2},\n\t{-7954374529903310952, 3},\n\t{-7953885461175197051, 3},\n\t{-7953460103581126297, 3},\n\t{-7953216249345286292, 0},\n\t{-7952267944930043572, 0},\n\t{-7951693035429606922, 1},\n\t{-7950834041268878582, 2},\n\t{-7949668023231451945, 3},\n\t{-7947703822977921021, 1},\n\t{-7946337409887484401, 2},\n\t{-7945733347521513181, 2},\n\t{-7944717781978222404, 3},\n\t{-7942115815306396558, 1},\n\t{-7941983985633065542, 2},\n\t{-7941843043654188132, 2},\n\t{-7941343652311220382, 2},\n\t{-7940325285073302361, 3},\n\t{-7934926690315343476, 0},\n\t{-7931301959496434245, 3},\n\t{-7930364097488798670, 0},\n\t{-7927881575703674630, 2},\n\t{-7921856201819807125, 3},\n\t{-7917286439997527769, 0},\n\t{-7914934565301762430, 2},\n\t{-7914573765007474392, 
2},\n\t{-7912899376876339458, 3},\n\t{-7910233769622991782, 2},\n\t{-7907820214829005224, 0},\n\t{-7906132536562408211, 1},\n\t{-7904941402001275627, 3},\n\t{-7904609972982593784, 3},\n\t{-7902099888536578567, 1},\n\t{-7897873526219194099, 1},\n\t{-7895495464357695402, 3},\n\t{-7892643771871154539, 1},\n\t{-7891931494989106148, 2},\n\t{-7885804659777714470, 3},\n\t{-7884919004001330791, 0},\n\t{-7884227075765356984, 1},\n\t{-7882930431715808577, 2},\n\t{-7882360420749882932, 3},\n\t{-7879697982714656294, 1},\n\t{-7878453878110052646, 2},\n\t{-7877784811230391590, 3},\n\t{-7876701405943677217, 0},\n\t{-7876380466207432883, 0},\n\t{-7875027439717362859, 1},\n\t{-7874288201479223924, 2},\n\t{-7872978173701415611, 3},\n\t{-7871455228270757474, 0},\n\t{-7869873280293211507, 2},\n\t{-7866644670530561229, 1},\n\t{-7865772788087043991, 1},\n\t{-7862753807120225300, 0},\n\t{-7861682998465459389, 1},\n\t{-7860156530441649360, 2},\n\t{-7854826274888546347, 3},\n\t{-7853115693683699219, 1},\n\t{-7851825953188822446, 2},\n\t{-7850384381874396299, 3},\n\t{-7848053055769255628, 1},\n\t{-7846180831178163940, 3},\n\t{-7844132380570303554, 1},\n\t{-7839383618088726663, 1},\n\t{-7838450008418096385, 2},\n\t{-7835340979145260523, 0},\n\t{-7834879838205002274, 1},\n\t{-7834721550147569919, 1},\n\t{-7834159489789374325, 1},\n\t{-7828174358691711687, 3},\n\t{-7827111423911915825, 0},\n\t{-7825160405744827929, 1},\n\t{-7823568714666188974, 3},\n\t{-7823058064208708539, 3},\n\t{-7820971560922921379, 1},\n\t{-7818149409220416537, 0},\n\t{-7817760746492805061, 0},\n\t{-7816876533723557700, 1},\n\t{-7814774301286679390, 3},\n\t{-7813939127787407977, 3},\n\t{-7813280774364608175, 0},\n\t{-7811921678218430528, 1},\n\t{-7809380002083508101, 3},\n\t{-7808618676106873172, 0},\n\t{-7807966406443809495, 1},\n\t{-7807205876735161744, 1},\n\t{-7797259105003095572, 2},\n\t{-7796260449738116705, 3},\n\t{-7794080179707686963, 1},\n\t{-7793932296539560961, 1},\n\t{-7793848964995052033, 
1},\n\t{-7793594124935415606, 1},\n\t{-7793551395791736688, 1},\n\t{-7792490438861861870, 2},\n\t{-7785423911667768423, 1},\n\t{-7784021490804660866, 2},\n\t{-7782249671568911276, 3},\n\t{-7781671380889357378, 0},\n\t{-7777371381864307889, 0},\n\t{-7774595997679591041, 2},\n\t{-7774050549619834739, 3},\n\t{-7773872054281516820, 3},\n\t{-7772185972351577978, 0},\n\t{-7771665432510293098, 1},\n\t{-7771631245946568521, 1},\n\t{-7769194870688400314, 3},\n\t{-7765928522741201423, 2},\n\t{-7765598300688973433, 2},\n\t{-7763635279921807562, 0},\n\t{-7763144956411445346, 0},\n\t{-7762201638516290911, 1},\n\t{-7761785350382611535, 2},\n\t{-7755387254545997473, 3},\n\t{-7753034197013421892, 1},\n\t{-7751727826668026344, 3},\n\t{-7749964060672122974, 0},\n\t{-7748177469919360596, 2},\n\t{-7747654907169657295, 2},\n\t{-7746399002542767188, 3},\n\t{-7746011137284640072, 0},\n\t{-7745643539098322464, 0},\n\t{-7740095797463983663, 1},\n\t{-7739841299645566948, 1},\n\t{-7738547231463112062, 2},\n\t{-7738348936092451899, 2},\n\t{-7737579143497774513, 3},\n\t{-7737197715356175867, 3},\n\t{-7733099578905213766, 3},\n\t{-7731801278750838612, 0},\n\t{-7731116229990556253, 1},\n\t{-7730537662479118831, 1},\n\t{-7730072839208012435, 2},\n\t{-7728952079476078551, 3},\n\t{-7728408242308346087, 3},\n\t{-7724697852490240089, 3},\n\t{-7721183435990539102, 2},\n\t{-7716331903207141841, 2},\n\t{-7716105577982902248, 2},\n\t{-7713832375844466161, 0},\n\t{-7713295027695448756, 1},\n\t{-7711940686791312023, 2},\n\t{-7710538587176049956, 3},\n\t{-7709938987330016866, 0},\n\t{-7706049385623308933, 3},\n\t{-7704050807940704589, 1},\n\t{-7703879123810870944, 1},\n\t{-7702255845777537611, 3},\n\t{-7698466250930323173, 2},\n\t{-7697127437072468925, 3},\n\t{-7694845365599072732, 1},\n\t{-7693280720373436622, 2},\n\t{-7692064139413784352, 0},\n\t{-7691066508329632009, 0},\n\t{-7690089177007586387, 1},\n\t{-7685515592646238795, 1},\n\t{-7684868564941164525, 2},\n\t{-7681549608396735037, 
1},\n\t{-7681289924393628930, 1},\n\t{-7680942099333869506, 1},\n\t{-7680562744198021819, 2},\n\t{-7680484778832324747, 2},\n\t{-7680139717364205342, 2},\n\t{-7675511291550547936, 2},\n\t{-7674684868966459365, 3},\n\t{-7672355118140828776, 1},\n\t{-7671980573437355806, 1},\n\t{-7670292711882707405, 3},\n\t{-7669383304241768918, 0},\n\t{-7668634370684207969, 0},\n\t{-7668427521231152733, 1},\n\t{-7667462791902095893, 1},\n\t{-7665995510776444737, 3},\n\t{-7663721130251120816, 1},\n\t{-7663407716123317373, 1},\n\t{-7660690089505698137, 3},\n\t{-7658609580293159254, 1},\n\t{-7658273149305138922, 2},\n\t{-7657968495827113939, 2},\n\t{-7656743483920160316, 3},\n\t{-7655086853913074549, 0},\n\t{-7652275185585428739, 3},\n\t{-7651663563074173674, 3},\n\t{-7649572935555404193, 1},\n\t{-7647463282213790976, 3},\n\t{-7647311758400010833, 3},\n\t{-7646657388757320970, 0},\n\t{-7643726882669486253, 3},\n\t{-7643269654907408171, 3},\n\t{-7642573395645746580, 0},\n\t{-7641624701402297523, 0},\n\t{-7633534238732450383, 0},\n\t{-7632810725834693743, 0},\n\t{-7632727583406990655, 0},\n\t{-7629886053918178666, 3},\n\t{-7622408165253721598, 1},\n\t{-7620359729475626044, 3},\n\t{-7619544826747441500, 0},\n\t{-7618005168644775859, 1},\n\t{-7611569210046022125, 3},\n\t{-7610085201496075116, 0},\n\t{-7607227581626135569, 3},\n\t{-7604891437199856147, 1},\n\t{-7604237573224217213, 2},\n\t{-7602474169368385043, 3},\n\t{-7599588765998697843, 2},\n\t{-7598670484928833357, 3},\n\t{-7596312723512963087, 1},\n\t{-7596301585837983948, 1},\n\t{-7591901425454689020, 1},\n\t{-7591265201391449981, 1},\n\t{-7590741430834099423, 2},\n\t{-7589981075046979219, 2},\n\t{-7589082418510972320, 3},\n\t{-7588297545645205845, 0},\n\t{-7587779253892574044, 0},\n\t{-7586266067489064939, 2},\n\t{-7578991577855542511, 0},\n\t{-7566915388914894643, 3},\n\t{-7566520002942106585, 3},\n\t{-7566101983133902182, 3},\n\t{-7560757067526793099, 0},\n\t{-7560471121158384855, 0},\n\t{-7560470657637083585, 
0},\n\t{-7556116364839694042, 0},\n\t{-7552641539528901402, 3},\n\t{-7552544479599556095, 3},\n\t{-7551731016763091421, 0},\n\t{-7548497617522355640, 3},\n\t{-7547859112955371680, 0},\n\t{-7545569852061893506, 2},\n\t{-7543420314744007932, 0},\n\t{-7535171381064191416, 3},\n\t{-7534673974804393795, 3},\n\t{-7533569887213953281, 0},\n\t{-7533360556034376736, 1},\n\t{-7532241961069512317, 2},\n\t{-7526696533756584508, 2},\n\t{-7521825462681864943, 3},\n\t{-7521593636298229120, 3},\n\t{-7518312055483287728, 2},\n\t{-7517858198600330972, 2},\n\t{-7515979944547978395, 0},\n\t{-7512507461475127024, 3},\n\t{-7508954644057070829, 2},\n\t{-7507849086826712203, 3},\n\t{-7506489082667012668, 0},\n\t{-7503949509002093288, 3},\n\t{-7501855559511602182, 1},\n\t{-7500356053507891509, 2},\n\t{-7499356407379847183, 3},\n\t{-7497081829842644178, 1},\n\t{-7497013943899383700, 1},\n\t{-7496990568775955103, 1},\n\t{-7495223944998901987, 2},\n\t{-7494550171770361109, 3},\n\t{-7494064536477347636, 3},\n\t{-7492868655702746123, 0},\n\t{-7489002208588113683, 0},\n\t{-7488439262166615771, 0},\n\t{-7487740895096503232, 1},\n\t{-7484047186343380074, 0},\n\t{-7481917754636737579, 2},\n\t{-7479234415595162360, 1},\n\t{-7477376951330018236, 2},\n\t{-7476336351020487694, 3},\n\t{-7475996308745057900, 3},\n\t{-7475248318862206229, 0},\n\t{-7469381848148914946, 1},\n\t{-7466962550721270949, 0},\n\t{-7462418328782223524, 0},\n\t{-7460634311665536834, 1},\n\t{-7457605305917013309, 0},\n\t{-7456970692117565630, 0},\n\t{-7451095759082818745, 2},\n\t{-7450172384100755149, 2},\n\t{-7450086782004343641, 2},\n\t{-7449927587636755434, 3},\n\t{-7445219205161511012, 3},\n\t{-7444462906014988720, 3},\n\t{-7438299805042817268, 1},\n\t{-7436814199680356289, 2},\n\t{-7436255053152730801, 3},\n\t{-7435787148500924165, 3},\n\t{-7435174776550970408, 0},\n\t{-7434557089201418806, 0},\n\t{-7432982383438286781, 2},\n\t{-7432906420609666755, 2},\n\t{-7432844809800011307, 2},\n\t{-7429020473927981688, 
1},\n\t{-7427768596567275769, 2},\n\t{-7424267790237246093, 1},\n\t{-7422741547464078852, 3},\n\t{-7422723616938787317, 3},\n\t{-7421008072013168983, 0},\n\t{-7418529895755143096, 3},\n\t{-7416826662577041073, 0},\n\t{-7413741776763884829, 3},\n\t{-7413335510583257715, 3},\n\t{-7408828329958861081, 3},\n\t{-7403601833953004855, 0},\n\t{-7400760265995484294, 2},\n\t{-7398057061005167831, 1},\n\t{-7397642702856021659, 1},\n\t{-7394602946214729100, 0},\n\t{-7394522806892590377, 0},\n\t{-7391905752806225901, 2},\n\t{-7386573594494892035, 3},\n\t{-7385372136438356481, 0},\n\t{-7384857237623907443, 0},\n\t{-7384319973339250282, 1},\n\t{-7384221104548414778, 1},\n\t{-7383355554840233492, 2},\n\t{-7381877812575104441, 3},\n\t{-7380933923067355735, 0},\n\t{-7377024704699264999, 3},\n\t{-7376718558837285694, 0},\n\t{-7376657206155865939, 0},\n\t{-7371657027925604611, 0},\n\t{-7371170921961388748, 1},\n\t{-7367556751295685508, 0},\n\t{-7366599053879878519, 1},\n\t{-7365533437689267431, 2},\n\t{-7357769122981123216, 0},\n\t{-7356524743152382329, 2},\n\t{-7351086221196239200, 2},\n\t{-7350174473442059218, 3},\n\t{-7347139605035414656, 2},\n\t{-7346556748293810686, 2},\n\t{-7339950496735599076, 0},\n\t{-7339326132986362114, 1},\n\t{-7337693661483376532, 2},\n\t{-7335882684517088008, 0},\n\t{-7334146427657464301, 1},\n\t{-7334075305089381704, 2},\n\t{-7331263471527853572, 0},\n\t{-7328516542055458577, 2},\n\t{-7325077059887644351, 2},\n\t{-7324232570291438735, 2},\n\t{-7321556248582414990, 1},\n\t{-7316302795767765118, 1},\n\t{-7308050018745371185, 1},\n\t{-7306032579581698770, 2},\n\t{-7305257421776272638, 3},\n\t{-7304557467583338379, 0},\n\t{-7303294128599744041, 1},\n\t{-7299462945423595155, 0},\n\t{-7298403559371907041, 1},\n\t{-7297647431181106659, 2},\n\t{-7296976238423384452, 2},\n\t{-7290440884884584716, 0},\n\t{-7286310379290086957, 0},\n\t{-7283845012868637984, 2},\n\t{-7283748467267468512, 2},\n\t{-7282933452920384904, 3},\n\t{-7282882213175962113, 
3},\n\t{-7282726466488193668, 3},\n\t{-7282649474331948995, 3},\n\t{-7282216763919916004, 0},\n\t{-7278744586710677386, 3},\n\t{-7277905979007774889, 3},\n\t{-7273849588486140939, 3},\n\t{-7273708428338148277, 3},\n\t{-7271599211692026140, 1},\n\t{-7271278502222449881, 1},\n\t{-7265630799292543245, 2},\n\t{-7262214302212097483, 1},\n\t{-7258924430515878578, 0},\n\t{-7258341144218849815, 1},\n\t{-7255035402635558476, 0},\n\t{-7253311941672531862, 1},\n\t{-7253110720377960232, 1},\n\t{-7250050917922664961, 0},\n\t{-7248647522678551327, 1},\n\t{-7246717854580392576, 3},\n\t{-7243051044161949858, 2},\n\t{-7242905632840755339, 3},\n\t{-7238758885705931359, 2},\n\t{-7237292616476807856, 3},\n\t{-7231501938654758539, 1},\n\t{-7231486162736521500, 1},\n\t{-7231462416148937458, 1},\n\t{-7229454270309182650, 2},\n\t{-7228551434945991524, 3},\n\t{-7227234249068800594, 0},\n\t{-7225669950936650344, 2},\n\t{-7223418401567346313, 0},\n\t{-7222808170590301362, 0},\n\t{-7221004787933409463, 2},\n\t{-7214749678489349850, 0},\n\t{-7209393644421858129, 0},\n\t{-7207691134757919293, 2},\n\t{-7205597440305934506, 0},\n\t{-7204227410191801032, 1},\n\t{-7200565058834996949, 0},\n\t{-7199273504671936358, 1},\n\t{-7197281336986766950, 3},\n\t{-7196877669425387864, 3},\n\t{-7194950831844046343, 1},\n\t{-7192886698799613238, 3},\n\t{-7190591846886263164, 1},\n\t{-7190413781197847668, 1},\n\t{-7186777650291192945, 0},\n\t{-7186162404463873368, 1},\n\t{-7184657777827709219, 2},\n\t{-7183761432055077457, 3},\n\t{-7183284297892615766, 3},\n\t{-7182896287045052491, 0},\n\t{-7179981051467129472, 2},\n\t{-7175305387402304924, 3},\n\t{-7174507628514245047, 3},\n\t{-7173895258267958047, 0},\n\t{-7173444954313401290, 0},\n\t{-7173326451538569253, 0},\n\t{-7172947036856277462, 1},\n\t{-7170799601780680947, 3},\n\t{-7170787011754503264, 3},\n\t{-7170723543925850895, 3},\n\t{-7167739093618756438, 1},\n\t{-7164546404973077237, 0},\n\t{-7164451266049387909, 0},\n\t{-7164374129055247405, 
0},\n\t{-7162572829968807800, 2},\n\t{-7161393507582785731, 3},\n\t{-7161179619534233588, 3},\n\t{-7160316710125094997, 0},\n\t{-7158151898532794741, 2},\n\t{-7156678131449587982, 3},\n\t{-7155956544587615449, 0},\n\t{-7155444042079914075, 0},\n\t{-7155433816363925663, 0},\n\t{-7154355310132502764, 1},\n\t{-7152785470957936838, 3},\n\t{-7150789376019568667, 0},\n\t{-7149507981894962189, 1},\n\t{-7149392681350978944, 2},\n\t{-7147366867943006415, 3},\n\t{-7147240747177985569, 3},\n\t{-7139848333144823921, 2},\n\t{-7139608418394846766, 2},\n\t{-7139131737791243843, 3},\n\t{-7138124335193217774, 0},\n\t{-7134221825764623244, 3},\n\t{-7133731041475885991, 3},\n\t{-7130160008508673468, 3},\n\t{-7129086363622898385, 0},\n\t{-7126632274838341040, 2},\n\t{-7126629832257075332, 2},\n\t{-7125697875181621568, 3},\n\t{-7121563629031344122, 2},\n\t{-7117615918298289600, 2},\n\t{-7116928599625160236, 2},\n\t{-7116522127744902326, 3},\n\t{-7115663873485689946, 0},\n\t{-7115023936172587141, 0},\n\t{-7113178507723511270, 2},\n\t{-7112861987496530169, 2},\n\t{-7109992380766067596, 1},\n\t{-7108809445447329207, 2},\n\t{-7108754721650855371, 2},\n\t{-7105735950686544948, 0},\n\t{-7104032785860948829, 2},\n\t{-7103704764580401125, 2},\n\t{-7100369672270449867, 1},\n\t{-7099384693957328672, 2},\n\t{-7097252315782952082, 0},\n\t{-7094568751480404166, 2},\n\t{-7090988674857736112, 1},\n\t{-7090618429146118706, 2},\n\t{-7090055669493233434, 2},\n\t{-7087977425336368518, 0},\n\t{-7084788152046588315, 3},\n\t{-7081837350751310482, 2},\n\t{-7080816226208123532, 2},\n\t{-7073774124269649069, 1},\n\t{-7067570781685192808, 2},\n\t{-7067092685997808537, 3},\n\t{-7066682616614494386, 3},\n\t{-7065828702461009970, 0},\n\t{-7064527941369366415, 1},\n\t{-7062264674312153974, 3},\n\t{-7061196050477031074, 0},\n\t{-7058856521768192874, 2},\n\t{-7058170064942405879, 3},\n\t{-7057343502674040662, 3},\n\t{-7056080956168825000, 0},\n\t{-7054217101155765915, 2},\n\t{-7053026116236305029, 
3},\n\t{-7050388470879780610, 1},\n\t{-7050068659621904364, 2},\n\t{-7047788252254184504, 0},\n\t{-7044566070593902579, 3},\n\t{-7041520375533868531, 1},\n\t{-7039565572552413074, 3},\n\t{-7038515241722310587, 0},\n\t{-7038046786757607254, 0},\n\t{-7037255905809320951, 1},\n\t{-7036560852421995331, 2},\n\t{-7034673341431048482, 3},\n\t{-7034193572155587960, 0},\n\t{-7032941450655817785, 1},\n\t{-7026989704396571067, 2},\n\t{-7022837130142310158, 2},\n\t{-7022750172422450180, 2},\n\t{-7021521067927726436, 3},\n\t{-7021286551888271850, 3},\n\t{-7020804438073087736, 0},\n\t{-7019096248039430202, 1},\n\t{-7015177723741709715, 1},\n\t{-7012752879079057654, 3},\n\t{-7010186207550039178, 1},\n\t{-7009747483944313205, 2},\n\t{-7009020201051453396, 2},\n\t{-7007132800397069053, 0},\n\t{-7007001094496596446, 0},\n\t{-7006497400750396348, 0},\n\t{-6998767601890521924, 3},\n\t{-6993306866832654932, 0},\n\t{-6991444975733544978, 2},\n\t{-6991186466584406387, 2},\n\t{-6990962638370173482, 2},\n\t{-6987291144830204080, 2},\n\t{-6985591466354193759, 3},\n\t{-6983976584358990546, 0},\n\t{-6983531924416226728, 1},\n\t{-6981877696337775221, 2},\n\t{-6978908133159843540, 1},\n\t{-6977444310291547672, 2},\n\t{-6977028095309674060, 3},\n\t{-6975584108393929539, 0},\n\t{-6972644089185790661, 3},\n\t{-6972259927345838412, 3},\n\t{-6972004782936715847, 3},\n\t{-6970867686287908840, 0},\n\t{-6969870711992353933, 1},\n\t{-6968783043651178551, 2},\n\t{-6967299681242205982, 3},\n\t{-6966384669914971543, 0},\n\t{-6964755158194156885, 2},\n\t{-6961720774803234500, 0},\n\t{-6961641437890217240, 0},\n\t{-6961517782218093253, 0},\n\t{-6961469917897685172, 0},\n\t{-6960725118479664210, 1},\n\t{-6956419355754849549, 1},\n\t{-6953989651934172472, 3},\n\t{-6951274487698281227, 2},\n\t{-6950171709337752837, 3},\n\t{-6950000658174796331, 3},\n\t{-6949158367877261371, 3},\n\t{-6947746032680390427, 1},\n\t{-6946725152830415331, 2},\n\t{-6945242062240517469, 3},\n\t{-6943441574797612512, 
0},\n\t{-6941856327972159009, 2},\n\t{-6941377050327390181, 2},\n\t{-6939946814247903714, 0},\n\t{-6934565593884852335, 0},\n\t{-6933204182963180839, 2},\n\t{-6931050389490557231, 3},\n\t{-6930812536426342199, 0},\n\t{-6924310740580351990, 1},\n\t{-6923227897201298409, 2},\n\t{-6921985847822447357, 0},\n\t{-6921860225947264735, 0},\n\t{-6919744354322491337, 2},\n\t{-6917373244500615974, 0},\n\t{-6915858189103129671, 1},\n\t{-6915052870012368694, 2},\n\t{-6913400606665765821, 3},\n\t{-6907503500910832172, 0},\n\t{-6904878688066422647, 3},\n\t{-6902849012335252068, 1},\n\t{-6902288212182879230, 1},\n\t{-6900170369213001226, 3},\n\t{-6899467874873806359, 0},\n\t{-6888743429760184621, 1},\n\t{-6884866039735341988, 1},\n\t{-6882856472575557905, 2},\n\t{-6880879564552318441, 0},\n\t{-6875791902404425353, 1},\n\t{-6870575731095334169, 1},\n\t{-6868407865188205743, 3},\n\t{-6868265883684808629, 3},\n\t{-6867377613317140587, 0},\n\t{-6866182931887709300, 1},\n\t{-6865513466846010928, 2},\n\t{-6865401455876770805, 2},\n\t{-6865057393455238633, 2},\n\t{-6864700673968546779, 2},\n\t{-6864207506670221508, 3},\n\t{-6863277907951517867, 0},\n\t{-6862062847142697450, 1},\n\t{-6860906693413172948, 2},\n\t{-6859497996029273613, 3},\n\t{-6858064002933077311, 0},\n\t{-6857050327266745189, 1},\n\t{-6849039686762461107, 0},\n\t{-6844907218643915246, 0},\n\t{-6844394904837766950, 0},\n\t{-6838743321761321902, 1},\n\t{-6838585598965727961, 2},\n\t{-6838048013292878356, 2},\n\t{-6834046751483542391, 2},\n\t{-6833680150854797160, 2},\n\t{-6832087776190079215, 3},\n\t{-6830716738502255278, 1},\n\t{-6830328417467790861, 1},\n\t{-6830012044549900414, 1},\n\t{-6828207144802685998, 3},\n\t{-6828087243069919038, 3},\n\t{-6828064420747915164, 3},\n\t{-6827238943304168747, 0},\n\t{-6827151486528237204, 0},\n\t{-6826073336896416334, 1},\n\t{-6825752175434553702, 1},\n\t{-6821157867965945495, 1},\n\t{-6820761116525759498, 1},\n\t{-6819229804648378826, 3},\n\t{-6818312533517750143, 
0},\n\t{-6815126457389527682, 2},\n\t{-6812645139602253043, 1},\n\t{-6812497847266549486, 1},\n\t{-6811320387455758249, 2},\n\t{-6809996512675770261, 3},\n\t{-6808562785420373011, 0},\n\t{-6808225072569239527, 1},\n\t{-6807305782631681189, 1},\n\t{-6806459857822381122, 2},\n\t{-6806418841020236219, 2},\n\t{-6805081141332636843, 3},\n\t{-6803741024943841129, 1},\n\t{-6799383075546240611, 0},\n\t{-6796480424201323230, 3},\n\t{-6786455746626068843, 0},\n\t{-6786126735777108974, 0},\n\t{-6786090945480774262, 0},\n\t{-6785265254524789424, 1},\n\t{-6781943141017326846, 0},\n\t{-6781352357061165548, 0},\n\t{-6775919642850837836, 1},\n\t{-6773984304220719101, 3},\n\t{-6771914178185133715, 1},\n\t{-6771330334984705183, 1},\n\t{-6768452040300922908, 0},\n\t{-6767274134170451117, 1},\n\t{-6764669514921254403, 3},\n\t{-6762098567347597396, 2},\n\t{-6758373873301264852, 1},\n\t{-6758060122668637663, 1},\n\t{-6757220109891788205, 2},\n\t{-6756228706178354511, 3},\n\t{-6755090411825565343, 0},\n\t{-6753150292571846119, 1},\n\t{-6752511061798576539, 2},\n\t{-6751886343274326098, 3},\n\t{-6751007667347572737, 3},\n\t{-6748851544696594100, 1},\n\t{-6748718347421941309, 1},\n\t{-6744087546846332053, 2},\n\t{-6743377419958495680, 2},\n\t{-6742673255926970001, 3},\n\t{-6742643286169076485, 3},\n\t{-6741096085435582261, 0},\n\t{-6739735813616739415, 1},\n\t{-6739527784091321069, 2},\n\t{-6737868873389138832, 3},\n\t{-6737704631000508705, 3},\n\t{-6734446102347509321, 2},\n\t{-6734335707818795683, 2},\n\t{-6731981768517727835, 0},\n\t{-6731197932316706765, 1},\n\t{-6729829327957380214, 2},\n\t{-6724517494527357612, 3},\n\t{-6721306548855749173, 2},\n\t{-6718320174832526973, 0},\n\t{-6718309834221153142, 0},\n\t{-6716342147434951176, 2},\n\t{-6714393889281358615, 0},\n\t{-6713325845803953102, 1},\n\t{-6710648244041542975, 3},\n\t{-6707360887093597324, 2},\n\t{-6704490925642341814, 1},\n\t{-6703766776590934004, 1},\n\t{-6700613299245164135, 0},\n\t{-6698069856676001280, 
2},\n\t{-6696552701277188729, 0},\n\t{-6695909936695340826, 0},\n\t{-6695264219163230084, 1},\n\t{-6694029941279898028, 2},\n\t{-6693465998643829650, 3},\n\t{-6690548925577575225, 1},\n\t{-6690149474014233837, 1},\n\t{-6689073478480157408, 2},\n\t{-6687985977460349245, 3},\n\t{-6685999510281546149, 1},\n\t{-6685874464742504110, 1},\n\t{-6678726624917026287, 0},\n\t{-6676803490694526992, 1},\n\t{-6671541703464584373, 2},\n\t{-6668770708523915103, 0},\n\t{-6667726463429914635, 1},\n\t{-6665762395429790059, 3},\n\t{-6664224759161908177, 0},\n\t{-6662649181191020617, 2},\n\t{-6662519435036539932, 2},\n\t{-6661682318922512712, 3},\n\t{-6656644431104706414, 3},\n\t{-6656210220711588090, 0},\n\t{-6650743839450723177, 0},\n\t{-6648464291737180836, 2},\n\t{-6647877811951456276, 3},\n\t{-6645513639367983993, 1},\n\t{-6641941821865538954, 0},\n\t{-6641938732358325937, 0},\n\t{-6640867343256435757, 1},\n\t{-6635782576346717906, 2},\n\t{-6634810562207235046, 3},\n\t{-6634245382486847694, 3},\n\t{-6634233744230884953, 3},\n\t{-6624655203701066328, 0},\n\t{-6623716524238618439, 0},\n\t{-6622123801617431633, 2},\n\t{-6620807532144591504, 3},\n\t{-6618419636726524936, 1},\n\t{-6614493161702618060, 1},\n\t{-6611221479997965440, 0},\n\t{-6610604726748851579, 0},\n\t{-6610326174492154726, 0},\n\t{-6601689533908283254, 0},\n\t{-6599683001267326539, 2},\n\t{-6596613318960784443, 1},\n\t{-6595968431576836144, 1},\n\t{-6595807175416576179, 1},\n\t{-6591911974989665251, 1},\n\t{-6590549034643371619, 2},\n\t{-6584895884231782271, 3},\n\t{-6582663666016741344, 1},\n\t{-6579872493153739339, 3},\n\t{-6579673860271064709, 0},\n\t{-6576945804156556718, 2},\n\t{-6575161425452188343, 0},\n\t{-6573514003557299939, 1},\n\t{-6573472058693474801, 1},\n\t{-6573178458348024664, 1},\n\t{-6564822788685245206, 1},\n\t{-6560246201045120730, 1},\n\t{-6559572378420388655, 1},\n\t{-6559548584970578201, 1},\n\t{-6558767320798555083, 2},\n\t{-6557989010547115623, 3},\n\t{-6557553469111505721, 
3},\n\t{-6556215174673457652, 0},\n\t{-6555593195312602850, 1},\n\t{-6553868239610059926, 2},\n\t{-6551529453795565863, 1},\n\t{-6550010798354129967, 2},\n\t{-6543908798129291680, 3},\n\t{-6539803717113268273, 3},\n\t{-6537740417101318899, 1},\n\t{-6537193185950883220, 1},\n\t{-6532437887837570130, 2},\n\t{-6532284323463464748, 2},\n\t{-6530339250394354597, 3},\n\t{-6529552848909839327, 0},\n\t{-6527890982526054651, 2},\n\t{-6514496966741365937, 1},\n\t{-6513949888608640963, 2},\n\t{-6513006183815677580, 3},\n\t{-6512299129690871716, 3},\n\t{-6510706840231257734, 1},\n\t{-6509878219692902761, 2},\n\t{-6506434651450759056, 1},\n\t{-6505588609690363566, 1},\n\t{-6495722111972982560, 2},\n\t{-6492973044843929692, 1},\n\t{-6489301408111198524, 0},\n\t{-6488868472483888979, 0},\n\t{-6487574502802186627, 1},\n\t{-6487019461031460914, 2},\n\t{-6486642147506089756, 2},\n\t{-6483255432186999194, 1},\n\t{-6479855438827011179, 0},\n\t{-6477150953757928682, 3},\n\t{-6474143845585541711, 1},\n\t{-6473151545612167794, 2},\n\t{-6469780010792248766, 1},\n\t{-6469688380321372178, 1},\n\t{-6468576221078796776, 2},\n\t{-6467669790966208698, 3},\n\t{-6466853857690627436, 0},\n\t{-6464013001673729165, 2},\n\t{-6463308317603382013, 3},\n\t{-6462822105872595979, 3},\n\t{-6462305212495918847, 0},\n\t{-6460283860747384252, 2},\n\t{-6458610451591925600, 3},\n\t{-6458172755152091211, 3},\n\t{-6457560571002518887, 0},\n\t{-6455374513303479280, 2},\n\t{-6441583908814349281, 2},\n\t{-6438797140142098624, 1},\n\t{-6438191977637515788, 1},\n\t{-6436637831158602014, 3},\n\t{-6435534578399239442, 0},\n\t{-6433925332139169421, 1},\n\t{-6427522423272258632, 3},\n\t{-6425710773899196642, 0},\n\t{-6424525607544936108, 1},\n\t{-6421691077905628627, 0},\n\t{-6414185287718883405, 3},\n\t{-6413919981551186809, 3},\n\t{-6410270650915812607, 2},\n\t{-6409319759830000755, 3},\n\t{-6407976732258260134, 0},\n\t{-6407726134414296387, 0},\n\t{-6404095862905492112, 0},\n\t{-6403787025100544374, 
0},\n\t{-6399269499806112567, 0},\n\t{-6399167395579320661, 0},\n\t{-6396938554982307105, 2},\n\t{-6395403482056156891, 3},\n\t{-6393565848182836569, 1},\n\t{-6390882098664662563, 3},\n\t{-6390713399501304652, 3},\n\t{-6390101771116126458, 0},\n\t{-6389705280449443210, 0},\n\t{-6388641200704259072, 1},\n\t{-6386497522653060497, 3},\n\t{-6384433584579720933, 1},\n\t{-6383767549989322911, 2},\n\t{-6383695550516410472, 2},\n\t{-6382595738687190995, 3},\n\t{-6380896045986351480, 0},\n\t{-6379283844457683693, 2},\n\t{-6379233215573147775, 2},\n\t{-6378283198058326751, 2},\n\t{-6377883899103070823, 3},\n\t{-6373771883741652598, 2},\n\t{-6370989835511065229, 1},\n\t{-6370638315408038455, 1},\n\t{-6370465553823215475, 1},\n\t{-6369253187726445226, 2},\n\t{-6367979327009290100, 0},\n\t{-6364189278105479874, 3},\n\t{-6364104244996347062, 3},\n\t{-6363170425827513544, 0},\n\t{-6361030363968743328, 2},\n\t{-6360516836927124196, 2},\n\t{-6359197031449257286, 3},\n\t{-6357329054072592106, 1},\n\t{-6355698737721326690, 3},\n\t{-6353642844296568655, 0},\n\t{-6353531582000570842, 0},\n\t{-6352545141053791594, 1},\n\t{-6351390344091978544, 2},\n\t{-6350339354909484178, 3},\n\t{-6349488165822347131, 0},\n\t{-6347872654346857262, 1},\n\t{-6345411045377788997, 0},\n\t{-6343660744343987405, 1},\n\t{-6341372715775164483, 3},\n\t{-6341035230576411873, 0},\n\t{-6339356166360898552, 1},\n\t{-6338344571697806676, 2},\n\t{-6336890949883822487, 3},\n\t{-6334992048030130908, 1},\n\t{-6333004421750264353, 3},\n\t{-6332255304185599836, 3},\n\t{-6332140489315477382, 3},\n\t{-6329256495565683968, 2},\n\t{-6327686587849709293, 3},\n\t{-6326705343439634894, 0},\n\t{-6325935326519630349, 1},\n\t{-6325449458020786074, 1},\n\t{-6321777179853981135, 1},\n\t{-6319878259995297030, 2},\n\t{-6319691025550655159, 2},\n\t{-6318912020475470463, 3},\n\t{-6315191691319460556, 2},\n\t{-6313608889880960650, 0},\n\t{-6313453937726583872, 0},\n\t{-6311331489376513851, 2},\n\t{-6310510034542565920, 
3},\n\t{-6310404506171627205, 3},\n\t{-6309062842064382514, 0},\n\t{-6303271995453261706, 1},\n\t{-6302600461933577047, 2},\n\t{-6300981843966988086, 3},\n\t{-6300022183960029724, 0},\n\t{-6297865868408307913, 2},\n\t{-6297622299168358335, 2},\n\t{-6293637609629269943, 2},\n\t{-6288078627657537049, 3},\n\t{-6285802677032078429, 1},\n\t{-6285540904821833991, 1},\n\t{-6282528885081137808, 3},\n\t{-6279454455201726952, 2},\n\t{-6279262802875204128, 2},\n\t{-6274296627173458305, 3},\n\t{-6270118237034163192, 3},\n\t{-6269179109184062310, 3},\n\t{-6268834376601993570, 0},\n\t{-6262149255782260224, 2},\n\t{-6256540974046911482, 3},\n\t{-6254764454434098499, 0},\n\t{-6253127462655508581, 2},\n\t{-6251306665038730498, 3},\n\t{-6250910687019869619, 0},\n\t{-6249621277049346546, 1},\n\t{-6247739019258957913, 2},\n\t{-6244343187543884992, 1},\n\t{-6243308664502318579, 2},\n\t{-6241975194137073476, 0},\n\t{-6241965000484191778, 0},\n\t{-6239394481677494936, 2},\n\t{-6238473051891572305, 3},\n\t{-6238275162470059171, 3},\n\t{-6233575572051838656, 3},\n\t{-6233469821617458215, 3},\n\t{-6232954253297445887, 0},\n\t{-6231669275785178623, 1},\n\t{-6230933852448506461, 1},\n\t{-6228759665193379801, 3},\n\t{-6226623937508448776, 1},\n\t{-6223336308353154382, 0},\n\t{-6222461407729743106, 1},\n\t{-6221707394449098291, 2},\n\t{-6215382281023038255, 3},\n\t{-6215070955868281266, 3},\n\t{-6210171661706761351, 0},\n\t{-6206596060561340488, 3},\n\t{-6206560478041763945, 3},\n\t{-6204180010887807428, 1},\n\t{-6201590192064311674, 3},\n\t{-6198164392899410886, 2},\n\t{-6197327109545488740, 3},\n\t{-6197304492206684586, 3},\n\t{-6195568298487616494, 1},\n\t{-6195293972815193047, 1},\n\t{-6195056070214948518, 1},\n\t{-6192791429212434030, 3},\n\t{-6186761957109079921, 1},\n\t{-6183843028537268270, 3},\n\t{-6176518773136480216, 2},\n\t{-6175479418043578545, 3},\n\t{-6175475562242912924, 3},\n\t{-6174413546799114308, 0},\n\t{-6171357350974313784, 2},\n\t{-6170965414410863148, 
3},\n\t{-6167954887735159285, 1},\n\t{-6164991567962109851, 0},\n\t{-6164979487814790739, 0},\n\t{-6161771262210994536, 3},\n\t{-6161496085196157736, 3},\n\t{-6160889756828249303, 0},\n\t{-6160685397509095714, 0},\n\t{-6160643328094543049, 0},\n\t{-6159195509202095473, 1},\n\t{-6158165270327779039, 2},\n\t{-6155669438294406169, 0},\n\t{-6155060509751376575, 1},\n\t{-6153877848675576817, 2},\n\t{-6150839721267825568, 0},\n\t{-6150106051366801621, 1},\n\t{-6149554400743443952, 2},\n\t{-6146165778212168607, 1},\n\t{-6144592814422786180, 2},\n\t{-6143343459808537923, 3},\n\t{-6141964760556938144, 0},\n\t{-6139557678947072980, 2},\n\t{-6139217231456176166, 3},\n\t{-6137886572258149084, 0},\n\t{-6137147525405124488, 1},\n\t{-6136104566669857742, 2},\n\t{-6134116505493838215, 3},\n\t{-6134041888089849335, 3},\n\t{-6129444909104237756, 3},\n\t{-6128028614455371050, 1},\n\t{-6123600302638150943, 1},\n\t{-6122709240341326439, 1},\n\t{-6120817785555186756, 3},\n\t{-6116637049332014288, 3},\n\t{-6116198226289167461, 3},\n\t{-6114956508545185121, 0},\n\t{-6114763798028913394, 0},\n\t{-6114454093866253722, 1},\n\t{-6112175535451687045, 3},\n\t{-6108679509749399381, 2},\n\t{-6107332951254750790, 3},\n\t{-6102909007684646798, 3},\n\t{-6102335724670652872, 0},\n\t{-6097950572081657206, 3},\n\t{-6095679246504270354, 1},\n\t{-6095440000555220397, 2},\n\t{-6093726008280628528, 3},\n\t{-6093424013863286745, 3},\n\t{-6092369347969426688, 0},\n\t{-6091441118021768450, 1},\n\t{-6090848490068329202, 2},\n\t{-6090542148318706660, 2},\n\t{-6090094990406064871, 2},\n\t{-6087717619147488443, 1},\n\t{-6084059528307391595, 0},\n\t{-6078109322464198937, 1},\n\t{-6077621399284344570, 1},\n\t{-6076795525182041517, 2},\n\t{-6073138584397374586, 1},\n\t{-6070555857643945479, 0},\n\t{-6070466010001683347, 0},\n\t{-6067791711758164636, 2},\n\t{-6067211875992808044, 3},\n\t{-6066299331195078942, 0},\n\t{-6064577008390968780, 1},\n\t{-6060376249554002166, 1},\n\t{-6059769807045895755, 
1},\n\t{-6052302564438415117, 0},\n\t{-6052172502907770525, 0},\n\t{-6050776280744008102, 1},\n\t{-6050367673603093691, 2},\n\t{-6049460540333895800, 2},\n\t{-6047757054387388809, 0},\n\t{-6046422559751190956, 1},\n\t{-6035748971603842799, 3},\n\t{-6032263610660198426, 2},\n\t{-6026530539461404790, 3},\n\t{-6025377825780538247, 0},\n\t{-6021559468462908647, 3},\n\t{-6021095565774817083, 0},\n\t{-6019447253941324155, 1},\n\t{-6018575894188502206, 2},\n\t{-6017622394028991004, 3},\n\t{-6016903506663549638, 3},\n\t{-6015339382035474874, 1},\n\t{-6007394627291377405, 0},\n\t{-6004522141235558329, 2},\n\t{-6004205340176528865, 3},\n\t{-6001500310255415235, 1},\n\t{-6001477337263005838, 1},\n\t{-6000964336112028681, 2},\n\t{-6000803121987176268, 2},\n\t{-5997240089258847870, 1},\n\t{-5995824515505548093, 2},\n\t{-5994910184412586510, 3},\n\t{-5993591923551292682, 0},\n\t{-5991727320260368035, 2},\n\t{-5987139798144393854, 2},\n\t{-5983753909578336571, 1},\n\t{-5981763345755523663, 3},\n\t{-5981732692970168162, 3},\n\t{-5980859052850917182, 3},\n\t{-5975754728999844233, 0},\n\t{-5975202785036719524, 0},\n\t{-5971968271713170716, 3},\n\t{-5968242418118831247, 3},\n\t{-5967158257899347127, 0},\n\t{-5964244406161037070, 2},\n\t{-5962070818752329190, 0},\n\t{-5961683655490074031, 0},\n\t{-5961292286393492860, 1},\n\t{-5959905247456295278, 2},\n\t{-5955576833281232025, 2},\n\t{-5954718536480195669, 3},\n\t{-5953595230854016883, 0},\n\t{-5948722947920353055, 0},\n\t{-5948590176018945102, 0},\n\t{-5947726679925042183, 1},\n\t{-5946049414066205564, 2},\n\t{-5945589939151508007, 3},\n\t{-5945286909460386592, 3},\n\t{-5942561737736782132, 1},\n\t{-5938715201559427227, 1},\n\t{-5937961136312120320, 2},\n\t{-5937664473184663856, 2},\n\t{-5934863386777334045, 0},\n\t{-5933746884292946225, 1},\n\t{-5928999763670681271, 1},\n\t{-5927238655873720297, 3},\n\t{-5914001391538883327, 3},\n\t{-5910108228262783873, 2},\n\t{-5910070035238383301, 2},\n\t{-5905613608056516201, 
2},\n\t{-5905204170605625904, 3},\n\t{-5901732494563660346, 2},\n\t{-5901697403238152621, 2},\n\t{-5900649047715306855, 3},\n\t{-5900221707795305519, 3},\n\t{-5899623831633158157, 0},\n\t{-5899245390688017471, 0},\n\t{-5895293550285184718, 3},\n\t{-5890793062525243333, 3},\n\t{-5888535120336897741, 1},\n\t{-5887091014153440542, 3},\n\t{-5887077264733057906, 3},\n\t{-5887059288424105279, 3},\n\t{-5884957082729718630, 1},\n\t{-5884885966623076788, 1},\n\t{-5883543343336310577, 2},\n\t{-5882649532375577578, 3},\n\t{-5880958654863311066, 0},\n\t{-5880886073335081746, 0},\n\t{-5877697706312135521, 3},\n\t{-5877072052452534503, 0},\n\t{-5869317614783131409, 2},\n\t{-5868057940995810756, 0},\n\t{-5859954025123610707, 3},\n\t{-5859807342507156009, 3},\n\t{-5858780058499612615, 0},\n\t{-5858052149125301197, 1},\n\t{-5851612021392286828, 2},\n\t{-5850325694538373424, 3},\n\t{-5848785670364660740, 1},\n\t{-5846121880585994243, 3},\n\t{-5843186203125985279, 2},\n\t{-5841338347125806970, 3},\n\t{-5840664452801973116, 0},\n\t{-5840088897429328297, 0},\n\t{-5839710988038317101, 1},\n\t{-5839619008701717869, 1},\n\t{-5839018459562026115, 1},\n\t{-5838977031672241708, 1},\n\t{-5837923235960690720, 2},\n\t{-5837850648565674641, 2},\n\t{-5832105622417378933, 0},\n\t{-5829322055872919791, 2},\n\t{-5829161992951060435, 2},\n\t{-5828716266408076202, 3},\n\t{-5824518420828278330, 2},\n\t{-5820881797831749827, 2},\n\t{-5816822613542206529, 1},\n\t{-5814705123892063743, 3},\n\t{-5813275254012145731, 0},\n\t{-5809821811632482967, 3},\n\t{-5808797853830028762, 0},\n\t{-5808392680720087885, 1},\n\t{-5808117208878705901, 1},\n\t{-5806941399783369191, 2},\n\t{-5806828944967857068, 2},\n\t{-5806662241152748420, 2},\n\t{-5806310465280737457, 2},\n\t{-5805485177440891929, 3},\n\t{-5804203232077319375, 0},\n\t{-5803805048179761437, 1},\n\t{-5802312341917665909, 2},\n\t{-5800236586540978139, 0},\n\t{-5800076104539027983, 0},\n\t{-5799184096267164481, 1},\n\t{-5797848800657749201, 
2},\n\t{-5794654896467148854, 1},\n\t{-5794063584458818684, 1},\n\t{-5792850457182940850, 2},\n\t{-5787199053170677427, 3},\n\t{-5786543547059476633, 0},\n\t{-5786295362185140183, 0},\n\t{-5780497518834877549, 1},\n\t{-5780063746529401023, 2},\n\t{-5780058343428484579, 2},\n\t{-5779646526432301479, 2},\n\t{-5779546203690935175, 2},\n\t{-5778349572657904941, 3},\n\t{-5777344207402541420, 0},\n\t{-5776209475474932732, 1},\n\t{-5774416178082571764, 3},\n\t{-5774315939910588213, 3},\n\t{-5773004096694998527, 0},\n\t{-5770594945753361230, 2},\n\t{-5769764604273889726, 3},\n\t{-5768468573610928610, 0},\n\t{-5766505170344286613, 2},\n\t{-5762970578609068064, 1},\n\t{-5762614110878601368, 1},\n\t{-5752917436558829739, 2},\n\t{-5751533182213976090, 3},\n\t{-5743987862639610532, 2},\n\t{-5742837527832852499, 3},\n\t{-5742728304324450446, 3},\n\t{-5742255313089622305, 3},\n\t{-5740729277847591049, 1},\n\t{-5740222729565681701, 1},\n\t{-5738288317901621193, 3},\n\t{-5737287393675878901, 0},\n\t{-5736240877935235106, 1},\n\t{-5733084185873698453, 3},\n\t{-5728728612870908779, 3},\n\t{-5727619925034594113, 0},\n\t{-5722079581828740718, 1},\n\t{-5716308923666156515, 2},\n\t{-5714724113611357071, 0},\n\t{-5714615541660346081, 0},\n\t{-5713760406579185550, 1},\n\t{-5707492524814606446, 2},\n\t{-5700031596634870440, 1},\n\t{-5698448041167822394, 2},\n\t{-5697682397915502201, 3},\n\t{-5694890177208537614, 1},\n\t{-5694856321071229986, 1},\n\t{-5692566283031219515, 3},\n\t{-5691036158511379954, 1},\n\t{-5689631257399651273, 2},\n\t{-5689558007939473990, 2},\n\t{-5689182779320343522, 2},\n\t{-5686658793116455398, 1},\n\t{-5684349833650478597, 3},\n\t{-5683671591248112646, 3},\n\t{-5680850554012438089, 2},\n\t{-5677376187118919562, 1},\n\t{-5675243393592861519, 3},\n\t{-5674365969210543707, 0},\n\t{-5669553431493780222, 0},\n\t{-5663725347997841118, 1},\n\t{-5663512103798184096, 1},\n\t{-5663309836005992930, 1},\n\t{-5660048603298790214, 0},\n\t{-5657101388407893123, 
3},\n\t{-5656680936784549584, 3},\n\t{-5656263270511225191, 0},\n\t{-5653353707532285591, 2},\n\t{-5649438177728197194, 2},\n\t{-5647434114998721183, 0},\n\t{-5646770445409016928, 0},\n\t{-5646767989091546476, 0},\n\t{-5643255548055204267, 3},\n\t{-5641600146342662464, 1},\n\t{-5637701053242426906, 0},\n\t{-5635786868678388544, 2},\n\t{-5633097462745768955, 0},\n\t{-5631160285944104406, 2},\n\t{-5629662934957765368, 3},\n\t{-5626397083165812896, 2},\n\t{-5621258638282690649, 3},\n\t{-5621127974436018505, 3},\n\t{-5620452694640365554, 0},\n\t{-5619810903102968134, 0},\n\t{-5617960707478384152, 2},\n\t{-5617324249957241321, 2},\n\t{-5616563630546386704, 3},\n\t{-5614695161768747634, 1},\n\t{-5614343715206873111, 1},\n\t{-5613571559372950390, 2},\n\t{-5612144261475393456, 3},\n\t{-5611745035151004312, 3},\n\t{-5611009341948329515, 0},\n\t{-5609561769425814737, 1},\n\t{-5609361431272501848, 1},\n\t{-5607382463416843021, 3},\n\t{-5606542653449815364, 0},\n\t{-5605042492256938836, 1},\n\t{-5604614461401967622, 2},\n\t{-5604444713787379404, 2},\n\t{-5602764933133368940, 3},\n\t{-5599340665484058919, 2},\n\t{-5599294684613758921, 2},\n\t{-5596462766476707763, 1},\n\t{-5596438330801042302, 1},\n\t{-5595126004979014460, 2},\n\t{-5594701303999037494, 2},\n\t{-5591638296503961268, 1},\n\t{-5587199406160075695, 1},\n\t{-5585723929151322045, 2},\n\t{-5580736176425647338, 3},\n\t{-5579598886738537741, 0},\n\t{-5579444472052314707, 0},\n\t{-5578556051772988908, 1},\n\t{-5576620690357831727, 2},\n\t{-5575769257558458467, 3},\n\t{-5570741777297255304, 0},\n\t{-5569514042049213472, 1},\n\t{-5568500797983381038, 2},\n\t{-5567751603197764136, 2},\n\t{-5567359688058368077, 3},\n\t{-5564794563757944818, 1},\n\t{-5562580791154953715, 3},\n\t{-5560452780724481908, 1},\n\t{-5558355547799087007, 3},\n\t{-5557448687536128796, 3},\n\t{-5557276036715867181, 0},\n\t{-5556910763995764145, 0},\n\t{-5554566470569962024, 2},\n\t{-5553479587129446960, 3},\n\t{-5552955413790145431, 
3},\n\t{-5550065975063008592, 2},\n\t{-5548705048683266731, 3},\n\t{-5543468891435970734, 0},\n\t{-5542636836385482086, 1},\n\t{-5540362191832701619, 3},\n\t{-5539527087083434287, 3},\n\t{-5530942149214946570, 3},\n\t{-5527744795508802908, 2},\n\t{-5524796414288371854, 0},\n\t{-5524541366996154603, 1},\n\t{-5523919793741503564, 1},\n\t{-5522456330938970917, 3},\n\t{-5521943171148311155, 3},\n\t{-5521536460591708952, 3},\n\t{-5517288244340479241, 3},\n\t{-5515851662681028422, 0},\n\t{-5515831076577235427, 0},\n\t{-5515633070474136197, 1},\n\t{-5515199962948010682, 1},\n\t{-5514870345574072232, 1},\n\t{-5514613066503085021, 2},\n\t{-5514215298502191579, 2},\n\t{-5512440634747795171, 3},\n\t{-5512003267608309385, 0},\n\t{-5507076511698703305, 0},\n\t{-5504358124972858655, 3},\n\t{-5503377443106243352, 0},\n\t{-5502319138122552501, 0},\n\t{-5502191975923970390, 1},\n\t{-5500544832648779103, 2},\n\t{-5499292636277080474, 3},\n\t{-5499080112007462990, 3},\n\t{-5496000756057725416, 2},\n\t{-5494726634517981713, 3},\n\t{-5493333829325368753, 0},\n\t{-5492073147545914215, 2},\n\t{-5491841562115827740, 2},\n\t{-5489199369077314999, 0},\n\t{-5488934768323399579, 0},\n\t{-5487952731275160550, 1},\n\t{-5487136893580856547, 2},\n\t{-5485345106935442275, 0},\n\t{-5481007267789029779, 3},\n\t{-5478490562174259867, 2},\n\t{-5476576912801499208, 3},\n\t{-5472250321127637490, 3},\n\t{-5471332534680759777, 0},\n\t{-5468111483789349336, 3},\n\t{-5466349205228466317, 0},\n\t{-5466013031666420801, 1},\n\t{-5463785630593359906, 3},\n\t{-5463242731657101181, 3},\n\t{-5461515911566278676, 1},\n\t{-5459527425136614317, 2},\n\t{-5454575132172821077, 3},\n\t{-5450483394940942029, 2},\n\t{-5449248427097354491, 0},\n\t{-5448445717926354533, 0},\n\t{-5448183344898469783, 1},\n\t{-5447646263958951996, 1},\n\t{-5442524195361505386, 2},\n\t{-5440678581814775902, 3},\n\t{-5440309246548971917, 0},\n\t{-5438099174503930974, 1},\n\t{-5437873074248408236, 2},\n\t{-5436824012974022621, 
3},\n\t{-5436611961162705839, 3},\n\t{-5435792376054095623, 0},\n\t{-5433613781658773038, 1},\n\t{-5432462295380878414, 3},\n\t{-5432114204577588111, 3},\n\t{-5431072992975085575, 0},\n\t{-5430140834124040603, 1},\n\t{-5422607874532281563, 3},\n\t{-5419856609020354342, 2},\n\t{-5419053686406675431, 2},\n\t{-5418122542875603493, 3},\n\t{-5418041515056817603, 3},\n\t{-5416497630528714230, 1},\n\t{-5416034870762205398, 1},\n\t{-5411283789767690529, 1},\n\t{-5409977454231509655, 2},\n\t{-5407619944238414627, 1},\n\t{-5405128988855145566, 3},\n\t{-5399425154680423251, 0},\n\t{-5398251006076562168, 1},\n\t{-5396646468148662242, 2},\n\t{-5393418496119004816, 1},\n\t{-5390963412000624836, 3},\n\t{-5390601925913933087, 0},\n\t{-5386601830652639078, 3},\n\t{-5384411677445067841, 1},\n\t{-5383352037660172997, 2},\n\t{-5380215922526749815, 1},\n\t{-5374689224512951539, 2},\n\t{-5374107232570356222, 2},\n\t{-5372439262726269948, 0},\n\t{-5372411674999096397, 0},\n\t{-5371327678932016869, 1},\n\t{-5368625300074697211, 3},\n\t{-5366691332889381881, 1},\n\t{-5366439598704547871, 1},\n\t{-5364292145142915311, 3},\n\t{-5362071638590124707, 1},\n\t{-5360426712748302184, 2},\n\t{-5359232005084796593, 0},\n\t{-5356051159561591235, 2},\n\t{-5355835024518793238, 3},\n\t{-5354880558566612764, 3},\n\t{-5354406834793543347, 0},\n\t{-5351441552593394734, 2},\n\t{-5350982773747000918, 3},\n\t{-5344853047422093147, 0},\n\t{-5343978277108675528, 1},\n\t{-5342731969020123767, 2},\n\t{-5340578321255665712, 0},\n\t{-5339278877302920460, 1},\n\t{-5337756457703186169, 3},\n\t{-5332747985549737410, 3},\n\t{-5331925210594118629, 0},\n\t{-5328631463982503305, 3},\n\t{-5327869734693528484, 3},\n\t{-5327839656105672510, 3},\n\t{-5326415971987506540, 1},\n\t{-5325720175558380174, 1},\n\t{-5323187287929944734, 0},\n\t{-5322900966699956089, 0},\n\t{-5322300808657613268, 0},\n\t{-5320595427506558879, 2},\n\t{-5318521916100024569, 0},\n\t{-5317433666105507102, 1},\n\t{-5317126802133407007, 
1},\n\t{-5307573677060359199, 1},\n\t{-5306922994580998318, 2},\n\t{-5306794349527260446, 2},\n\t{-5305127458695560497, 0},\n\t{-5297708183304154525, 2},\n\t{-5297282118865418393, 3},\n\t{-5297245636533346789, 3},\n\t{-5294848948258846675, 1},\n\t{-5293377031823166388, 2},\n\t{-5291797599243872284, 3},\n\t{-5291760462792277868, 3},\n\t{-5288619989468578322, 2},\n\t{-5284459505981439747, 2},\n\t{-5284354102624275531, 2},\n\t{-5282708820311100933, 0},\n\t{-5278604717646940931, 3},\n\t{-5277069649499464211, 1},\n\t{-5276664321970943111, 1},\n\t{-5274386457344274680, 3},\n\t{-5273808349476238502, 3},\n\t{-5273075611705304211, 0},\n\t{-5271390040461561671, 2},\n\t{-5270963193282499097, 2},\n\t{-5270726720487222345, 2},\n\t{-5267239218325224507, 1},\n\t{-5265118169892000174, 3},\n\t{-5265016110310601865, 3},\n\t{-5264041159625350106, 0},\n\t{-5263244051079102379, 1},\n\t{-5262762141804440042, 1},\n\t{-5262683775438557520, 1},\n\t{-5258307907599402484, 1},\n\t{-5257646681173119148, 2},\n\t{-5256988356335551296, 2},\n\t{-5255670804996085146, 0},\n\t{-5254783394915826106, 0},\n\t{-5254424419428673227, 1},\n\t{-5251885543289955947, 3},\n\t{-5248263410612557441, 2},\n\t{-5245928255340910715, 0},\n\t{-5241084954485676321, 0},\n\t{-5239249045238660587, 2},\n\t{-5238855867206833270, 2},\n\t{-5237988022502100812, 3},\n\t{-5236537867664629478, 1},\n\t{-5236444219007459153, 1},\n\t{-5233406080267388751, 3},\n\t{-5232412598627684909, 0},\n\t{-5231816320156232231, 1},\n\t{-5221707034393984796, 2},\n\t{-5220986182173293448, 2},\n\t{-5220165873326677824, 3},\n\t{-5215223138473580380, 3},\n\t{-5212751681284358775, 2},\n\t{-5212305281791657985, 2},\n\t{-5211614086919348430, 3},\n\t{-5211307721706029108, 3},\n\t{-5209975805866958972, 0},\n\t{-5209199170859139265, 1},\n\t{-5206164747817021090, 3},\n\t{-5204441667730518496, 1},\n\t{-5204235901132667876, 1},\n\t{-5203491954606281743, 2},\n\t{-5202127912086710904, 3},\n\t{-5200816742750491295, 0},\n\t{-5199699222754213082, 
1},\n\t{-5198857509880348751, 2},\n\t{-5197236321445898576, 3},\n\t{-5196223118779185603, 0},\n\t{-5196121090566806152, 0},\n\t{-5195791591533267993, 1},\n\t{-5194688130367392229, 2},\n\t{-5193709875431218533, 3},\n\t{-5193357943657938637, 3},\n\t{-5192903605668416798, 3},\n\t{-5191917709250527934, 0},\n\t{-5184757118606011039, 3},\n\t{-5184530317961488766, 3},\n\t{-5183884636334254102, 3},\n\t{-5183617010069245198, 0},\n\t{-5180404946730959781, 2},\n\t{-5179487393384558857, 3},\n\t{-5176504716103879365, 2},\n\t{-5174875226115557661, 3},\n\t{-5173912931405489553, 0},\n\t{-5171717320508064790, 2},\n\t{-5171629777865883349, 2},\n\t{-5171555043511855039, 2},\n\t{-5170616653714730441, 3},\n\t{-5169934740335330344, 0},\n\t{-5167540477009665325, 2},\n\t{-5161686639220067415, 3},\n\t{-5161049479861756704, 0},\n\t{-5157642182853009198, 3},\n\t{-5156937362304975751, 3},\n\t{-5156275646299893794, 0},\n\t{-5155481074623917332, 1},\n\t{-5149400603777938286, 2},\n\t{-5148434599804172606, 3},\n\t{-5146328674912038004, 1},\n\t{-5142223214106967298, 0},\n\t{-5140570578771181976, 2},\n\t{-5139282656038280568, 3},\n\t{-5137338804828268449, 1},\n\t{-5136436034318817205, 1},\n\t{-5135947475024812391, 2},\n\t{-5125648645737328630, 3},\n\t{-5123088821064099475, 1},\n\t{-5121725746378458737, 2},\n\t{-5118586581467447316, 1},\n\t{-5114662081166532752, 1},\n\t{-5109454754791282233, 1},\n\t{-5105732514881254310, 1},\n\t{-5105558910145551546, 1},\n\t{-5103408411524981692, 3},\n\t{-5100878244403619134, 1},\n\t{-5100852449300312276, 1},\n\t{-5100510705547514636, 1},\n\t{-5095502385501718017, 2},\n\t{-5095307809472325888, 2},\n\t{-5094395489932226499, 3},\n\t{-5094264456747936197, 3},\n\t{-5094033370584215789, 3},\n\t{-5091736688906945885, 1},\n\t{-5088434469575865230, 0},\n\t{-5085542453411885095, 3},\n\t{-5084604466143069318, 3},\n\t{-5083655156822667384, 0},\n\t{-5079161466599766061, 0},\n\t{-5077437972755116566, 2},\n\t{-5074682300026209687, 0},\n\t{-5073614622058833252, 
1},\n\t{-5073402585731890632, 1},\n\t{-5073397737337874058, 1},\n\t{-5070088429523050068, 0},\n\t{-5068861855813154374, 1},\n\t{-5068526843484069453, 2},\n\t{-5066589709579414792, 3},\n\t{-5060428630355414206, 1},\n\t{-5060080781628701567, 1},\n\t{-5060013472184425604, 1},\n\t{-5055272552173607211, 2},\n\t{-5054876211334751308, 2},\n\t{-5053050689410555081, 3},\n\t{-5051377301356016614, 1},\n\t{-5043530886164321276, 0},\n\t{-5043153527465775731, 0},\n\t{-5041292650184788926, 2},\n\t{-5037219543135336145, 2},\n\t{-5033656962231141962, 1},\n\t{-5032527916254818814, 2},\n\t{-5032036757213758275, 2},\n\t{-5030558856659131498, 3},\n\t{-5029920112752322857, 0},\n\t{-5020700312773736984, 0},\n\t{-5016742905311933110, 0},\n\t{-5015228222576151951, 1},\n\t{-5014363174803746991, 2},\n\t{-5013811634212997403, 2},\n\t{-5010703442906645260, 1},\n\t{-5009849043302403088, 2},\n\t{-5009254016366942159, 2},\n\t{-5007799251163635980, 0},\n\t{-5007124619047970829, 0},\n\t{-5006682216699136691, 1},\n\t{-5004874386266052595, 2},\n\t{-5000710896363955743, 2},\n\t{-5000152308381522707, 2},\n\t{-4998938400332066905, 0},\n\t{-4996823334878306523, 1},\n\t{-4996654215152623560, 2},\n\t{-4991197589422879648, 2},\n\t{-4988658350545544010, 1},\n\t{-4983332920183961152, 1},\n\t{-4982546690130916642, 2},\n\t{-4982516479631140380, 2},\n\t{-4981146103902529891, 3},\n\t{-4980841468236328214, 0},\n\t{-4976592492689997952, 3},\n\t{-4976224437144196852, 0},\n\t{-4975939219453018600, 0},\n\t{-4972898029745948170, 3},\n\t{-4971424511400114418, 0},\n\t{-4969740291049685087, 1},\n\t{-4969463949686710398, 2},\n\t{-4969104315186509326, 2},\n\t{-4966534857378424546, 0},\n\t{-4966119218957518266, 1},\n\t{-4964534536534549854, 2},\n\t{-4963785652766867162, 3},\n\t{-4962050875221106380, 0},\n\t{-4959516320610841244, 3},\n\t{-4956542545304898984, 1},\n\t{-4955487920482744449, 2},\n\t{-4954927891204721414, 3},\n\t{-4953924679062632002, 0},\n\t{-4951866248219015491, 1},\n\t{-4951139231763874703, 
2},\n\t{-4949088353158080713, 0},\n\t{-4946518029393824446, 2},\n\t{-4944839588321918816, 0},\n\t{-4941981821602892394, 2},\n\t{-4932940324188180605, 2},\n\t{-4926535798589625391, 0},\n\t{-4926241335036264832, 0},\n\t{-4925796587969193277, 1},\n\t{-4924055798812744989, 2},\n\t{-4919035218919208669, 3},\n\t{-4918052235596060978, 3},\n\t{-4917809685617656211, 0},\n\t{-4917638975846390902, 0},\n\t{-4917300107565742122, 0},\n\t{-4917236602780645529, 0},\n\t{-4916794223449858962, 1},\n\t{-4911516589511161655, 1},\n\t{-4910949219842705334, 2},\n\t{-4910536147546886229, 2},\n\t{-4908522960974967869, 0},\n\t{-4906960218854491162, 1},\n\t{-4904781830609650647, 3},\n\t{-4903242827307200629, 1},\n\t{-4896860683801786083, 2},\n\t{-4896849754059983666, 2},\n\t{-4894851229303410597, 0},\n\t{-4891519093649931897, 3},\n\t{-4888023992652221449, 2},\n\t{-4887959752256510216, 2},\n\t{-4887845318213502508, 2},\n\t{-4885287953892531707, 0},\n\t{-4883218843925761175, 2},\n\t{-4881051016984397884, 0},\n\t{-4879942821936229373, 1},\n\t{-4879611786541315667, 2},\n\t{-4877921182374090868, 3},\n\t{-4874249212053665464, 2},\n\t{-4874010361430588785, 3},\n\t{-4872777162774248744, 0},\n\t{-4871447711271581343, 1},\n\t{-4871249571359782266, 1},\n\t{-4870834360041101797, 1},\n\t{-4865863835263933873, 2},\n\t{-4865371389809966768, 2},\n\t{-4862210694030833877, 1},\n\t{-4858825736074737297, 0},\n\t{-4858542213461876478, 0},\n\t{-4857362727681001546, 1},\n\t{-4854678498559698043, 0},\n\t{-4851869817782247538, 2},\n\t{-4847344244667648284, 2},\n\t{-4845607813370061168, 0},\n\t{-4843180828489849300, 2},\n\t{-4842097842146962106, 3},\n\t{-4841333732663314815, 0},\n\t{-4841181207222055129, 0},\n\t{-4839108153486289381, 2},\n\t{-4837883279450790537, 3},\n\t{-4837233048180842088, 3},\n\t{-4835218748423840413, 1},\n\t{-4833232644757996837, 3},\n\t{-4830051133950141363, 2},\n\t{-4826898855822236082, 0},\n\t{-4826430511057931074, 1},\n\t{-4826338070554219785, 1},\n\t{-4825953256895355413, 
1},\n\t{-4825448586273649995, 2},\n\t{-4821862325318471663, 1},\n\t{-4819781065049956145, 3},\n\t{-4818995353087626309, 3},\n\t{-4813557317925471817, 0},\n\t{-4812124958503027704, 1},\n\t{-4811813066355538293, 2},\n\t{-4807910218652461766, 1},\n\t{-4807852449082845473, 1},\n\t{-4806514943625509940, 2},\n\t{-4805045245139102066, 0},\n\t{-4803995691261591303, 1},\n\t{-4803349985737015151, 1},\n\t{-4800382969258635060, 0},\n\t{-4798424048143666412, 2},\n\t{-4797610444716158293, 2},\n\t{-4797306662990335749, 3},\n\t{-4794717831080081187, 1},\n\t{-4793296205530171295, 2},\n\t{-4791593016730960067, 0},\n\t{-4791238708597264878, 0},\n\t{-4789904410265471880, 1},\n\t{-4779178343139787664, 3},\n\t{-4778209828139427195, 0},\n\t{-4775539693400245240, 2},\n\t{-4774959687383610891, 2},\n\t{-4773429019313660707, 0},\n\t{-4769260692642680202, 0},\n\t{-4766621310274825876, 2},\n\t{-4762900661303769103, 1},\n\t{-4758378996349173503, 1},\n\t{-4754620928613649830, 1},\n\t{-4754253937633976765, 1},\n\t{-4747191700190733942, 3},\n\t{-4746604330404571649, 0},\n\t{-4746505594134442713, 0},\n\t{-4741431591025034463, 0},\n\t{-4739912967485081963, 2},\n\t{-4739329085425870364, 2},\n\t{-4738550323190747036, 3},\n\t{-4737433913749659540, 0},\n\t{-4737304670691166223, 0},\n\t{-4736983633384435377, 0},\n\t{-4734750563932957673, 2},\n\t{-4733910172381620349, 3},\n\t{-4733891583519598782, 3},\n\t{-4733634774104906672, 3},\n\t{-4733398939432662353, 3},\n\t{-4732593032021858686, 0},\n\t{-4730914899414744944, 2},\n\t{-4728172259949389069, 0},\n\t{-4727916498288597347, 0},\n\t{-4727794693550724911, 0},\n\t{-4726019226877706201, 2},\n\t{-4725270935207140997, 3},\n\t{-4723748945063187964, 0},\n\t{-4722442779976302420, 1},\n\t{-4721401549406389817, 2},\n\t{-4720994846808957936, 2},\n\t{-4720488453028895900, 3},\n\t{-4719258288844216881, 0},\n\t{-4717733386118379464, 1},\n\t{-4716253367680249664, 3},\n\t{-4715725848594724738, 3},\n\t{-4710702916417813035, 0},\n\t{-4710572361354397696, 
0},\n\t{-4709966675297352985, 0},\n\t{-4708118120562827197, 2},\n\t{-4707537429285632316, 2},\n\t{-4705983913402524320, 0},\n\t{-4705231169545999098, 0},\n\t{-4704116416622451339, 1},\n\t{-4702351735373991496, 3},\n\t{-4701764280626417302, 3},\n\t{-4698084369789267227, 3},\n\t{-4695585675625201225, 1},\n\t{-4694258396396986106, 2},\n\t{-4690731007560645374, 1},\n\t{-4690556995731549923, 1},\n\t{-4688769720405001308, 3},\n\t{-4684708095985816942, 3},\n\t{-4684695029068869546, 3},\n\t{-4684222485141860970, 3},\n\t{-4681833310692867186, 1},\n\t{-4680548677201305450, 2},\n\t{-4680172177871606383, 3},\n\t{-4678873362263130599, 0},\n\t{-4674869417196444743, 3},\n\t{-4674691099535765984, 0},\n\t{-4673302528767406827, 1},\n\t{-4672561038115523082, 1},\n\t{-4671871945764623680, 2},\n\t{-4670567886345620379, 3},\n\t{-4667562882376350140, 2},\n\t{-4667218722195082952, 2},\n\t{-4662697209525852183, 2},\n\t{-4662603989406886509, 2},\n\t{-4659994131661891267, 1},\n\t{-4656801354467441996, 3},\n\t{-4654294432280869953, 2},\n\t{-4654182173899701813, 2},\n\t{-4653872321981084756, 2},\n\t{-4652775573311524477, 3},\n\t{-4648450953659344637, 3},\n\t{-4647612781712755563, 0},\n\t{-4646710187752899228, 0},\n\t{-4645655625434552997, 1},\n\t{-4645511024212851929, 1},\n\t{-4643689013147431986, 3},\n\t{-4639266646006595743, 3},\n\t{-4637174299809092594, 1},\n\t{-4633421647102713624, 0},\n\t{-4632200829506322140, 1},\n\t{-4627572914254313882, 1},\n\t{-4623208620257384437, 1},\n\t{-4622800900009632030, 2},\n\t{-4615250638068998557, 0},\n\t{-4613178334429917610, 2},\n\t{-4610620133616642135, 0},\n\t{-4610599795335092897, 0},\n\t{-4610101323551528906, 1},\n\t{-4609292032814548307, 2},\n\t{-4606415062729030268, 0},\n\t{-4604984331714684278, 1},\n\t{-4603074222154359762, 3},\n\t{-4599861807933115141, 2},\n\t{-4597127249293679259, 0},\n\t{-4595436456670287526, 2},\n\t{-4592246642337519804, 1},\n\t{-4589533449883538396, 3},\n\t{-4588611536874075013, 0},\n\t{-4588074070662873769, 
0},\n\t{-4587388476261305620, 1},\n\t{-4585542934115940376, 3},\n\t{-4582697147304426976, 1},\n\t{-4581661831705042623, 2},\n\t{-4580663703985051255, 3},\n\t{-4579027551067414344, 1},\n\t{-4576363354771071998, 3},\n\t{-4576222456131952665, 3},\n\t{-4574208681095108402, 1},\n\t{-4571466779238782088, 3},\n\t{-4570026187497261953, 1},\n\t{-4565633717702658059, 0},\n\t{-4565116358035735954, 1},\n\t{-4564612545758619460, 1},\n\t{-4562770914024123658, 3},\n\t{-4562721811059824378, 3},\n\t{-4561785873721971802, 0},\n\t{-4561363598780301789, 0},\n\t{-4559446090756373250, 2},\n\t{-4558459138600834922, 3},\n\t{-4556377361319183276, 1},\n\t{-4554040251095883995, 3},\n\t{-4553689820529357924, 3},\n\t{-4551779164546056142, 1},\n\t{-4549076572087129824, 3},\n\t{-4547688629031365870, 0},\n\t{-4546864579398325329, 1},\n\t{-4543521680766897536, 0},\n\t{-4542595122528538331, 1},\n\t{-4542499078834861537, 1},\n\t{-4541033932054777737, 2},\n\t{-4537150516675728094, 2},\n\t{-4536517903068610811, 2},\n\t{-4526526068701337004, 3},\n\t{-4522753693990510577, 2},\n\t{-4522598948090011960, 3},\n\t{-4522056921300507992, 3},\n\t{-4519353631048453476, 2},\n\t{-4519284779918148163, 2},\n\t{-4519200920148599965, 2},\n\t{-4518972852354251300, 2},\n\t{-4518842920533332219, 2},\n\t{-4518650484171332393, 2},\n\t{-4517868548629276189, 3},\n\t{-4514547131282243799, 2},\n\t{-4514245341945593208, 2},\n\t{-4508803205300584625, 3},\n\t{-4499081433439364845, 0},\n\t{-4497674587365452438, 1},\n\t{-4492906579517039220, 1},\n\t{-4491939675940901343, 2},\n\t{-4491719280731191636, 2},\n\t{-4488548387505360223, 1},\n\t{-4486174306773576719, 3},\n\t{-4484678596657481726, 0},\n\t{-4483794462233584317, 1},\n\t{-4483785032915471680, 1},\n\t{-4477356668672593201, 3},\n\t{-4476297685227427284, 0},\n\t{-4474103850173054957, 2},\n\t{-4471701795992391617, 0},\n\t{-4467340103054293189, 0},\n\t{-4467336203569939194, 0},\n\t{-4464214863794381138, 2},\n\t{-4463667901673107188, 3},\n\t{-4459910984800214053, 
2},\n\t{-4455401865063209534, 2},\n\t{-4451217145206705311, 2},\n\t{-4450487182305093856, 3},\n\t{-4448475363628171969, 0},\n\t{-4448266344021483674, 1},\n\t{-4447350998887115028, 1},\n\t{-4446410810705290341, 2},\n\t{-4446201414591381807, 2},\n\t{-4445606005343463931, 3},\n\t{-4444055855116269395, 0},\n\t{-4443502030650246051, 1},\n\t{-4443248050314220796, 1},\n\t{-4441092155114467044, 3},\n\t{-4437801878704656530, 2},\n\t{-4437007420935676978, 3},\n\t{-4434175212668565119, 1},\n\t{-4433163933883236209, 2},\n\t{-4432302371416702621, 3},\n\t{-4431351660348213619, 0},\n\t{-4428078134594632225, 3},\n\t{-4427712353279057290, 3},\n\t{-4427588386238766636, 3},\n\t{-4427107421599904597, 3},\n\t{-4426770926047084271, 0},\n\t{-4417600275683112731, 0},\n\t{-4417309497663099476, 0},\n\t{-4417273361583245543, 0},\n\t{-4414758944446414549, 2},\n\t{-4411455567527596217, 1},\n\t{-4407583084517622177, 1},\n\t{-4403085833655918309, 1},\n\t{-4400821712403147787, 3},\n\t{-4396360821975859528, 3},\n\t{-4393519688682690056, 1},\n\t{-4390174909945848982, 0},\n\t{-4390025216756011970, 0},\n\t{-4388650160949606616, 2},\n\t{-4385559562871266124, 0},\n\t{-4379930584539173362, 1},\n\t{-4379185727254278211, 2},\n\t{-4377144299298821192, 0},\n\t{-4376687615902103553, 0},\n\t{-4375651041726419862, 1},\n\t{-4373344997905034567, 3},\n\t{-4372897892747331138, 0},\n\t{-4371744299990688347, 1},\n\t{-4370917529886439699, 1},\n\t{-4368295940950441666, 0},\n\t{-4361490933227191698, 2},\n\t{-4361394730163158793, 2},\n\t{-4360064118806649535, 3},\n\t{-4357937955991924021, 1},\n\t{-4353325903928971697, 1},\n\t{-4350942178059917901, 3},\n\t{-4348766228246209535, 1},\n\t{-4347615193014774506, 2},\n\t{-4345765545292538456, 0},\n\t{-4344732026205427574, 1},\n\t{-4344419041661215904, 1},\n\t{-4343750233159048866, 1},\n\t{-4342335719565939093, 3},\n\t{-4338969298166626168, 2},\n\t{-4338821378092888407, 2},\n\t{-4331815229664943834, 0},\n\t{-4330670095648685953, 1},\n\t{-4327544673578189891, 
0},\n\t{-4325133168399894989, 2},\n\t{-4319071977015497829, 3},\n\t{-4317710526289596374, 1},\n\t{-4315574563852770765, 2},\n\t{-4313927721056181069, 0},\n\t{-4310197755365846068, 3},\n\t{-4307955227299174590, 1},\n\t{-4304851935210358239, 0},\n\t{-4304439646632382700, 0},\n\t{-4304301207698444358, 1},\n\t{-4303204856120571019, 1},\n\t{-4301502797839848070, 3},\n\t{-4300504505501978335, 0},\n\t{-4300160892676710139, 0},\n\t{-4299927082893888286, 0},\n\t{-4299832988622941551, 0},\n\t{-4299028748738986908, 1},\n\t{-4297759236630095600, 2},\n\t{-4297320465257783603, 3},\n\t{-4294373277123326663, 1},\n\t{-4293191114563369416, 2},\n\t{-4292820624629565952, 3},\n\t{-4291905700340272690, 0},\n\t{-4291239224720770335, 0},\n\t{-4290883653258986699, 0},\n\t{-4290362251207186563, 1},\n\t{-4287936228176707415, 3},\n\t{-4284999263751335604, 2},\n\t{-4283278235954268913, 3},\n\t{-4281057395518207394, 1},\n\t{-4280489890656454178, 2},\n\t{-4280202669899116527, 2},\n\t{-4277415148129936043, 0},\n\t{-4274026569779475298, 3},\n\t{-4273107572553001980, 0},\n\t{-4271950217132835218, 1},\n\t{-4270350403991943761, 3},\n\t{-4268315464212555340, 0},\n\t{-4268138816781976260, 1},\n\t{-4267726277151817515, 1},\n\t{-4266515200836801306, 2},\n\t{-4261366394722917510, 3},\n\t{-4258837939710953673, 1},\n\t{-4258270982360468124, 1},\n\t{-4256063556976463363, 3},\n\t{-4255668840409032550, 0},\n\t{-4255638825816421901, 0},\n\t{-4251869571228206080, 3},\n\t{-4248753146841169456, 2},\n\t{-4243101971442096350, 3},\n\t{-4242760677387880385, 3},\n\t{-4242305407455055640, 0},\n\t{-4242101396975959924, 0},\n\t{-4241561317899344279, 0},\n\t{-4236321747242487876, 1},\n\t{-4234724936049240980, 2},\n\t{-4233863356371244139, 3},\n\t{-4233431554033538798, 3},\n\t{-4232842794859698268, 0},\n\t{-4231716155975095872, 1},\n\t{-4231021345161535354, 2},\n\t{-4225661690744912155, 2},\n\t{-4224943814131016378, 3},\n\t{-4223122339307918910, 1},\n\t{-4222267140063604901, 1},\n\t{-4221629084319753063, 
2},\n\t{-4221057327769686285, 2},\n\t{-4218754759885102818, 0},\n\t{-4215196884482189181, 0},\n\t{-4204935900211475651, 1},\n\t{-4203323572718160470, 2},\n\t{-4196555312996833875, 0},\n\t{-4195445419333092387, 1},\n\t{-4191928069932749580, 0},\n\t{-4191506859048771468, 1},\n\t{-4190717390187902342, 1},\n\t{-4190120933356584045, 2},\n\t{-4189742212418008565, 2},\n\t{-4189278360872181855, 3},\n\t{-4189098064378700010, 3},\n\t{-4189062136462818778, 3},\n\t{-4188870437180598080, 3},\n\t{-4187023270060251009, 1},\n\t{-4186833421425795310, 1},\n\t{-4180565984494264806, 2},\n\t{-4178907430303257312, 0},\n\t{-4177920382258414955, 1},\n\t{-4176956983774723121, 2},\n\t{-4176948525797914138, 2},\n\t{-4173700745718737263, 1},\n\t{-4170646438718103830, 3},\n\t{-4160436777582262202, 0},\n\t{-4159715318569072964, 1},\n\t{-4157945119369451038, 3},\n\t{-4157887224649636337, 3},\n\t{-4153605021112475459, 2},\n\t{-4152560725523245576, 3},\n\t{-4150673962603509365, 1},\n\t{-4149750525713339994, 2},\n\t{-4141922532432088677, 1},\n\t{-4140958344663356700, 2},\n\t{-4138874891612903687, 3},\n\t{-4137256525278690375, 1},\n\t{-4136275267952281716, 2},\n\t{-4133254802538490627, 0},\n\t{-4130954407709287987, 2},\n\t{-4130193969752965711, 3},\n\t{-4129954525232364292, 3},\n\t{-4126676448278269567, 2},\n\t{-4126649819573295910, 2},\n\t{-4126022319757150219, 3},\n\t{-4117478652966258207, 2},\n\t{-4112494578058335835, 3},\n\t{-4109558497847283361, 1},\n\t{-4108999530852690122, 2},\n\t{-4107409498130039330, 3},\n\t{-4105712505779530935, 1},\n\t{-4100882093134726913, 1},\n\t{-4096974618499175749, 1},\n\t{-4090795883503224198, 2},\n\t{-4089890186851727381, 3},\n\t{-4087423745282220155, 1},\n\t{-4086055416696102866, 2},\n\t{-4083373563175178379, 1},\n\t{-4083163041990028050, 1},\n\t{-4080872684343453264, 3},\n\t{-4079715645315677776, 0},\n\t{-4079433759635812581, 0},\n\t{-4078373898870909190, 1},\n\t{-4077896571028456547, 2},\n\t{-4073167242895079479, 2},\n\t{-4073076567385986951, 
2},\n\t{-4068459702867266788, 2},\n\t{-4059894304085030305, 2},\n\t{-4059677101225892588, 2},\n\t{-4059451909711416421, 2},\n\t{-4056696195379453294, 0},\n\t{-4053465730322555749, 3},\n\t{-4052844207042649723, 0},\n\t{-4052181449728743725, 0},\n\t{-4048212021062351146, 0},\n\t{-4046921116820503312, 1},\n\t{-4039190470714210410, 0},\n\t{-4037225144879793641, 2},\n\t{-4034334875957649468, 0},\n\t{-4031837968928549064, 3},\n\t{-4031367018381678751, 3},\n\t{-4030890765945694337, 3},\n\t{-4028682177843946708, 1},\n\t{-4027702632847308982, 2},\n\t{-4025687850568739708, 0},\n\t{-4025015917345141601, 1},\n\t{-4023141977444151869, 2},\n\t{-4022992180654516720, 2},\n\t{-4017659838436031667, 3},\n\t{-4015645187168478732, 1},\n\t{-4013548278083218596, 3},\n\t{-4012429591227980114, 0},\n\t{-4010630261469503347, 1},\n\t{-4009078060533900972, 3},\n\t{-4006825819617497234, 1},\n\t{-4006467550079862064, 1},\n\t{-4004470591263374033, 3},\n\t{-3997956394132363479, 1},\n\t{-3995335879432697408, 3},\n\t{-3994665980755925788, 0},\n\t{-3994009643922001454, 0},\n\t{-3992096059762536243, 2},\n\t{-3988936855091629080, 1},\n\t{-3987790902105876413, 2},\n\t{-3985743255213422837, 3},\n\t{-3982443398310480624, 2},\n\t{-3977406941901566518, 3},\n\t{-3976196008357446480, 0},\n\t{-3975688262285995778, 0},\n\t{-3974009097065662970, 2},\n\t{-3973615685383493036, 2},\n\t{-3972627677769518485, 3},\n\t{-3964654878906885299, 2},\n\t{-3961521658154015719, 1},\n\t{-3960218524112440332, 2},\n\t{-3947706325254027443, 1},\n\t{-3947044209485412800, 2},\n\t{-3946845662259312188, 2},\n\t{-3946752530248974545, 2},\n\t{-3946726374625185903, 2},\n\t{-3944348713026804818, 0},\n\t{-3941370931176305364, 3},\n\t{-3940804639175720640, 3},\n\t{-3936419407847602605, 3},\n\t{-3932708474861041482, 3},\n\t{-3932408532646671665, 3},\n\t{-3931601567313153666, 0},\n\t{-3930501556214092823, 1},\n\t{-3929651659870777663, 1},\n\t{-3929593208529092257, 1},\n\t{-3925890761287323678, 1},\n\t{-3925760531953829583, 
1},\n\t{-3923797792409501443, 2},\n\t{-3922645388544992630, 3},\n\t{-3920651380743710045, 1},\n\t{-3920518588836006508, 1},\n\t{-3919896655614933618, 2},\n\t{-3918952642576587725, 3},\n\t{-3917982227897728604, 0},\n\t{-3914719156036376534, 3},\n\t{-3911737843708429833, 1},\n\t{-3909445695983139426, 3},\n\t{-3907891795395515714, 1},\n\t{-3907371012693288386, 1},\n\t{-3906851181590454148, 2},\n\t{-3905097212338945156, 3},\n\t{-3901110048445832368, 3},\n\t{-3899995630568833247, 0},\n\t{-3896710635716910813, 3},\n\t{-3896137430667412352, 3},\n\t{-3893349189582953959, 2},\n\t{-3887191153531825259, 3},\n\t{-3882110846608098066, 3},\n\t{-3881936367027667277, 0},\n\t{-3881548493247531224, 0},\n\t{-3878030389052407256, 3},\n\t{-3875078776692649664, 2},\n\t{-3870873335596276974, 1},\n\t{-3865268631032820428, 2},\n\t{-3865158825222636626, 3},\n\t{-3863902491904934154, 0},\n\t{-3863343039378540029, 0},\n\t{-3859462584613684179, 0},\n\t{-3859160025432150604, 0},\n\t{-3853372674280219518, 1},\n\t{-3852191315790415688, 2},\n\t{-3850181778053110089, 0},\n\t{-3850039277879998009, 0},\n\t{-3848215945990212029, 2},\n\t{-3847658431616730953, 2},\n\t{-3847114432168708931, 3},\n\t{-3845874285069279250, 0},\n\t{-3841936817396857991, 3},\n\t{-3840903107156260910, 0},\n\t{-3840866887006051362, 0},\n\t{-3840508224779668867, 0},\n\t{-3836182662852761251, 0},\n\t{-3834029713142847278, 2},\n\t{-3833625370742996598, 3},\n\t{-3831598919814223983, 0},\n\t{-3831419523526231787, 1},\n\t{-3829746373712532045, 2},\n\t{-3827486589728217862, 0},\n\t{-3824747940283909239, 2},\n\t{-3823354711099620962, 0},\n\t{-3818937968248472959, 0},\n\t{-3818708837457895937, 0},\n\t{-3818617610388909737, 0},\n\t{-3818327790062440044, 0},\n\t{-3817352330446250177, 1},\n\t{-3817212587026512720, 1},\n\t{-3810338444535036275, 3},\n\t{-3807535259076845296, 2},\n\t{-3806622383909582641, 3},\n\t{-3802561550704444403, 2},\n\t{-3802268439002071529, 2},\n\t{-3801009773909343550, 0},\n\t{-3798504134210806581, 
2},\n\t{-3797481071386508797, 3},\n\t{-3793938017067969976, 2},\n\t{-3791609596159981991, 0},\n\t{-3790248883256837452, 1},\n\t{-3788920491993865323, 2},\n\t{-3784549378444147694, 2},\n\t{-3783102854818228214, 3},\n\t{-3780208376111087297, 2},\n\t{-3776500871258140115, 1},\n\t{-3775372266411640179, 2},\n\t{-3772988298449024198, 0},\n\t{-3771441800630258237, 2},\n\t{-3771173093867029902, 2},\n\t{-3768406467549387382, 0},\n\t{-3766440169414022031, 2},\n\t{-3763557199532202812, 1},\n\t{-3757770410038772221, 2},\n\t{-3757382991731304877, 2},\n\t{-3757313687557725022, 2},\n\t{-3755555916617884865, 0},\n\t{-3755210973340753899, 0},\n\t{-3753241624802660985, 2},\n\t{-3750173037840668314, 1},\n\t{-3747868018731742202, 3},\n\t{-3737915017325335522, 0},\n\t{-3737674853963626806, 0},\n\t{-3733791714789696032, 3},\n\t{-3732254071027355960, 1},\n\t{-3731251881087956386, 1},\n\t{-3729717476870947675, 3},\n\t{-3727255257665134604, 1},\n\t{-3725837184622941878, 2},\n\t{-3724869856450962728, 3},\n\t{-3722260448113296229, 1},\n\t{-3718210330048510601, 1},\n\t{-3717924030743621062, 1},\n\t{-3715285310683183290, 0},\n\t{-3710734326747495379, 0},\n\t{-3705340478030227643, 0},\n\t{-3703891554627211559, 2},\n\t{-3701836228696541005, 0},\n\t{-3701549597637477241, 0},\n\t{-3693357103559930697, 3},\n\t{-3692347733187309808, 0},\n\t{-3691242679305435502, 1},\n\t{-3688557835757145555, 3},\n\t{-3684778967206358669, 3},\n\t{-3684227970320085677, 3},\n\t{-3684198182839632523, 3},\n\t{-3684129629353653488, 3},\n\t{-3682562626841939283, 1},\n\t{-3679041728093413613, 0},\n\t{-3677390189902236104, 1},\n\t{-3676805645451162764, 2},\n\t{-3676596760113800761, 2},\n\t{-3675022415601315465, 3},\n\t{-3670702411074082301, 3},\n\t{-3670549181759777779, 3},\n\t{-3669328046939491203, 0},\n\t{-3668780196083141076, 1},\n\t{-3667796854335873812, 2},\n\t{-3665056123456277927, 0},\n\t{-3664989070885080130, 0},\n\t{-3663076268648970233, 2},\n\t{-3662714748005113234, 2},\n\t{-3661763166663553972, 
3},\n\t{-3661258261691777727, 0},\n\t{-3660465958855695750, 0},\n\t{-3656103552599866830, 0},\n\t{-3655405725905958684, 1},\n\t{-3655092331148011998, 1},\n\t{-3651612326628463358, 0},\n\t{-3649661966402759569, 2},\n\t{-3647650046651736562, 0},\n\t{-3647314406368777422, 0},\n\t{-3645855594149553334, 1},\n\t{-3644667639156250902, 2},\n\t{-3642185541196793718, 1},\n\t{-3642146686459422430, 1},\n\t{-3641393954985786133, 1},\n\t{-3637701899508375059, 1},\n\t{-3634987398027286519, 3},\n\t{-3634633362529068333, 3},\n\t{-3634083373066101786, 0},\n\t{-3633566944729543736, 0},\n\t{-3623869800086531774, 1},\n\t{-3620607443584454007, 0},\n\t{-3620334976389475052, 0},\n\t{-3617673901042492536, 2},\n\t{-3615623573200245763, 0},\n\t{-3606463025141286711, 0},\n\t{-3606028262075046779, 1},\n\t{-3604290616869646380, 2},\n\t{-3601525103400866280, 1},\n\t{-3600714113938931997, 1},\n\t{-3598778365507195688, 3},\n\t{-3598097016299385573, 0},\n\t{-3596545459665962878, 1},\n\t{-3596051237381896662, 2},\n\t{-3595607972434835807, 2},\n\t{-3594959553849002071, 3},\n\t{-3592505226021320157, 1},\n\t{-3591228033666188028, 2},\n\t{-3586730428507504952, 2},\n\t{-3585621855113336780, 3},\n\t{-3585531322210372445, 3},\n\t{-3584618061560391210, 0},\n\t{-3583793249753093765, 0},\n\t{-3582132723143897358, 2},\n\t{-3580111592589829488, 0},\n\t{-3577863947036314627, 2},\n\t{-3575753539964566976, 0},\n\t{-3575562966952762376, 0},\n\t{-3575230846670000212, 0},\n\t{-3575217734905003875, 0},\n\t{-3573068427164585730, 2},\n\t{-3572641870364943167, 2},\n\t{-3568416852313692157, 2},\n\t{-3567428945704071328, 3},\n\t{-3564764681305495647, 1},\n\t{-3564580183380486994, 2},\n\t{-3564560653413030197, 2},\n\t{-3563841352760753092, 2},\n\t{-3561181797126166537, 1},\n\t{-3558768808113426905, 3},\n\t{-3556776655430198469, 0},\n\t{-3556198222251093700, 1},\n\t{-3555120438553611784, 2},\n\t{-3551910284819165679, 1},\n\t{-3551758456176501420, 1},\n\t{-3551601877709528504, 1},\n\t{-3551519734355853838, 
1},\n\t{-3550824131267686051, 2},\n\t{-3550051674696333793, 2},\n\t{-3548619712167122188, 0},\n\t{-3545242467215591922, 3},\n\t{-3536795760782084425, 2},\n\t{-3531632801863993822, 3},\n\t{-3531573496007403061, 3},\n\t{-3531182831500027599, 3},\n\t{-3528246068657230264, 2},\n\t{-3526978752157748379, 3},\n\t{-3526856949879886652, 3},\n\t{-3526290033169052333, 0},\n\t{-3523275212513553601, 2},\n\t{-3521123935893694563, 0},\n\t{-3521099994116048911, 0},\n\t{-3518220866453423698, 3},\n\t{-3515719960042198778, 1},\n\t{-3515332735340211251, 1},\n\t{-3514949422883979425, 2},\n\t{-3514144482741915879, 2},\n\t{-3512488385783285732, 0},\n\t{-3509388025195512766, 3},\n\t{-3503715075844167889, 0},\n\t{-3502625201554596047, 1},\n\t{-3501959013470401823, 1},\n\t{-3501824729107532246, 1},\n\t{-3501525591674374417, 2},\n\t{-3499874069742834327, 3},\n\t{-3498967899681044491, 0},\n\t{-3490410408895038823, 3},\n\t{-3490122393352105490, 0},\n\t{-3488770483245217874, 1},\n\t{-3488441676400534025, 1},\n\t{-3487108689109952362, 2},\n\t{-3487096694279133354, 2},\n\t{-3485092775508295834, 0},\n\t{-3479258797548920484, 1},\n\t{-3478912114492015582, 2},\n\t{-3477806127219942756, 3},\n\t{-3476984015727581167, 3},\n\t{-3476425065361281372, 0},\n\t{-3475293848099064983, 1},\n\t{-3474818665183036127, 1},\n\t{-3473723081572526220, 2},\n\t{-3472690823324859637, 3},\n\t{-3471454426308390293, 0},\n\t{-3471186103366315916, 0},\n\t{-3467156631916511943, 0},\n\t{-3466872754954560381, 0},\n\t{-3466626492200126086, 1},\n\t{-3466534988804924454, 1},\n\t{-3459975566886031299, 2},\n\t{-3456776956616433374, 1},\n\t{-3455866806647757474, 2},\n\t{-3454515189897539714, 3},\n\t{-3450866503380392142, 3},\n\t{-3450111067052383597, 3},\n\t{-3446663453374307723, 2},\n\t{-3443308880832320543, 1},\n\t{-3441701061315523653, 3},\n\t{-3439683767828247785, 0},\n\t{-3437193246933548974, 3},\n\t{-3435687215249201245, 0},\n\t{-3435647926065263842, 0},\n\t{-3432817169758780417, 3},\n\t{-3429680916004916257, 
1},\n\t{-3429586264177573903, 1},\n\t{-3428754660785313001, 2},\n\t{-3425648344964249934, 1},\n\t{-3424818652830634891, 2},\n\t{-3424769411899272309, 2},\n\t{-3424569213836991991, 2},\n\t{-3419601802268133055, 2},\n\t{-3417812579738055912, 0},\n\t{-3417031228649657947, 1},\n\t{-3415631573433393132, 2},\n\t{-3415054115816335037, 2},\n\t{-3408976057367425622, 0},\n\t{-3407078349390933291, 1},\n\t{-3405233504637255529, 3},\n\t{-3401716171101597763, 2},\n\t{-3399851518650429477, 0},\n\t{-3396783453795273693, 3},\n\t{-3396661321827641447, 3},\n\t{-3395328450948455539, 0},\n\t{-3394510743450546462, 1},\n\t{-3388968870557406156, 1},\n\t{-3386834539694802770, 3},\n\t{-3386602780530994262, 0},\n\t{-3385930866817031311, 0},\n\t{-3381798303721707389, 0},\n\t{-3379045812693669927, 2},\n\t{-3378562070000774436, 3},\n\t{-3377050221254756625, 0},\n\t{-3374149965185588595, 3},\n\t{-3372486101827579970, 0},\n\t{-3372065746597623858, 1},\n\t{-3370818462985142769, 2},\n\t{-3370434919986754362, 2},\n\t{-3367663948892971812, 0},\n\t{-3355978702487126680, 3},\n\t{-3352541153272338166, 2},\n\t{-3351276513764932882, 3},\n\t{-3348814805862882392, 1},\n\t{-3344033789111870793, 1},\n\t{-3340407044087364195, 1},\n\t{-3340321472963939727, 1},\n\t{-3333379280756722214, 3},\n\t{-3332797455289724293, 3},\n\t{-3332384951891485988, 0},\n\t{-3331355163318940516, 1},\n\t{-3328617007707819800, 3},\n\t{-3327427469768426787, 0},\n\t{-3325527183165443649, 2},\n\t{-3324569367862140704, 3},\n\t{-3324209036873770198, 3},\n\t{-3321740770911166597, 1},\n\t{-3321060258427533947, 2},\n\t{-3319091365273759708, 0},\n\t{-3318721963639951930, 0},\n\t{-3315011169039261364, 3},\n\t{-3312517882134904308, 1},\n\t{-3307139524247224030, 2},\n\t{-3304989087415578646, 0},\n\t{-3304558421153774586, 0},\n\t{-3303980324087273672, 1},\n\t{-3301951682089751074, 3},\n\t{-3301795439968858151, 3},\n\t{-3297013927583781784, 3},\n\t{-3295314009565962732, 1},\n\t{-3293579635464589912, 2},\n\t{-3292906284502253524, 
3},\n\t{-3291092821923909755, 0},\n\t{-3288274383337205058, 3},\n\t{-3280848613358378899, 2},\n\t{-3280538524002610156, 2},\n\t{-3274696652326161192, 3},\n\t{-3270373439314657469, 3},\n\t{-3270120075578989114, 3},\n\t{-3268953453169295248, 0},\n\t{-3267428355727710463, 1},\n\t{-3265577930308573655, 3},\n\t{-3261040052295910161, 3},\n\t{-3260560440021876552, 0},\n\t{-3255619226375307094, 0},\n\t{-3254187120342866805, 1},\n\t{-3254052403079175996, 1},\n\t{-3253267128125167392, 2},\n\t{-3249646011836043835, 1},\n\t{-3248645513157513748, 2},\n\t{-3246801173833094893, 0},\n\t{-3245988122985274141, 0},\n\t{-3245630880978383934, 1},\n\t{-3243958383319994803, 2},\n\t{-3243468377085879355, 3},\n\t{-3241550456322569743, 0},\n\t{-3241441706509455798, 1},\n\t{-3240564475537303571, 1},\n\t{-3240369188784665646, 1},\n\t{-3238097112742850511, 3},\n\t{-3236456091159518317, 1},\n\t{-3234909254933017289, 2},\n\t{-3232311644671573092, 1},\n\t{-3230664728808407550, 2},\n\t{-3229706326498129479, 3},\n\t{-3228530513831255234, 0},\n\t{-3226228805335983411, 2},\n\t{-3219245238835681625, 0},\n\t{-3217222613906242169, 2},\n\t{-3216086005702890550, 3},\n\t{-3215274298416678785, 0},\n\t{-3214472757930364746, 0},\n\t{-3214050544002490581, 1},\n\t{-3213879476412530721, 1},\n\t{-3212981100756133514, 2},\n\t{-3210528520471277806, 0},\n\t{-3210454216905244546, 0},\n\t{-3208305553533761849, 2},\n\t{-3207188577808510646, 3},\n\t{-3199555118010160927, 2},\n\t{-3198269440142673193, 3},\n\t{-3198106800486363012, 3},\n\t{-3195524141222673715, 1},\n\t{-3193749624361171911, 3},\n\t{-3192509972666121344, 0},\n\t{-3190453011880384893, 2},\n\t{-3189094893150428389, 3},\n\t{-3188257537860388152, 0},\n\t{-3187637342371237068, 0},\n\t{-3187093202113223168, 1},\n\t{-3185671838408002047, 2},\n\t{-3182189875086726974, 1},\n\t{-3180867327701891940, 2},\n\t{-3172515683366594041, 2},\n\t{-3169504194940221721, 0},\n\t{-3167718672128395622, 2},\n\t{-3165154888494269488, 0},\n\t{-3162540706059349183, 
3},\n\t{-3161529410792012243, 3},\n\t{-3161116258779731243, 0},\n\t{-3160142397786391907, 1},\n\t{-3159867125050712343, 1},\n\t{-3159220132029675555, 2},\n\t{-3157884745462274915, 3},\n\t{-3157415840296545541, 3},\n\t{-3155382289675741388, 1},\n\t{-3153897171957731208, 2},\n\t{-3148370026417268254, 3},\n\t{-3147773520358304241, 0},\n\t{-3147472979918559585, 0},\n\t{-3147246887211194130, 0},\n\t{-3146294492146933944, 1},\n\t{-3143913372815119405, 3},\n\t{-3141389625800121176, 1},\n\t{-3140137126562110275, 2},\n\t{-3137619231645727542, 1},\n\t{-3137116633909332704, 1},\n\t{-3136520158053355330, 2},\n\t{-3131608258298651445, 2},\n\t{-3126325270458551326, 3},\n\t{-3123363788132291544, 1},\n\t{-3122802400862241017, 2},\n\t{-3122291597050858986, 2},\n\t{-3118996283541541055, 1},\n\t{-3117656168243177884, 2},\n\t{-3117308297640582642, 3},\n\t{-3114545715534720612, 1},\n\t{-3113720043967491140, 2},\n\t{-3113454606644625252, 2},\n\t{-3109688182898815920, 2},\n\t{-3108653345976578385, 2},\n\t{-3105674842501390249, 1},\n\t{-3104632055148420124, 2},\n\t{-3103061709869921882, 3},\n\t{-3102727835206858005, 0},\n\t{-3101494146307049608, 1},\n\t{-3099552025829625036, 3},\n\t{-3095220004843598559, 2},\n\t{-3094631008242885900, 3},\n\t{-3093579945035760087, 0},\n\t{-3093546285880642726, 0},\n\t{-3091448159939143791, 2},\n\t{-3088559383659129995, 0},\n\t{-3084073240044805028, 0},\n\t{-3082751232336528177, 1},\n\t{-3082718710090373868, 1},\n\t{-3070440986312371031, 0},\n\t{-3066283459562763102, 0},\n\t{-3065469364052149342, 1},\n\t{-3062310534560370810, 0},\n\t{-3060399571900493299, 1},\n\t{-3058907577907580042, 3},\n\t{-3053933383542777318, 3},\n\t{-3050931945152186216, 2},\n\t{-3050705676232618819, 2},\n\t{-3048365360927019001, 0},\n\t{-3048181812089092560, 0},\n\t{-3045765304802213051, 2},\n\t{-3044155756817350001, 0},\n\t{-3043382581128414301, 0},\n\t{-3042223425772217827, 1},\n\t{-3042198820370562562, 1},\n\t{-3039677709134737180, 0},\n\t{-3038054118138634996, 
1},\n\t{-3036963718801302257, 2},\n\t{-3035132353153526594, 0},\n\t{-3035087924518426939, 0},\n\t{-3034513849749441163, 0},\n\t{-3033577503049565179, 1},\n\t{-3032128820756774983, 2},\n\t{-3028087917408702857, 2},\n\t{-3027056759318988003, 3},\n\t{-3026368494770050903, 0},\n\t{-3024721786713613081, 1},\n\t{-3021990736514891644, 3},\n\t{-3021299044027774262, 0},\n\t{-3016381891277178911, 0},\n\t{-3016088645897321699, 1},\n\t{-3016019010263860657, 1},\n\t{-3015065975932513766, 2},\n\t{-3013325663745935157, 3},\n\t{-3009999693529812750, 2},\n\t{-3009962242305222554, 2},\n\t{-3009874410369691472, 2},\n\t{-3007833970348172929, 0},\n\t{-3007655667382102099, 0},\n\t{-3007556621048251005, 0},\n\t{-3004695844023007931, 3},\n\t{-3004537491915297049, 3},\n\t{-3003025485163478589, 0},\n\t{-3002356125129148167, 1},\n\t{-3000355705646074427, 3},\n\t{-3000169638802716243, 3},\n\t{-2995925710307321221, 3},\n\t{-2995465124179858689, 3},\n\t{-2995085254816452439, 3},\n\t{-2994787389200555039, 0},\n\t{-2989119741996774661, 1},\n\t{-2986135871263991702, 3},\n\t{-2985909310218282962, 3},\n\t{-2985406829667914796, 0},\n\t{-2985376992899876618, 0},\n\t{-2985130939934459879, 0},\n\t{-2984817815354149766, 0},\n\t{-2983977701424150580, 1},\n\t{-2983251700710829977, 2},\n\t{-2980292768352466115, 0},\n\t{-2979610368382278511, 1},\n\t{-2979031493721559096, 2},\n\t{-2978886170372167481, 2},\n\t{-2975384931016922414, 1},\n\t{-2975109778822127624, 1},\n\t{-2974200026559050122, 2},\n\t{-2973029401577758713, 3},\n\t{-2971792396654030652, 0},\n\t{-2971058436056497229, 1},\n\t{-2970276288698213383, 1},\n\t{-2969472269800168954, 2},\n\t{-2969136585714306617, 2},\n\t{-2968834007207290171, 3},\n\t{-2967988359986239606, 3},\n\t{-2966185045600388538, 1},\n\t{-2961472984067599286, 1},\n\t{-2959085327792863127, 3},\n\t{-2958944455508102573, 3},\n\t{-2958495265318181711, 0},\n\t{-2957922919879320296, 0},\n\t{-2957557020063116264, 1},\n\t{-2957015000160562380, 1},\n\t{-2956168349987879576, 
2},\n\t{-2955106407496392412, 3},\n\t{-2953993001876355519, 0},\n\t{-2951949824205549444, 2},\n\t{-2949213252895512776, 0},\n\t{-2946259390648114489, 3},\n\t{-2945629509688367419, 3},\n\t{-2944936352116364376, 0},\n\t{-2944401406669179149, 0},\n\t{-2944192370739715913, 1},\n\t{-2943926592078462731, 1},\n\t{-2939317781372906190, 1},\n\t{-2938140639969569739, 2},\n\t{-2934876358760987525, 1},\n\t{-2934841802736090239, 1},\n\t{-2931391638305133402, 0},\n\t{-2930448426002858687, 1},\n\t{-2930317885884522603, 1},\n\t{-2929256417971226185, 2},\n\t{-2929025091375555793, 2},\n\t{-2928186182154178024, 3},\n\t{-2925823064813362016, 1},\n\t{-2925303788667834248, 1},\n\t{-2925185240161647580, 1},\n\t{-2924585279928243595, 2},\n\t{-2919033765628898750, 3},\n\t{-2918565801795507000, 3},\n\t{-2916799155289074409, 1},\n\t{-2912931812013164185, 0},\n\t{-2912667633493585245, 1},\n\t{-2910677004888599760, 2},\n\t{-2910288593546875789, 3},\n\t{-2909696952091197708, 3},\n\t{-2906974674261915332, 2},\n\t{-2902868075379588331, 1},\n\t{-2902467355927090088, 2},\n\t{-2901605971634111574, 2},\n\t{-2897604126597717645, 2},\n\t{-2894095634806670193, 1},\n\t{-2891600009458392643, 3},\n\t{-2891352482100609400, 3},\n\t{-2889494847559623685, 1},\n\t{-2888174525434464067, 2},\n\t{-2887146977075886385, 3},\n\t{-2886085511150327026, 0},\n\t{-2885036440653568397, 1},\n\t{-2884228525355842379, 2},\n\t{-2882759764482151938, 3},\n\t{-2881054469868882030, 1},\n\t{-2877132323400118979, 0},\n\t{-2876778202845575521, 0},\n\t{-2876357884749357003, 1},\n\t{-2876243126834354171, 1},\n\t{-2875881354414516993, 1},\n\t{-2875204273229100110, 2},\n\t{-2875053674100041780, 2},\n\t{-2873080573678951971, 0},\n\t{-2871878150272344861, 1},\n\t{-2871761014479493540, 1},\n\t{-2871056718164024161, 1},\n\t{-2868786030339209610, 0},\n\t{-2867758880701820873, 0},\n\t{-2866593548503039738, 1},\n\t{-2850436040329760435, 0},\n\t{-2844922368624790244, 1},\n\t{-2842173086214465716, 3},\n\t{-2841625417469392906, 
0},\n\t{-2840348335916305209, 1},\n\t{-2840181641247245099, 1},\n\t{-2836103537117477124, 1},\n\t{-2835447800309966707, 1},\n\t{-2834635073309116078, 2},\n\t{-2831748460490928780, 0},\n\t{-2831457722830338420, 1},\n\t{-2830286156200643082, 2},\n\t{-2827549927003228241, 0},\n\t{-2825647522513387962, 2},\n\t{-2821799917296329275, 1},\n\t{-2820356457120821336, 3},\n\t{-2814448907684594333, 0},\n\t{-2814172722797196808, 0},\n\t{-2814026979740686048, 0},\n\t{-2813808066037064123, 0},\n\t{-2812923699110768422, 1},\n\t{-2812037930643576018, 2},\n\t{-2811281472468335628, 3},\n\t{-2810847788188549431, 3},\n\t{-2809939049776530220, 0},\n\t{-2808692452338734567, 1},\n\t{-2807522188789370463, 2},\n\t{-2807424403492885266, 2},\n\t{-2806904044473713807, 2},\n\t{-2804768431466270348, 0},\n\t{-2801417231179511683, 3},\n\t{-2801211363558834416, 0},\n\t{-2797715671839862080, 3},\n\t{-2791770266713470940, 0},\n\t{-2791318679785403711, 0},\n\t{-2788775053267039611, 3},\n\t{-2788434084269317380, 3},\n\t{-2788249579427109645, 3},\n\t{-2784304661527651503, 3},\n\t{-2782602654385281736, 0},\n\t{-2778591368205867845, 0},\n\t{-2775510863635900297, 2},\n\t{-2772480735206589687, 1},\n\t{-2772471558308158138, 1},\n\t{-2771435658939308309, 2},\n\t{-2770750621739066375, 3},\n\t{-2769666064965534382, 0},\n\t{-2767693470449517633, 1},\n\t{-2765843796276346110, 3},\n\t{-2765518096898429709, 3},\n\t{-2764986878473405609, 0},\n\t{-2764668695089193409, 0},\n\t{-2763894460595384595, 1},\n\t{-2763744741608549278, 1},\n\t{-2763126352734798464, 1},\n\t{-2762483522631819856, 2},\n\t{-2761687475643139997, 3},\n\t{-2761344446665085919, 3},\n\t{-2760310857239253481, 0},\n\t{-2757451406478904925, 2},\n\t{-2757311354829701692, 3},\n\t{-2752909251566610889, 2},\n\t{-2752102007595781595, 3},\n\t{-2750590661340944924, 0},\n\t{-2746907375151349632, 0},\n\t{-2744787791958448000, 2},\n\t{-2742432918660599673, 0},\n\t{-2734521183332627859, 3},\n\t{-2734172452662118168, 3},\n\t{-2726978072428881370, 
1},\n\t{-2726544204970191583, 2},\n\t{-2722026758628418674, 2},\n\t{-2721695134105521871, 2},\n\t{-2721227536919337220, 3},\n\t{-2720458003988110516, 3},\n\t{-2720157737819232648, 0},\n\t{-2719248914510631491, 0},\n\t{-2716431663907121759, 3},\n\t{-2711031758245518374, 0},\n\t{-2709509947593878201, 1},\n\t{-2709356882553420218, 1},\n\t{-2708991041802633143, 1},\n\t{-2706013986291972030, 0},\n\t{-2704815631074933283, 1},\n\t{-2704812533221111541, 1},\n\t{-2701559381723346853, 0},\n\t{-2701227746563741300, 0},\n\t{-2699451087634401104, 2},\n\t{-2696746493502475357, 0},\n\t{-2693430577424558072, 3},\n\t{-2690793838798990786, 2},\n\t{-2689414004795868341, 3},\n\t{-2686335605283876475, 2},\n\t{-2685020173153700010, 3},\n\t{-2684359891153436597, 3},\n\t{-2683020834458716398, 0},\n\t{-2677257878216839285, 2},\n\t{-2677253822965273990, 2},\n\t{-2674385071050339127, 0},\n\t{-2669405136231004174, 1},\n\t{-2668096356389519994, 2},\n\t{-2667310263111273119, 2},\n\t{-2665770371230506131, 0},\n\t{-2665562977789950484, 0},\n\t{-2664078597450848025, 1},\n\t{-2663862630109708625, 2},\n\t{-2662070733667730874, 3},\n\t{-2659479873634149716, 1},\n\t{-2659421580781462996, 1},\n\t{-2658265902642449454, 2},\n\t{-2655833677725392164, 1},\n\t{-2653793725666412432, 2},\n\t{-2651417337147943553, 1},\n\t{-2650707952314243342, 1},\n\t{-2646504561842795329, 1},\n\t{-2643765842016028969, 3},\n\t{-2643271409307202084, 0},\n\t{-2641793006224554219, 1},\n\t{-2641494727422285946, 1},\n\t{-2640299308249964941, 2},\n\t{-2638474632880490386, 0},\n\t{-2633253853983562740, 1},\n\t{-2631957486563225609, 2},\n\t{-2631827224509519683, 2},\n\t{-2628238168044982436, 1},\n\t{-2626088472731423583, 3},\n\t{-2623510749398306210, 1},\n\t{-2623507700814777949, 1},\n\t{-2622030844209432632, 3},\n\t{-2620804688109058566, 0},\n\t{-2612980313345756424, 3},\n\t{-2612491947283313294, 3},\n\t{-2611987143115430356, 0},\n\t{-2611930154962527621, 0},\n\t{-2610131962043282043, 1},\n\t{-2609488693451151947, 
2},\n\t{-2606421844193792590, 1},\n\t{-2605512685515091320, 1},\n\t{-2605066603016655746, 2},\n\t{-2603439783253443967, 3},\n\t{-2602157392147837405, 0},\n\t{-2601884862810405303, 1},\n\t{-2601726880141406840, 1},\n\t{-2599607694937117446, 3},\n\t{-2598780688510716158, 3},\n\t{-2598464089607860397, 0},\n\t{-2596894513350812598, 1},\n\t{-2594837831141368556, 3},\n\t{-2592879915624126391, 1},\n\t{-2592839555654494078, 1},\n\t{-2592648380431487301, 1},\n\t{-2591412302181975023, 2},\n\t{-2590671814904257939, 3},\n\t{-2588532071686642362, 0},\n\t{-2587979063926703402, 1},\n\t{-2587546593967157789, 1},\n\t{-2583859643274105804, 1},\n\t{-2582957789639975204, 1},\n\t{-2578460845274948580, 1},\n\t{-2577454251284469592, 2},\n\t{-2576731839920939528, 3},\n\t{-2575709256143108457, 0},\n\t{-2573492847310102784, 2},\n\t{-2570463657489891643, 0},\n\t{-2570295161905284703, 1},\n\t{-2570034565937227758, 1},\n\t{-2569728728174668592, 1},\n\t{-2568084386103904875, 3},\n\t{-2563202483826193329, 3},\n\t{-2556890261577588100, 1},\n\t{-2556291594037786151, 1},\n\t{-2556249788321562086, 1},\n\t{-2554738226859100049, 2},\n\t{-2554726948432886464, 2},\n\t{-2554375420730259082, 3},\n\t{-2552973254121783266, 0},\n\t{-2549790095232338506, 3},\n\t{-2549131592259074007, 3},\n\t{-2544429160103377085, 0},\n\t{-2542204822121035160, 2},\n\t{-2541937435593124584, 2},\n\t{-2541904243463923379, 2},\n\t{-2540999445248396437, 3},\n\t{-2540756610458846291, 3},\n\t{-2536950132192244438, 2},\n\t{-2529588574935931893, 1},\n\t{-2528543268206744059, 2},\n\t{-2527542245091545958, 3},\n\t{-2520773472914853889, 1},\n\t{-2520494934520288794, 1},\n\t{-2519971037715482051, 1},\n\t{-2519721592688257769, 2},\n\t{-2517148216631492904, 0},\n\t{-2510717586694072561, 2},\n\t{-2508914672754067796, 3},\n\t{-2506983947392144823, 1},\n\t{-2505569289864295943, 2},\n\t{-2502640347343931415, 1},\n\t{-2493929424080577156, 0},\n\t{-2492663436810150711, 2},\n\t{-2492557429029320646, 2},\n\t{-2490860029693268907, 
3},\n\t{-2490539671891250844, 3},\n\t{-2489160173565555203, 1},\n\t{-2488970317392650908, 1},\n\t{-2487172360506888366, 2},\n\t{-2484972712628037609, 0},\n\t{-2483938747057027028, 1},\n\t{-2483765992321433167, 1},\n\t{-2479954096852560112, 1},\n\t{-2479545945226287108, 1},\n\t{-2477041915025336753, 3},\n\t{-2475854487857967818, 0},\n\t{-2475583354970251669, 1},\n\t{-2475429375189018484, 1},\n\t{-2473057620187280888, 3},\n\t{-2472806670717769836, 3},\n\t{-2470570833337184798, 1},\n\t{-2469095645915998745, 3},\n\t{-2466137118790664850, 1},\n\t{-2462472373490037680, 0},\n\t{-2460717584131255467, 2},\n\t{-2458751987429028564, 0},\n\t{-2457371398211384388, 1},\n\t{-2453403623771049237, 0},\n\t{-2452001652401666686, 2},\n\t{-2451761939127976070, 2},\n\t{-2451027032526555593, 3},\n\t{-2450472822117757904, 3},\n\t{-2448637985279892652, 1},\n\t{-2445829905964552172, 3},\n\t{-2441113679860449361, 3},\n\t{-2437466089971476711, 3},\n\t{-2437363183150595342, 3},\n\t{-2436926209927488215, 3},\n\t{-2436684224810308055, 3},\n\t{-2436014030076034526, 0},\n\t{-2435261912172079556, 1},\n\t{-2432335430744949835, 3},\n\t{-2431540776211677806, 0},\n\t{-2430773053521267509, 1},\n\t{-2430320630082785781, 1},\n\t{-2426430528563765517, 0},\n\t{-2423985596817697146, 3},\n\t{-2423335654357458541, 3},\n\t{-2420409292182096959, 2},\n\t{-2419530576701002007, 3},\n\t{-2415764723491766066, 2},\n\t{-2413247246157920033, 0},\n\t{-2412294328410363639, 1},\n\t{-2409804716312225570, 3},\n\t{-2409616212921387479, 3},\n\t{-2408206260232246513, 1},\n\t{-2407528963112167410, 1},\n\t{-2405462066239409862, 3},\n\t{-2405030140472630425, 3},\n\t{-2401358100142493613, 3},\n\t{-2400299222327285946, 0},\n\t{-2400054072475383755, 0},\n\t{-2393510691250235963, 2},\n\t{-2393062422701534566, 2},\n\t{-2392291701786707761, 3},\n\t{-2391567887120316420, 3},\n\t{-2387803325444302159, 3},\n\t{-2387798921219048278, 3},\n\t{-2382635096349054712, 3},\n\t{-2378123669179878196, 3},\n\t{-2375288823730654623, 
2},\n\t{-2370338837053606078, 2},\n\t{-2367322814450039676, 1},\n\t{-2366039479091575566, 2},\n\t{-2364858613365894831, 3},\n\t{-2364557571549175480, 3},\n\t{-2364276823956302531, 0},\n\t{-2364202514752516204, 0},\n\t{-2362800393992846908, 1},\n\t{-2359794378307663322, 0},\n\t{-2359194611534084229, 0},\n\t{-2358642987132463338, 1},\n\t{-2358394947916532067, 1},\n\t{-2356745649482943695, 2},\n\t{-2351410673105450190, 3},\n\t{-2349953547325948950, 0},\n\t{-2348915884660651169, 1},\n\t{-2348520874838231421, 2},\n\t{-2347763199285130931, 2},\n\t{-2347608901841483902, 2},\n\t{-2346650695679353494, 3},\n\t{-2345438402707671605, 0},\n\t{-2344890772806313083, 1},\n\t{-2344008371097415587, 2},\n\t{-2342236413769668944, 3},\n\t{-2340902108506463950, 0},\n\t{-2336557099005459749, 0},\n\t{-2334864576100618300, 2},\n\t{-2334567840303958770, 2},\n\t{-2333171380648613565, 3},\n\t{-2332797567736410094, 0},\n\t{-2332452771379379317, 0},\n\t{-2326881073667208359, 1},\n\t{-2325221339677018822, 2},\n\t{-2324963065733339176, 3},\n\t{-2324296564662216125, 3},\n\t{-2322474442384912906, 1},\n\t{-2321646740569667768, 1},\n\t{-2315662514143358923, 3},\n\t{-2313663455636384906, 1},\n\t{-2312983150592732472, 1},\n\t{-2309436993511593956, 0},\n\t{-2306346631960459674, 3},\n\t{-2303951546040142087, 1},\n\t{-2302475115125550159, 2},\n\t{-2302173474816029425, 3},\n\t{-2298945058561512710, 2},\n\t{-2292127761772858435, 0},\n\t{-2289895894048857468, 2},\n\t{-2289448300200132791, 2},\n\t{-2287598839947545796, 0},\n\t{-2287370246140490934, 0},\n\t{-2286070584288675652, 1},\n\t{-2285717769717121975, 1},\n\t{-2283974089932545025, 3},\n\t{-2283534842932894702, 3},\n\t{-2278644570841188880, 0},\n\t{-2276962788116117924, 1},\n\t{-2276918984111311755, 1},\n\t{-2273555296203508850, 0},\n\t{-2273129003658115849, 1},\n\t{-2271894262228176093, 2},\n\t{-2271434335987435542, 2},\n\t{-2269490570227783645, 0},\n\t{-2269267201043165988, 0},\n\t{-2267837084103038835, 1},\n\t{-2267546619270730573, 
2},\n\t{-2266381087296221357, 3},\n\t{-2264590300188505277, 0},\n\t{-2264034311573219335, 1},\n\t{-2260538635420795596, 0},\n\t{-2260399765831207912, 0},\n\t{-2260111136639590274, 0},\n\t{-2259407577075134292, 1},\n\t{-2258940458046940672, 1},\n\t{-2255683614229295145, 0},\n\t{-2251778530403867941, 0},\n\t{-2251118757187501568, 0},\n\t{-2249571901714279622, 1},\n\t{-2246818409949561067, 0},\n\t{-2246231317019685410, 0},\n\t{-2242931196513977463, 3},\n\t{-2240518190840678540, 2},\n\t{-2238160449036530410, 0},\n\t{-2238072987832679324, 0},\n\t{-2237959045764371301, 0},\n\t{-2237652779337160948, 0},\n\t{-2237539355295038106, 0},\n\t{-2237037299910945657, 1},\n\t{-2236843357771721212, 1},\n\t{-2235988601281361544, 2},\n\t{-2230269330653699961, 3},\n\t{-2227593600515311823, 1},\n\t{-2226202820425534042, 2},\n\t{-2218838641266795362, 1},\n\t{-2213565825338923107, 1},\n\t{-2209542968262942413, 1},\n\t{-2209048459249732751, 1},\n\t{-2208887266432228304, 2},\n\t{-2207394337060051276, 3},\n\t{-2206420529994797030, 0},\n\t{-2197907156764602539, 3},\n\t{-2196846097327658992, 0},\n\t{-2196246508661402006, 1},\n\t{-2194159296655350408, 3},\n\t{-2190081407205509116, 2},\n\t{-2189353681212350467, 3},\n\t{-2188827729859097094, 3},\n\t{-2188206200284358975, 0},\n\t{-2187672233148801599, 0},\n\t{-2187562275560676549, 1},\n\t{-2187269745230633973, 1},\n\t{-2186296730848403666, 2},\n\t{-2185626685292721891, 2},\n\t{-2184261868447329991, 3},\n\t{-2180146640829917379, 3},\n\t{-2179585624819235644, 0},\n\t{-2178943331147685730, 0},\n\t{-2178464304235614195, 1},\n\t{-2177815283452065000, 1},\n\t{-2174968374336463268, 0},\n\t{-2173900982644262860, 1},\n\t{-2173745866080571812, 1},\n\t{-2168866195085667593, 1},\n\t{-2168389958926116066, 2},\n\t{-2168357533157689284, 2},\n\t{-2167707770783977991, 2},\n\t{-2164044237336388750, 1},\n\t{-2160999103065661289, 0},\n\t{-2158447437837599140, 2},\n\t{-2154292854395452429, 2},\n\t{-2151599740303759032, 0},\n\t{-2149382042069993022, 
2},\n\t{-2148953861311305472, 3},\n\t{-2148698104931987848, 3},\n\t{-2148112978694481096, 0},\n\t{-2147680066767827196, 0},\n\t{-2146997980960737122, 1},\n\t{-2144650084507999880, 3},\n\t{-2143325876430900376, 0},\n\t{-2142839851831308106, 0},\n\t{-2142251962803039670, 1},\n\t{-2142168437921494952, 1},\n\t{-2140593741715450337, 2},\n\t{-2140162815703417510, 3},\n\t{-2137246168928895700, 1},\n\t{-2133823962143610931, 0},\n\t{-2132703895584727152, 1},\n\t{-2132321366588068045, 2},\n\t{-2132179081571537488, 2},\n\t{-2131362256163948007, 2},\n\t{-2130778459546605117, 3},\n\t{-2127893530648175973, 2},\n\t{-2126982857092730068, 2},\n\t{-2126570221295548900, 3},\n\t{-2126372785527825876, 3},\n\t{-2124378884240244012, 1},\n\t{-2120829748743551728, 0},\n\t{-2120105962060951732, 0},\n\t{-2120089749864045610, 0},\n\t{-2117009309659868707, 3},\n\t{-2116442265483090548, 0},\n\t{-2113857355933120795, 2},\n\t{-2110414466527627175, 1},\n\t{-2106041704660502944, 1},\n\t{-2105629668702761656, 1},\n\t{-2104717372700672449, 2},\n\t{-2103440551476621601, 3},\n\t{-2103359812954352049, 3},\n\t{-2102090581730588687, 0},\n\t{-2099558733259532086, 3},\n\t{-2098631832592787072, 0},\n\t{-2092767992028643996, 1},\n\t{-2086585612680309883, 2},\n\t{-2083014707116449149, 1},\n\t{-2082234143181941548, 2},\n\t{-2075578911078594652, 0},\n\t{-2075236028193731601, 0},\n\t{-2074849443572064608, 1},\n\t{-2074500803028978729, 1},\n\t{-2074291420915547905, 1},\n\t{-2073782254202379797, 2},\n\t{-2073640161144212708, 2},\n\t{-2070416218094267614, 1},\n\t{-2066244733166682653, 0},\n\t{-2065945631444149730, 1},\n\t{-2065816100031120881, 1},\n\t{-2065367547164317761, 1},\n\t{-2063066970021814280, 3},\n\t{-2057715467631369210, 0},\n\t{-2055678213387146389, 2},\n\t{-2054425647941599057, 3},\n\t{-2052509234494603248, 1},\n\t{-2051540835092421323, 1},\n\t{-2047540345802755738, 1},\n\t{-2045145553822751633, 3},\n\t{-2044168067324692852, 0},\n\t{-2041297476165374960, 2},\n\t{-2041153306318831295, 
3},\n\t{-2040998281588441000, 3},\n\t{-2039140054526868281, 0},\n\t{-2038639960848531541, 1},\n\t{-2034894975105011740, 0},\n\t{-2034263994484506584, 1},\n\t{-2030730410709817375, 0},\n\t{-2027266012545195185, 3},\n\t{-2024932223989693975, 1},\n\t{-2018225850289290900, 3},\n\t{-2017642800488073115, 3},\n\t{-2014492897832630471, 2},\n\t{-2012440653942637790, 0},\n\t{-2010466790131887032, 2},\n\t{-2003796251298459922, 0},\n\t{-2003687295190569078, 0},\n\t{-2003290280934108382, 0},\n\t{-1999065736768996681, 0},\n\t{-1998664787827093786, 0},\n\t{-1998658185151237190, 0},\n\t{-1998006241169437750, 1},\n\t{-1992862111869029652, 1},\n\t{-1992552636093442893, 2},\n\t{-1988596050595588632, 1},\n\t{-1985905916303620287, 0},\n\t{-1985243493435551548, 0},\n\t{-1984089034476011526, 1},\n\t{-1982446095162266356, 3},\n\t{-1979042922149102403, 2},\n\t{-1974300474725133899, 2},\n\t{-1972234929885691391, 0},\n\t{-1969501859756236498, 2},\n\t{-1963128678804256834, 0},\n\t{-1960541645596629114, 2},\n\t{-1959665170338960642, 3},\n\t{-1959042834804288103, 0},\n\t{-1958317717095864498, 0},\n\t{-1956572540252483451, 2},\n\t{-1948056953503939124, 1},\n\t{-1947965609073498075, 1},\n\t{-1947775742597290580, 2},\n\t{-1938211927945504317, 2},\n\t{-1937059331288790967, 3},\n\t{-1935755071906302991, 0},\n\t{-1934233713184352765, 2},\n\t{-1932322598268362288, 3},\n\t{-1932160284409598705, 3},\n\t{-1930524203030790570, 1},\n\t{-1930400927565874732, 1},\n\t{-1929100604147673969, 2},\n\t{-1929014292818998275, 2},\n\t{-1928880041489040166, 2},\n\t{-1926237471365538009, 1},\n\t{-1925866440768481944, 1},\n\t{-1924919078948974188, 2},\n\t{-1921046174871286486, 1},\n\t{-1920170160639724076, 2},\n\t{-1918071207995313420, 0},\n\t{-1913851401702855063, 0},\n\t{-1913575068283535842, 0},\n\t{-1912772188273378171, 1},\n\t{-1912194742595567221, 1},\n\t{-1912159111584559214, 1},\n\t{-1910996253744891098, 2},\n\t{-1910687496623733176, 2},\n\t{-1907559923903002166, 1},\n\t{-1906362554936993600, 
2},\n\t{-1905564681716128912, 3},\n\t{-1903289686061509354, 1},\n\t{-1900089356707402545, 0},\n\t{-1899774909554183123, 0},\n\t{-1899005973765148518, 1},\n\t{-1897031735968314296, 3},\n\t{-1896674211807173063, 3},\n\t{-1896373754261125209, 3},\n\t{-1895927701202612382, 0},\n\t{-1895912572536160990, 0},\n\t{-1892915180655558698, 2},\n\t{-1892722070949314246, 2},\n\t{-1892702771721787932, 2},\n\t{-1891214090940897197, 0},\n\t{-1890730517298219501, 0},\n\t{-1890340822608936081, 1},\n\t{-1888301461224304007, 2},\n\t{-1888120051044284019, 3},\n\t{-1887898543783613101, 3},\n\t{-1886804392633259315, 0},\n\t{-1886360411030179843, 0},\n\t{-1883655437934737036, 2},\n\t{-1882054408396715960, 0},\n\t{-1879597933982587266, 2},\n\t{-1878494770591697774, 3},\n\t{-1878081758349364047, 3},\n\t{-1877428361111421756, 0},\n\t{-1876006441412808642, 1},\n\t{-1875909827106035810, 1},\n\t{-1875171502966337475, 2},\n\t{-1873473599933351724, 0},\n\t{-1871404493179677068, 1},\n\t{-1865138975261346060, 3},\n\t{-1863332318657203145, 1},\n\t{-1862299952057358843, 1},\n\t{-1861469782185774242, 2},\n\t{-1860659425200769019, 3},\n\t{-1859574769497374283, 0},\n\t{-1858454353806900987, 1},\n\t{-1857382477123995478, 2},\n\t{-1857360875346156082, 2},\n\t{-1856330511325720903, 3},\n\t{-1856181385788934415, 3},\n\t{-1854493237484912544, 0},\n\t{-1854466982102865542, 0},\n\t{-1850766465229825122, 0},\n\t{-1849623939966392797, 1},\n\t{-1848048114159325844, 2},\n\t{-1843934712774367925, 2},\n\t{-1843618648373916943, 2},\n\t{-1840977839819247764, 0},\n\t{-1833188255869195087, 3},\n\t{-1831068751191439147, 1},\n\t{-1830642009290768934, 2},\n\t{-1828892330421349941, 3},\n\t{-1826988244448057128, 1},\n\t{-1824951082240923237, 3},\n\t{-1824869417389715773, 3},\n\t{-1823816214574931290, 0},\n\t{-1822181949417990712, 1},\n\t{-1821852977674989408, 1},\n\t{-1820464635726684841, 3},\n\t{-1817352914908967107, 1},\n\t{-1816419573671727699, 2},\n\t{-1815588381446658795, 3},\n\t{-1814915237237971627, 
0},\n\t{-1814105236715409039, 0},\n\t{-1808307347573127347, 1},\n\t{-1808040454012366204, 2},\n\t{-1805799861060122608, 0},\n\t{-1805423197207748539, 0},\n\t{-1804793000334610134, 1},\n\t{-1803732116860232196, 1},\n\t{-1799240499836713697, 1},\n\t{-1796372868237264224, 0},\n\t{-1792996523942718741, 3},\n\t{-1792617508371055034, 3},\n\t{-1792249732543921618, 0},\n\t{-1790675281054148868, 1},\n\t{-1787882135061100367, 0},\n\t{-1787632938159903226, 0},\n\t{-1784229534574110459, 3},\n\t{-1783829371775155162, 3},\n\t{-1783017331429437242, 0},\n\t{-1781919842759101092, 1},\n\t{-1779844930087365741, 3},\n\t{-1779071391116587009, 3},\n\t{-1778993948930827177, 3},\n\t{-1777712912186282667, 1},\n\t{-1776832295367414331, 1},\n\t{-1775081382941827680, 3},\n\t{-1775068639283931764, 3},\n\t{-1773093141839940101, 1},\n\t{-1770582882602923096, 3},\n\t{-1765987271807056595, 3},\n\t{-1764203793285297595, 1},\n\t{-1762849408292986841, 2},\n\t{-1762579735139109116, 2},\n\t{-1760031513045871638, 0},\n\t{-1758402347258004054, 2},\n\t{-1755867854034167015, 0},\n\t{-1755197594740793689, 1},\n\t{-1752867402584856984, 3},\n\t{-1751203181164321659, 0},\n\t{-1749531068569060412, 2},\n\t{-1746388478437347786, 0},\n\t{-1746163625865976983, 1},\n\t{-1745355841458698153, 1},\n\t{-1745161422465154799, 1},\n\t{-1743290637506269044, 3},\n\t{-1743000379198439020, 3},\n\t{-1741027670220260900, 1},\n\t{-1740509364539265718, 2},\n\t{-1739237417399340898, 3},\n\t{-1738871792183849957, 3},\n\t{-1736734728712005655, 1},\n\t{-1736257160434343463, 1},\n\t{-1734839074553675408, 3},\n\t{-1734605702515050498, 3},\n\t{-1733473721879637883, 0},\n\t{-1731987872898592864, 1},\n\t{-1727739427050789107, 1},\n\t{-1727652161941793587, 1},\n\t{-1727293510164503507, 1},\n\t{-1727162561878255052, 1},\n\t{-1724404612416028431, 0},\n\t{-1722524894756293483, 2},\n\t{-1719309985251970216, 0},\n\t{-1718992537710772484, 1},\n\t{-1715881090607322711, 3},\n\t{-1715384981194759232, 0},\n\t{-1714179046357577000, 
1},\n\t{-1710731568862121365, 0},\n\t{-1709430540415023211, 1},\n\t{-1703641985898630462, 2},\n\t{-1693926576021654720, 3},\n\t{-1693523448777815659, 3},\n\t{-1692340115173495330, 0},\n\t{-1691746622763476746, 1},\n\t{-1680258347308247770, 3},\n\t{-1679313913073914417, 0},\n\t{-1679131447123428458, 0},\n\t{-1674358545206413156, 0},\n\t{-1670869536098236151, 3},\n\t{-1670699006914452166, 0},\n\t{-1668343002958002830, 2},\n\t{-1665852231236116178, 0},\n\t{-1664185483327441597, 1},\n\t{-1663743278102555502, 2},\n\t{-1663378417784252575, 2},\n\t{-1661511824664119347, 0},\n\t{-1659961907848432286, 1},\n\t{-1658467318361508662, 2},\n\t{-1650814621618233035, 1},\n\t{-1649674584801489828, 2},\n\t{-1647761775726938901, 0},\n\t{-1645097598965550290, 2},\n\t{-1644317587157914688, 3},\n\t{-1642468211353558340, 1},\n\t{-1641303804254275062, 2},\n\t{-1638749362147741438, 0},\n\t{-1638392507967679416, 0},\n\t{-1633989416563684334, 0},\n\t{-1631704072566276741, 2},\n\t{-1630607867523115358, 3},\n\t{-1627299494934524248, 2},\n\t{-1627194249203659720, 2},\n\t{-1626056433241636478, 3},\n\t{-1621991081661240609, 3},\n\t{-1621085018682709855, 0},\n\t{-1619377313320485674, 1},\n\t{-1615026744031117009, 1},\n\t{-1614568699164127899, 1},\n\t{-1614497073291791832, 2},\n\t{-1611643432082702196, 0},\n\t{-1608099982411886934, 3},\n\t{-1607170760890700760, 0},\n\t{-1605032997743499044, 2},\n\t{-1603969215275282995, 3},\n\t{-1601042909382343769, 1},\n\t{-1598891279397659633, 3},\n\t{-1598788545964291928, 3},\n\t{-1597455479019942120, 1},\n\t{-1597320620601795776, 1},\n\t{-1596114996776120447, 2},\n\t{-1593323235421682050, 0},\n\t{-1591708216857693180, 2},\n\t{-1588989238592335672, 0},\n\t{-1586251731860542940, 3},\n\t{-1584156435961454604, 0},\n\t{-1582536824893245425, 2},\n\t{-1578903591526917177, 1},\n\t{-1577748931079384631, 2},\n\t{-1577193525516580968, 3},\n\t{-1574259629920283241, 1},\n\t{-1572803616465303540, 3},\n\t{-1569029150689579426, 2},\n\t{-1563790370335052111, 
3},\n\t{-1563188125699306320, 3},\n\t{-1562809307787587839, 3},\n\t{-1562751739078108004, 3},\n\t{-1560182473401270394, 2},\n\t{-1559848111411980933, 2},\n\t{-1557315843310898722, 0},\n\t{-1556221261721911328, 1},\n\t{-1554080502714318972, 3},\n\t{-1553530575504181180, 0},\n\t{-1553232530992959658, 0},\n\t{-1551267162716394812, 2},\n\t{-1549277539320998527, 3},\n\t{-1548035660739683959, 1},\n\t{-1545452979887143084, 3},\n\t{-1542627790006228968, 1},\n\t{-1537728697664832951, 2},\n\t{-1534951466587859449, 0},\n\t{-1533077901592371382, 2},\n\t{-1532642884956928868, 2},\n\t{-1529485069253607924, 1},\n\t{-1527802401599544087, 3},\n\t{-1527055371436496762, 3},\n\t{-1524349928567974230, 2},\n\t{-1523973936653775684, 2},\n\t{-1518444121745917668, 3},\n\t{-1517605853758432920, 0},\n\t{-1512843435409066707, 0},\n\t{-1512825500314012716, 0},\n\t{-1512287684307957039, 0},\n\t{-1512196060281246254, 0},\n\t{-1510465954280237946, 2},\n\t{-1510444808308034173, 2},\n\t{-1506915753189261675, 1},\n\t{-1506763572283983646, 1},\n\t{-1506160871283510880, 2},\n\t{-1505544264098902823, 2},\n\t{-1505304159737148308, 3},\n\t{-1501896388474219075, 2},\n\t{-1501010683791516153, 2},\n\t{-1500929039144384021, 2},\n\t{-1498441681979226385, 1},\n\t{-1498047809429285021, 1},\n\t{-1494113340401610555, 0},\n\t{-1494043790921563066, 1},\n\t{-1488614560406335935, 1},\n\t{-1487404051367138156, 2},\n\t{-1484210710786972740, 1},\n\t{-1479842367472189224, 1},\n\t{-1474815918600291735, 2},\n\t{-1473425692407981733, 3},\n\t{-1471473622549831966, 1},\n\t{-1468030945382313744, 0},\n\t{-1465974463340554483, 1},\n\t{-1465224092611572552, 2},\n\t{-1464973780294624678, 2},\n\t{-1464817042583527930, 2},\n\t{-1462795292621763301, 0},\n\t{-1462049816624651316, 1},\n\t{-1461280565681122556, 2},\n\t{-1455947091304858026, 2},\n\t{-1454162783543057607, 0},\n\t{-1452346188197411134, 2},\n\t{-1449827072838993314, 0},\n\t{-1448531631099878625, 1},\n\t{-1446112453325041567, 3},\n\t{-1445943711418849100, 
3},\n\t{-1444490488567062739, 1},\n\t{-1440616756658800055, 0},\n\t{-1440201595199290569, 0},\n\t{-1439164830506807663, 1},\n\t{-1433741524132007675, 2},\n\t{-1433534058769220936, 2},\n\t{-1429446709107605997, 2},\n\t{-1427755346720345444, 3},\n\t{-1425878819485331282, 1},\n\t{-1416089963963272278, 2},\n\t{-1416076589999394053, 2},\n\t{-1414480315518395611, 3},\n\t{-1408905663687901977, 0},\n\t{-1406681831609473672, 2},\n\t{-1406360353076550129, 2},\n\t{-1404309548662345170, 0},\n\t{-1403136838690136074, 1},\n\t{-1399940900690963084, 0},\n\t{-1398678919921721829, 1},\n\t{-1398611792395107567, 1},\n\t{-1398314707890919864, 2},\n\t{-1396248357767677591, 3},\n\t{-1393479670595026840, 2},\n\t{-1391592344885264932, 0},\n\t{-1385871943806910318, 1},\n\t{-1384952133416615764, 1},\n\t{-1383631863631062324, 3},\n\t{-1380724317621571561, 1},\n\t{-1378913085668019563, 3},\n\t{-1378000885520110991, 0},\n\t{-1375408316901699687, 2},\n\t{-1373732083342434939, 3},\n\t{-1369078717692394053, 0},\n\t{-1368854053212096521, 0},\n\t{-1367844497320252663, 1},\n\t{-1366902003403090714, 1},\n\t{-1362623426886902239, 1},\n\t{-1361778231459323052, 2},\n\t{-1361247149346162270, 2},\n\t{-1357344194246196682, 2},\n\t{-1354413656017715812, 1},\n\t{-1354164235284583129, 1},\n\t{-1349663819090533573, 1},\n\t{-1347100097576767756, 3},\n\t{-1346383750776461839, 0},\n\t{-1338850645441468878, 2},\n\t{-1338796224011019793, 2},\n\t{-1333917434186680394, 3},\n\t{-1331856836553673957, 1},\n\t{-1329101611515589465, 3},\n\t{-1328428949757312575, 0},\n\t{-1327935382244843453, 0},\n\t{-1325168693740124782, 3},\n\t{-1325152932883192573, 3},\n\t{-1323724782579371354, 0},\n\t{-1320801626707566503, 2},\n\t{-1320304058393305082, 3},\n\t{-1320155345480619674, 3},\n\t{-1319774336460305454, 3},\n\t{-1319654097239040179, 3},\n\t{-1317279198152880086, 2},\n\t{-1315697465148623624, 3},\n\t{-1315595291204178765, 3},\n\t{-1314642015978588599, 0},\n\t{-1314506396527354713, 0},\n\t{-1302076497759566196, 
3},\n\t{-1301146133434801499, 0},\n\t{-1299101443779584876, 2},\n\t{-1298781291506435097, 2},\n\t{-1285654953148083366, 2},\n\t{-1284984431032969819, 2},\n\t{-1283664301162782501, 3},\n\t{-1281460906375894565, 1},\n\t{-1277957415329035942, 0},\n\t{-1276350644752352739, 2},\n\t{-1271007628369261666, 3},\n\t{-1270748765140350517, 3},\n\t{-1267775765877796973, 1},\n\t{-1265793239351691300, 3},\n\t{-1265025951471657763, 0},\n\t{-1263155161144952833, 2},\n\t{-1262194379779126736, 2},\n\t{-1261763548157370125, 3},\n\t{-1261214427717145557, 3},\n\t{-1258575267463600936, 2},\n\t{-1258352383571134350, 2},\n\t{-1253530287168879212, 2},\n\t{-1252944263126520647, 3},\n\t{-1252712739372795816, 3},\n\t{-1252002263984871304, 3},\n\t{-1246575837405344446, 0},\n\t{-1245576336262250434, 1},\n\t{-1242970933191876325, 0},\n\t{-1242134053646083860, 0},\n\t{-1241838785875603366, 1},\n\t{-1240924489907435312, 1},\n\t{-1239531207691803586, 3},\n\t{-1234980902435470782, 3},\n\t{-1233470947513703105, 0},\n\t{-1224699012311108401, 0},\n\t{-1224000270275042778, 0},\n\t{-1221843336532441859, 2},\n\t{-1221328215574000509, 3},\n\t{-1220678477799309883, 3},\n\t{-1218549083573929154, 1},\n\t{-1217090296851042491, 3},\n\t{-1216647231488414561, 3},\n\t{-1213069060332460176, 2},\n\t{-1212722424675087943, 2},\n\t{-1210749641592010249, 0},\n\t{-1209451658367509130, 1},\n\t{-1207985712205088473, 3},\n\t{-1207548938640541284, 3},\n\t{-1205926746069531541, 0},\n\t{-1201533665340011682, 0},\n\t{-1201504451431647717, 0},\n\t{-1201273777071664066, 1},\n\t{-1201080066455059843, 1},\n\t{-1199249073256660406, 2},\n\t{-1198831181223038346, 3},\n\t{-1198637581310715623, 3},\n\t{-1197508199353274230, 0},\n\t{-1195426106362627613, 2},\n\t{-1195090046047014218, 2},\n\t{-1193746069172769025, 3},\n\t{-1191798131784745810, 1},\n\t{-1190858883595967526, 2},\n\t{-1190107657111481907, 2},\n\t{-1186441402359507445, 2},\n\t{-1185776498237769292, 2},\n\t{-1184434569986778375, 0},\n\t{-1183873305629144242, 
0},\n\t{-1181277879191692978, 2},\n\t{-1179630796588659498, 0},\n\t{-1177151819143762662, 2},\n\t{-1176774698107529431, 2},\n\t{-1175526857543141289, 3},\n\t{-1174300356546669860, 1},\n\t{-1171152141296850958, 3},\n\t{-1169977416524822488, 0},\n\t{-1168846865328009227, 1},\n\t{-1163257714797357812, 2},\n\t{-1160243713204167242, 1},\n\t{-1158639737153591325, 2},\n\t{-1157036866770926600, 0},\n\t{-1155232900193319944, 1},\n\t{-1152967204838276266, 3},\n\t{-1152868737732334136, 0},\n\t{-1152802578545715236, 0},\n\t{-1152684150090300048, 0},\n\t{-1152359548242977740, 0},\n\t{-1150686407875479255, 1},\n\t{-1150296582975165507, 2},\n\t{-1150264431439526532, 2},\n\t{-1150030117026759518, 2},\n\t{-1147316199121276500, 0},\n\t{-1145217570441118699, 2},\n\t{-1145074827508615066, 2},\n\t{-1144859008037402187, 3},\n\t{-1143261189509486067, 0},\n\t{-1142560685692396912, 1},\n\t{-1140740218452445296, 2},\n\t{-1140471338776748020, 3},\n\t{-1139474671028372331, 3},\n\t{-1139266097077022096, 0},\n\t{-1138484053216818838, 0},\n\t{-1137014555010663959, 2},\n\t{-1136979698434165329, 2},\n\t{-1128109384607248838, 2},\n\t{-1127717459979696323, 2},\n\t{-1126530448459170425, 3},\n\t{-1125328110991572224, 0},\n\t{-1123927515042473937, 1},\n\t{-1123278971731887438, 2},\n\t{-1123009921528778662, 2},\n\t{-1118693171689001704, 2},\n\t{-1117206558756077661, 3},\n\t{-1116797137764417425, 0},\n\t{-1116663361230920869, 0},\n\t{-1113158324599555738, 3},\n\t{-1111781222046260423, 0},\n\t{-1108304800518597836, 3},\n\t{-1107066522854398543, 0},\n\t{-1101625406492723498, 1},\n\t{-1100656754750990607, 2},\n\t{-1098224416273206884, 0},\n\t{-1097301031065938614, 1},\n\t{-1094942599387816303, 3},\n\t{-1093945354052422448, 0},\n\t{-1093929716827351380, 0},\n\t{-1091068625720540039, 2},\n\t{-1090707405519744506, 3},\n\t{-1089289024529580524, 0},\n\t{-1087995967921116164, 1},\n\t{-1087275644534610604, 2},\n\t{-1087242979198940329, 2},\n\t{-1085614910931006866, 3},\n\t{-1075385491503010818, 
0},\n\t{-1074668554203872265, 1},\n\t{-1073235046345540556, 2},\n\t{-1070423312756455567, 1},\n\t{-1069673373482873561, 1},\n\t{-1068100500782301346, 3},\n\t{-1066510686810337201, 0},\n\t{-1063735478041201793, 3},\n\t{-1060698767693734800, 1},\n\t{-1059596041075332457, 2},\n\t{-1058218048005603894, 0},\n\t{-1051854066379282369, 1},\n\t{-1051251684367976510, 2},\n\t{-1051150292398920338, 2},\n\t{-1051002128331815853, 2},\n\t{-1050432403394780831, 3},\n\t{-1048441364038490135, 0},\n\t{-1046018898404202211, 2},\n\t{-1045852427070619530, 3},\n\t{-1045253478913356452, 3},\n\t{-1043490112080498771, 1},\n\t{-1040506675782319462, 3},\n\t{-1039555660142716775, 0},\n\t{-1039365871498463897, 0},\n\t{-1038507094029118666, 1},\n\t{-1024137151284929107, 2},\n\t{-1021341092316011443, 0},\n\t{-1020699409736068844, 1},\n\t{-1020185170929668224, 1},\n\t{-1017177323443070327, 0},\n\t{-1014646946621792220, 2},\n\t{-1012382214490360397, 0},\n\t{-1011781922916624888, 1},\n\t{-1010362432658746493, 2},\n\t{-1009453752036546769, 3},\n\t{-1009087513563874359, 3},\n\t{-1005257144232960511, 3},\n\t{-1004665102592750919, 3},\n\t{-1003536152561585952, 0},\n\t{-1000141238090522809, 3},\n\t{-997995573337511703, 1},\n\t{-995019871775582865, 0},\n\t{-994062941789339149, 1},\n\t{-993222845684429133, 1},\n\t{-992653181499890078, 2},\n\t{-987599969184458926, 2},\n\t{-986399483459998572, 3},\n\t{-986382211788141931, 3},\n\t{-983253585597138587, 2},\n\t{-981129884115140304, 0},\n\t{-978888221685878702, 2},\n\t{-977683012077508982, 3},\n\t{-977080948432618714, 0},\n\t{-977050006895902886, 0},\n\t{-972876779519100960, 3},\n\t{-971638704204878139, 1},\n\t{-971530851882327796, 1},\n\t{-971456383110996773, 1},\n\t{-969239839137818859, 3},\n\t{-967825744430733346, 0},\n\t{-967728501516303499, 0},\n\t{-967425539375673921, 0},\n\t{-963754516077969798, 0},\n\t{-962762924286725255, 0},\n\t{-962651811879179709, 0},\n\t{-961907766411954570, 1},\n\t{-960419142166668073, 2},\n\t{-959014489259360762, 
0},\n\t{-958738910333608519, 0},\n\t{-956512461201067389, 2},\n\t{-952693488331882972, 1},\n\t{-951579551112524673, 2},\n\t{-950590931691416163, 3},\n\t{-948815683450387798, 1},\n\t{-948500683717338961, 1},\n\t{-946272134678290441, 3},\n\t{-944331205686484850, 1},\n\t{-941228152756570819, 0},\n\t{-940498784430490362, 0},\n\t{-940141377166661318, 0},\n\t{-937512914028603620, 3},\n\t{-937018491241707600, 3},\n\t{-936260470151143969, 0},\n\t{-932830596804011644, 3},\n\t{-931589898911148069, 0},\n\t{-924415414736346231, 2},\n\t{-920727616950795670, 2},\n\t{-920140388996040072, 2},\n\t{-918608439276856107, 0},\n\t{-915409309132928352, 2},\n\t{-915291862740106742, 3},\n\t{-914273139509149148, 3},\n\t{-913828857994003705, 0},\n\t{-913422721190430947, 0},\n\t{-913214085702998812, 0},\n\t{-908823184859081942, 0},\n\t{-907860667411174299, 1},\n\t{-907117004909382785, 2},\n\t{-905477140002459244, 3},\n\t{-903719116778878193, 1},\n\t{-899419123396695724, 1},\n\t{-899009618672977627, 1},\n\t{-895346832758020062, 0},\n\t{-891642641951171181, 0},\n\t{-891605919111554641, 0},\n\t{-891501614146508201, 0},\n\t{-886519233923207175, 0},\n\t{-886127866988687889, 0},\n\t{-884591073667069665, 2},\n\t{-881764523398330524, 0},\n\t{-877365872829187835, 0},\n\t{-876715568781609829, 1},\n\t{-875648607130754806, 2},\n\t{-875108816988003375, 2},\n\t{-872578781267656666, 0},\n\t{-872137386690888106, 1},\n\t{-871681486727994380, 1},\n\t{-871456056532746440, 1},\n\t{-868951014232923967, 0},\n\t{-862064264817027743, 2},\n\t{-857775020482326402, 2},\n\t{-855651265983115465, 0},\n\t{-851358961765885186, 3},\n\t{-850797503307583747, 0},\n\t{-850276587889890853, 0},\n\t{-849718435948130611, 1},\n\t{-848074892985352431, 2},\n\t{-847975243000832844, 2},\n\t{-844047641076089562, 2},\n\t{-843297192462526062, 3},\n\t{-841388159977631827, 0},\n\t{-841088622394001755, 0},\n\t{-841006501537323183, 1},\n\t{-840406953743301563, 1},\n\t{-839839195792572660, 2},\n\t{-839168098500359551, 
2},\n\t{-835292887720171328, 2},\n\t{-830789226864780038, 2},\n\t{-830498671011261396, 2},\n\t{-828106487136060559, 0},\n\t{-827749382739086071, 0},\n\t{-825347295361382392, 2},\n\t{-823087301619710795, 0},\n\t{-821676159540573804, 2},\n\t{-818771859446853983, 0},\n\t{-814151344242355196, 0},\n\t{-814005593591987023, 1},\n\t{-813342644116700931, 1},\n\t{-807207866519130243, 3},\n\t{-805822858464796655, 0},\n\t{-805214755764999948, 0},\n\t{-804617920795327793, 1},\n\t{-802693145930005592, 3},\n\t{-801981287171221196, 3},\n\t{-801757062392514851, 3},\n\t{-795543541632209843, 1},\n\t{-795416522810450163, 1},\n\t{-793247799558849163, 3},\n\t{-792965992507521994, 3},\n\t{-790224977923007852, 2},\n\t{-790102614526281936, 2},\n\t{-789936867880901767, 2},\n\t{-789010614468653769, 3},\n\t{-787891129623668480, 0},\n\t{-784992424788800285, 2},\n\t{-782349593170855666, 1},\n\t{-781575004087050824, 1},\n\t{-779477504752744724, 3},\n\t{-775470887151333436, 3},\n\t{-774104534141819447, 0},\n\t{-770427949396936848, 3},\n\t{-769801292173358406, 0},\n\t{-769759231140445305, 0},\n\t{-769141004179211242, 0},\n\t{-766250072124318694, 3},\n\t{-765172561144749439, 0},\n\t{-765051748074516957, 0},\n\t{-762859989423626158, 2},\n\t{-761979729736011117, 3},\n\t{-758103253018524508, 2},\n\t{-756895270274011853, 3},\n\t{-753650760793606515, 2},\n\t{-753040700087578141, 3},\n\t{-753036260583658578, 3},\n\t{-752769468622619709, 3},\n\t{-751720132915383628, 0},\n\t{-749221757880928815, 2},\n\t{-747370348387618091, 0},\n\t{-743510257069913444, 3},\n\t{-741972051330918179, 0},\n\t{-732777874819585469, 1},\n\t{-732527415795266136, 1},\n\t{-728908313856330296, 0},\n\t{-726325844279290769, 2},\n\t{-726104670247783274, 3},\n\t{-725916525071468705, 3},\n\t{-721080886055739804, 3},\n\t{-716173786562910616, 3},\n\t{-711418142795894312, 0},\n\t{-708707312265689755, 2},\n\t{-708259336741294331, 2},\n\t{-708209601443501034, 2},\n\t{-707413840356596211, 3},\n\t{-704264816257841785, 
2},\n\t{-700501842376859299, 1},\n\t{-699924121618666580, 2},\n\t{-699173510278521294, 3},\n\t{-694928025106217093, 2},\n\t{-689186831618837430, 3},\n\t{-687133535904964464, 1},\n\t{-686933357352386113, 1},\n\t{-680274134582167252, 3},\n\t{-679246917476623699, 0},\n\t{-678118811503123850, 1},\n\t{-677177035019724642, 2},\n\t{-676979480903665150, 2},\n\t{-676033229760784567, 3},\n\t{-674043833559041960, 1},\n\t{-669793974453348022, 1},\n\t{-667044712082598034, 3},\n\t{-666762408152965524, 3},\n\t{-665993192882545941, 0},\n\t{-663289209930603601, 2},\n\t{-660937286264411051, 0},\n\t{-658584783947766860, 3},\n\t{-658286583263627520, 3},\n\t{-655954128371037376, 1},\n\t{-655494942366869739, 1},\n\t{-654522755499703383, 2},\n\t{-651988276856973182, 0},\n\t{-645865271396655468, 2},\n\t{-640094885109482048, 3},\n\t{-638192256750016889, 1},\n\t{-638030128936957155, 1},\n\t{-637791841615950901, 1},\n\t{-629173266594458701, 1},\n\t{-629051840281723418, 1},\n\t{-628872808572439065, 1},\n\t{-628405696243610091, 1},\n\t{-626765933131104163, 3},\n\t{-626149818514139261, 3},\n\t{-625777811831726968, 0},\n\t{-625739085672544325, 0},\n\t{-618704114558502395, 2},\n\t{-617955489350255873, 3},\n\t{-617504493190557679, 3},\n\t{-614050578974305642, 2},\n\t{-606091791930651899, 1},\n\t{-602186949834657433, 1},\n\t{-600915681010340203, 2},\n\t{-598528765384049157, 0},\n\t{-595726228236097271, 2},\n\t{-594748086612544774, 3},\n\t{-594489435270597066, 3},\n\t{-593130964790886442, 1},\n\t{-590775394569444171, 3},\n\t{-589026533038643231, 0},\n\t{-588965009055505994, 0},\n\t{-587650125902095591, 2},\n\t{-586722688440250625, 2},\n\t{-586708867127744681, 2},\n\t{-586514475303424365, 3},\n\t{-586204668459208778, 3},\n\t{-585096967734484344, 0},\n\t{-583906160172242099, 1},\n\t{-582036368839819185, 3},\n\t{-579181055705552183, 1},\n\t{-578449113031338860, 2},\n\t{-577998860602676434, 2},\n\t{-577750678648786787, 2},\n\t{-574205306189701593, 2},\n\t{-571083669644024950, 
0},\n\t{-569902880434305106, 1},\n\t{-567721443821300591, 3},\n\t{-567500972540005687, 3},\n\t{-567015540306850344, 0},\n\t{-566350456914480747, 0},\n\t{-565220067356568000, 1},\n\t{-562983565687368755, 3},\n\t{-561188847121447964, 1},\n\t{-560142088855490684, 2},\n\t{-560019466163975205, 2},\n\t{-557644782841535115, 0},\n\t{-556003840926558196, 2},\n\t{-555639950092640668, 2},\n\t{-555628050309182846, 2},\n\t{-554326076964927066, 3},\n\t{-552069022449147648, 1},\n\t{-548798741259206860, 0},\n\t{-547118646773306259, 2},\n\t{-541640602847650161, 2},\n\t{-540363744289044139, 0},\n\t{-540236694393729254, 0},\n\t{-536097879657316840, 3},\n\t{-534399299648268268, 1},\n\t{-532715588273947737, 2},\n\t{-532157493440787893, 3},\n\t{-529580886942148797, 1},\n\t{-529399673591367197, 1},\n\t{-526974151612197803, 3},\n\t{-521900291291646808, 0},\n\t{-521819338262241021, 0},\n\t{-521777273466472192, 0},\n\t{-521590533477234680, 0},\n\t{-519421720958313954, 2},\n\t{-517610583928040616, 0},\n\t{-514030935887180362, 3},\n\t{-513040944932576616, 0},\n\t{-512661081567630200, 0},\n\t{-510936796703106027, 2},\n\t{-510436188550830240, 2},\n\t{-507209922248318590, 1},\n\t{-501816739816444981, 2},\n\t{-499795352490996691, 0},\n\t{-497772499581118605, 1},\n\t{-495750013236888658, 3},\n\t{-494552530806590575, 0},\n\t{-490620724022037065, 0},\n\t{-489368915436843794, 1},\n\t{-487802567388282187, 2},\n\t{-485840197008653553, 0},\n\t{-483219462271033024, 2},\n\t{-481470925307898641, 0},\n\t{-480316717575630103, 1},\n\t{-476875037956864526, 0},\n\t{-476855407287041063, 0},\n\t{-475599560221382704, 1},\n\t{-475360359861770502, 1},\n\t{-474665027162709122, 2},\n\t{-473271248998237036, 3},\n\t{-473169797920193246, 3},\n\t{-472599117440972782, 0},\n\t{-469494751331826278, 3},\n\t{-469085281818431464, 3},\n\t{-468596155884519221, 3},\n\t{-467329399381280503, 0},\n\t{-460810142657282082, 2},\n\t{-457048210915036106, 2},\n\t{-452354747180591056, 2},\n\t{-452071264969598975, 
2},\n\t{-450421382449634808, 3},\n\t{-448160776526584579, 1},\n\t{-446067958794031360, 3},\n\t{-444163390544203835, 1},\n\t{-443238259674941121, 2},\n\t{-438107416680541341, 2},\n\t{-436558849784123331, 0},\n\t{-433318055686475087, 3},\n\t{-430472767797237460, 1},\n\t{-430047833052471868, 2},\n\t{-429951509704472173, 2},\n\t{-427611592361889162, 0},\n\t{-426525328389663639, 1},\n\t{-425670994943984117, 1},\n\t{-423758837370222832, 3},\n\t{-422831163432379469, 0},\n\t{-420356808984731478, 2},\n\t{-415152776690618958, 3},\n\t{-412491629245583811, 1},\n\t{-412411955806795936, 1},\n\t{-409194516914747059, 0},\n\t{-408054142151004596, 1},\n\t{-405916381053817988, 3},\n\t{-402680400405748153, 2},\n\t{-402294563122108593, 2},\n\t{-402179730513070908, 2},\n\t{-401317018934295792, 3},\n\t{-401120271507893491, 3},\n\t{-400201166720684612, 0},\n\t{-398507410050414140, 2},\n\t{-394794716949370121, 1},\n\t{-393986612633286205, 2},\n\t{-393355342249438647, 2},\n\t{-392993602474464002, 2},\n\t{-392906005314377954, 3},\n\t{-388985867040381446, 2},\n\t{-383211370617934898, 3},\n\t{-382521476925762340, 0},\n\t{-382423754079888716, 0},\n\t{-376103656281112226, 1},\n\t{-373734314020955645, 0},\n\t{-371766073836396390, 1},\n\t{-371753564795024950, 1},\n\t{-367578753006445182, 1},\n\t{-364396238653707811, 0},\n\t{-364248120747934747, 0},\n\t{-362809164837844933, 1},\n\t{-361653515061631252, 2},\n\t{-361108568338612868, 3},\n\t{-358351927013351817, 1},\n\t{-355963252458851750, 3},\n\t{-354222795473148829, 1},\n\t{-351461639432972239, 3},\n\t{-349813745284870510, 1},\n\t{-343171624986920150, 3},\n\t{-340305343239490768, 1},\n\t{-339853116163933114, 2},\n\t{-338274602863681392, 3},\n\t{-337947739068872473, 3},\n\t{-336821261629352001, 0},\n\t{-335468982235920052, 2},\n\t{-333748092272552657, 3},\n\t{-332161111509311172, 0},\n\t{-330504950989170206, 2},\n\t{-329656477330667493, 3},\n\t{-327470139832544282, 1},\n\t{-326799867474199465, 1},\n\t{-321291470808127794, 
2},\n\t{-320486708517407841, 3},\n\t{-317801693422775921, 1},\n\t{-314172743928834217, 0},\n\t{-314145463593925842, 0},\n\t{-313250437698246935, 1},\n\t{-309089415574316096, 1},\n\t{-307306264328335331, 3},\n\t{-307198390607013151, 3},\n\t{-307141340924384126, 3},\n\t{-304015893526921200, 1},\n\t{-301740446419748943, 0},\n\t{-299794238108725721, 1},\n\t{-299578168054249600, 1},\n\t{-299139383163409969, 2},\n\t{-299018411687511723, 2},\n\t{-294416114236761611, 2},\n\t{-294241495009912632, 2},\n\t{-290496769689193011, 1},\n\t{-290172184000345249, 2},\n\t{-288785721342621054, 3},\n\t{-283860555331182376, 3},\n\t{-282102785892339796, 1},\n\t{-280730676104291787, 2},\n\t{-277889098791410023, 1},\n\t{-272579376031201559, 1},\n\t{-270944517927142521, 3},\n\t{-268040845003362199, 1},\n\t{-267907505272959767, 2},\n\t{-266142010797821901, 3},\n\t{-264999559122421420, 0},\n\t{-264495840778440365, 1},\n\t{-263372665235036844, 2},\n\t{-261555233229301282, 3},\n\t{-259253317087742609, 1},\n\t{-250345527471428046, 1},\n\t{-249463868468920988, 2},\n\t{-247365580122209507, 0},\n\t{-245906337401114133, 1},\n\t{-245847644647976388, 1},\n\t{-237421649820761541, 1},\n\t{-236103314090079636, 2},\n\t{-234003737024208362, 0},\n\t{-232564882469286066, 1},\n\t{-232553029963256329, 1},\n\t{-229732231136376743, 3},\n\t{-227165961174207581, 2},\n\t{-225940683867680844, 3},\n\t{-223231591414754686, 1},\n\t{-220858213162678124, 3},\n\t{-220853209766062543, 3},\n\t{-218814151780565259, 1},\n\t{-217415483528589551, 2},\n\t{-215019501764532499, 1},\n\t{-212800859907387909, 2},\n\t{-211316482699402036, 0},\n\t{-209838200620880590, 1},\n\t{-206047194985799654, 0},\n\t{-205688478318626438, 1},\n\t{-204797643058289232, 2},\n\t{-204723507707766497, 2},\n\t{-204509901656493691, 2},\n\t{-203815543341826767, 2},\n\t{-201560454886547082, 0},\n\t{-201395833392055979, 1},\n\t{-200187547955074750, 2},\n\t{-198697659860506397, 3},\n\t{-198213893087575061, 3},\n\t{-195609007205124389, 
2},\n\t{-194502032274121086, 3},\n\t{-193870337344881833, 3},\n\t{-192780734242323592, 0},\n\t{-191823931213441261, 1},\n\t{-190926944869205505, 2},\n\t{-188723776699310893, 0},\n\t{-187892840754218842, 1},\n\t{-187666886727938348, 1},\n\t{-185642768172442400, 3},\n\t{-182931870224192662, 1},\n\t{-182613927073960835, 1},\n\t{-178023573015689523, 1},\n\t{-177442369411702452, 2},\n\t{-175800235966354569, 3},\n\t{-174250492565661475, 1},\n\t{-172507369656689546, 2},\n\t{-171426858473823448, 3},\n\t{-169724297451530417, 1},\n\t{-169621763899538875, 1},\n\t{-169463994805073541, 1},\n\t{-164493060619583776, 1},\n\t{-164314379825977151, 2},\n\t{-163105021764425780, 3},\n\t{-162148505303414039, 3},\n\t{-161680126255521039, 0},\n\t{-161377740959848933, 0},\n\t{-160842573018558141, 1},\n\t{-160549406095001530, 1},\n\t{-159125751571007457, 2},\n\t{-158282582276624510, 3},\n\t{-157864933296445077, 3},\n\t{-156931981352873519, 0},\n\t{-156169512602506618, 1},\n\t{-155711526586381950, 1},\n\t{-153013985105339652, 0},\n\t{-152841809345263249, 0},\n\t{-152545681642528496, 0},\n\t{-152468721982189271, 0},\n\t{-151612077042964049, 1},\n\t{-149699492631760347, 3},\n\t{-145240311186288542, 3},\n\t{-137875643124074655, 1},\n\t{-136823741788358717, 2},\n\t{-133267543280976868, 1},\n\t{-132716169076251081, 2},\n\t{-131760647534116592, 2},\n\t{-131605007050047232, 3},\n\t{-131583060548710765, 3},\n\t{-131420798435651764, 3},\n\t{-130657697238337884, 3},\n\t{-129438970951617485, 1},\n\t{-126551225829717195, 3},\n\t{-123764423966618857, 2},\n\t{-123463349719919011, 2},\n\t{-120526517138463402, 0},\n\t{-119559634183805982, 1},\n\t{-119312023456581357, 2},\n\t{-119018545082410367, 2},\n\t{-112095669382329211, 0},\n\t{-110358033321145423, 1},\n\t{-107892908511118517, 0},\n\t{-104219854386612346, 3},\n\t{-101498572469197163, 1},\n\t{-100951912144139858, 2},\n\t{-99139936086429966, 3},\n\t{-94995291919087899, 3},\n\t{-88711943388874637, 1},\n\t{-83668165339614550, 1},\n\t{-82371603702022797, 
2},\n\t{-80304589910404741, 0},\n\t{-80175028528078733, 0},\n\t{-80020596677198829, 0},\n\t{-75944638922506117, 0},\n\t{-70499300361242469, 1},\n\t{-70487716349055786, 1},\n\t{-69769017131629845, 2},\n\t{-68557372089363563, 3},\n\t{-67678228637283364, 3},\n\t{-66685435314917788, 0},\n\t{-65983712739073161, 1},\n\t{-65667333597769575, 1},\n\t{-63148605363145979, 3},\n\t{-61144679699915150, 1},\n\t{-61096754689315132, 1},\n\t{-59693224507000408, 2},\n\t{-59073855366602705, 3},\n\t{-57865695274931102, 0},\n\t{-56208138632121421, 2},\n\t{-55283652474376003, 2},\n\t{-52328287156369476, 1},\n\t{-52147832854641180, 1},\n\t{-51902409972188429, 1},\n\t{-50340608507081554, 3},\n\t{-49827275952989029, 3},\n\t{-47620112558228260, 1},\n\t{-41709503107598606, 2},\n\t{-38949378385075562, 1},\n\t{-30772445739016921, 0},\n\t{-24055876245573933, 2},\n\t{-23787344794631260, 2},\n\t{-20011310888904549, 2},\n\t{-19801451591297434, 2},\n\t{-19388754982815970, 2},\n\t{-18291953055118581, 3},\n\t{-9772655057286938, 3},\n\t{-5233917887925356, 3},\n\t{-3790811379916698, 0},\n\t{-1161327067926650, 2},\n\t{337365398009672, 0},\n\t{1835083890227297, 1},\n\t{3479365334002559, 3},\n\t{3620624420154906, 3},\n\t{5016580628498636, 0},\n\t{5947199420413706, 1},\n\t{12406422862851764, 3},\n\t{13457394691850640, 3},\n\t{13759266764875114, 0},\n\t{16943233056723959, 3},\n\t{17811771696258454, 3},\n\t{18297061267893592, 0},\n\t{19122717499513976, 0},\n\t{19313208863719195, 1},\n\t{21288269071442912, 2},\n\t{22158555735764685, 3},\n\t{22876217507758888, 0},\n\t{24132517986348450, 1},\n\t{27516514484999216, 0},\n\t{36270947124910996, 0},\n\t{37871400534593637, 1},\n\t{38721528907814858, 2},\n\t{42003086096185126, 1},\n\t{42368431559075436, 1},\n\t{45472263415666160, 0},\n\t{50499560488193998, 0},\n\t{52886549432698387, 2},\n\t{54314460065931021, 0},\n\t{58150087335711583, 3},\n\t{58572384606175640, 0},\n\t{65693968761035428, 2},\n\t{65775041077610528, 2},\n\t{67458091619110080, 3},\n\t{69293445680159260, 
1},\n\t{71942130426308810, 3},\n\t{73391310423337311, 1},\n\t{73831204758445523, 1},\n\t{74080918672923083, 1},\n\t{74811359757052203, 2},\n\t{75509257701993107, 3},\n\t{75622999261034037, 3},\n\t{76424973408800670, 3},\n\t{77227795434592890, 0},\n\t{77347453074835314, 0},\n\t{78622928595414301, 1},\n\t{80837329384614668, 3},\n\t{84934713532593143, 3},\n\t{85712118963198663, 0},\n\t{85798179858718218, 0},\n\t{86634496166944612, 0},\n\t{87007479623289741, 1},\n\t{88033852397144842, 2},\n\t{89765411247192320, 3},\n\t{90526299466570760, 0},\n\t{91098822676962048, 0},\n\t{92043614588135058, 1},\n\t{97541202511934322, 2},\n\t{100138151257549750, 0},\n\t{101131555154966248, 1},\n\t{101271567986559024, 1},\n\t{102358211025551975, 2},\n\t{103188636542860506, 3},\n\t{103233761839887273, 3},\n\t{105215639123603095, 1},\n\t{105247898612236993, 1},\n\t{109334461514774551, 1},\n\t{113940269966747855, 1},\n\t{114981392801355984, 2},\n\t{115789089383228434, 2},\n\t{117025225880114358, 3},\n\t{120330458503131262, 2},\n\t{123082950455315200, 1},\n\t{123130503269026471, 1},\n\t{123455633768048520, 1},\n\t{123487307600089941, 1},\n\t{124480779319895711, 2},\n\t{124544085092507842, 2},\n\t{125302152732348184, 3},\n\t{127312691055572717, 1},\n\t{127978711203399929, 1},\n\t{129686417761587636, 3},\n\t{130060335428695763, 3},\n\t{133197585235411031, 2},\n\t{133276733357110320, 2},\n\t{133890687739758641, 2},\n\t{134082388779593758, 3},\n\t{136525485375924584, 1},\n\t{137711180325059355, 2},\n\t{137852534869027802, 2},\n\t{138758511575589621, 3},\n\t{143927313191213067, 3},\n\t{145566948387415811, 1},\n\t{146043500758882627, 1},\n\t{146239434715307010, 1},\n\t{147656975457446594, 3},\n\t{147924765446833171, 3},\n\t{148622882929551227, 0},\n\t{153094423748990272, 3},\n\t{155376204296061914, 2},\n\t{156113834385102974, 2},\n\t{156692738160586768, 3},\n\t{158452049235133439, 0},\n\t{159238969879886275, 1},\n\t{159474651983725180, 1},\n\t{161593434730501659, 3},\n\t{162357121275283036, 
0},\n\t{163014677410648780, 0},\n\t{165609063072009543, 3},\n\t{165683438545279851, 3},\n\t{167331385283237955, 0},\n\t{167384008766142825, 0},\n\t{169664180767084950, 2},\n\t{170638000774456803, 3},\n\t{171312239643798338, 0},\n\t{171913194746292340, 0},\n\t{175173259055422937, 3},\n\t{180406214755895441, 0},\n\t{180763204734487516, 0},\n\t{181715623314219477, 1},\n\t{183531592189566333, 3},\n\t{183617125152929884, 3},\n\t{185117872141862078, 0},\n\t{186994595414036126, 2},\n\t{188561368309958610, 3},\n\t{193906087462276248, 0},\n\t{194685332682032855, 0},\n\t{196764857437706914, 2},\n\t{197286788100342599, 3},\n\t{197927461559872336, 3},\n\t{201156120159567349, 2},\n\t{205781740550325879, 2},\n\t{207407626016646402, 0},\n\t{211442421345845297, 3},\n\t{213056095944584527, 1},\n\t{214590221976256938, 2},\n\t{216447722602413405, 0},\n\t{217379821904314596, 1},\n\t{217396611170985405, 1},\n\t{218469539110662847, 2},\n\t{219452310774567477, 2},\n\t{224041321812828923, 2},\n\t{224325018177633051, 3},\n\t{224864726707847341, 3},\n\t{228147416769297532, 2},\n\t{230403592908769907, 0},\n\t{230910819710191369, 1},\n\t{232292276743311520, 2},\n\t{234873315809482405, 0},\n\t{236048318327160595, 1},\n\t{238108428351258060, 3},\n\t{238597963220579951, 3},\n\t{239349792036227692, 0},\n\t{239363709735580268, 0},\n\t{240118175049334032, 1},\n\t{240183135090482759, 1},\n\t{240818536923214964, 1},\n\t{242398233326733860, 3},\n\t{244051970029844984, 0},\n\t{244653387233514407, 1},\n\t{245785646003188762, 2},\n\t{245997232532844693, 2},\n\t{251407823474830080, 3},\n\t{253520698967646545, 1},\n\t{254927734982673366, 2},\n\t{256155467344147576, 3},\n\t{258103410624047043, 1},\n\t{261762849724297777, 0},\n\t{261800098297632583, 0},\n\t{266525457107874383, 0},\n\t{266715605587420697, 0},\n\t{266926087904139703, 1},\n\t{267095478455447841, 1},\n\t{270358505600711663, 0},\n\t{272185316158276275, 1},\n\t{272254647515368888, 1},\n\t{273803672914640681, 3},\n\t{274365244835760293, 
3},\n\t{276174609308984620, 1},\n\t{276672097945894600, 1},\n\t{278615615452077094, 3},\n\t{280306586670564258, 0},\n\t{282088369226218327, 2},\n\t{284662740045013911, 0},\n\t{285441240227703147, 1},\n\t{288867677693729617, 0},\n\t{289228213040561409, 0},\n\t{291516313259642017, 2},\n\t{291846502005767527, 3},\n\t{296190498504846331, 3},\n\t{303374530070102574, 1},\n\t{304417433989457422, 2},\n\t{304553619725871452, 2},\n\t{306541890774176457, 0},\n\t{306606584236184948, 0},\n\t{309995557562527642, 3},\n\t{315608229279517988, 0},\n\t{319559866097311520, 3},\n\t{319631962641262886, 3},\n\t{321596525596966368, 1},\n\t{322230631275722738, 2},\n\t{323500563420915007, 3},\n\t{333253501526819559, 3},\n\t{336966636538642288, 3},\n\t{338322513685889094, 0},\n\t{340815447648883515, 2},\n\t{342808204456495847, 0},\n\t{344363442083183372, 1},\n\t{346996834423003137, 0},\n\t{352050173411363008, 0},\n\t{354396912301694851, 2},\n\t{355987470783715289, 0},\n\t{358981084325456297, 2},\n\t{359296461283412462, 3},\n\t{362307894109184756, 1},\n\t{362622696874572589, 2},\n\t{364857249575627077, 0},\n\t{366923925676297763, 1},\n\t{368039661304060109, 2},\n\t{369660033030395664, 0},\n\t{373111689511787776, 3},\n\t{374168726287392416, 0},\n\t{374675543762855577, 0},\n\t{374749841501219463, 0},\n\t{374876330850033157, 0},\n\t{375480926451248840, 1},\n\t{380589540610900499, 2},\n\t{380891117707254721, 2},\n\t{384578381819882997, 1},\n\t{386951757158120423, 3},\n\t{390895102354125171, 3},\n\t{391503694742400100, 3},\n\t{392399536927565061, 0},\n\t{392581604898486237, 0},\n\t{393456580623424521, 1},\n\t{396061521001819786, 3},\n\t{397073904761201461, 0},\n\t{401505357222421447, 0},\n\t{402070157707995986, 1},\n\t{404421684825651952, 3},\n\t{404868283939984213, 3},\n\t{410252855556188539, 0},\n\t{415539346022180878, 1},\n\t{416032636133761818, 1},\n\t{416720033476621147, 2},\n\t{419686382925774771, 0},\n\t{421035787703398002, 1},\n\t{423821949166429788, 0},\n\t{425257284077656004, 
1},\n\t{425787377679108388, 2},\n\t{426431974320414067, 2},\n\t{427565442753143553, 3},\n\t{428534378636662958, 0},\n\t{430098637932406443, 2},\n\t{432683879540471111, 0},\n\t{439696993716723735, 2},\n\t{440823819967060343, 3},\n\t{441378230138224914, 0},\n\t{441411349356125535, 0},\n\t{441457730413500510, 0},\n\t{442191421312351195, 0},\n\t{446727068500127107, 0},\n\t{450474396458754002, 0},\n\t{451656144049885095, 1},\n\t{454405977678828412, 3},\n\t{463535568942521956, 3},\n\t{465735389253857123, 1},\n\t{466631422970127920, 2},\n\t{467870374922948736, 3},\n\t{468115237644725800, 3},\n\t{472369124048495753, 3},\n\t{478290019479375202, 0},\n\t{480892195976386872, 3},\n\t{484326175962445163, 2},\n\t{486365072260726313, 3},\n\t{487428300374279401, 0},\n\t{488441932932924225, 1},\n\t{490710029825349556, 3},\n\t{492056482150412530, 1},\n\t{492561297140740547, 1},\n\t{492661594913071611, 1},\n\t{496162732100763266, 0},\n\t{496776120299488581, 1},\n\t{496886168345737807, 1},\n\t{496944015270898995, 1},\n\t{500933203559990987, 0},\n\t{500944429247223971, 0},\n\t{501233423162925204, 1},\n\t{502032307740418321, 1},\n\t{504835160439230767, 0},\n\t{507177783332912449, 2},\n\t{509217252508558304, 0},\n\t{511469944185190158, 2},\n\t{516837030458121969, 3},\n\t{521925448109788045, 3},\n\t{523908475000446487, 1},\n\t{524951016726211808, 2},\n\t{528761500497662498, 1},\n\t{530386852072226610, 3},\n\t{531210891015723707, 3},\n\t{531381629741544799, 3},\n\t{531980235083892768, 0},\n\t{533683300186882498, 2},\n\t{536185626940071484, 0},\n\t{536311087107268072, 0},\n\t{539163924238980929, 2},\n\t{542062452577694166, 1},\n\t{543793928320028734, 2},\n\t{544294882063238804, 3},\n\t{544520562553653066, 3},\n\t{545586338841603045, 0},\n\t{547798002764098876, 2},\n\t{549084177158104777, 3},\n\t{549773321515881448, 0},\n\t{551694351184988551, 2},\n\t{552222230753806118, 2},\n\t{553862270397822915, 3},\n\t{554181802875127421, 0},\n\t{554372274652098251, 0},\n\t{554613564275538672, 
0},\n\t{555666710752354453, 1},\n\t{555715937471231784, 1},\n\t{556751813715743981, 2},\n\t{557889252770278838, 3},\n\t{558888658462600978, 0},\n\t{560679865241411885, 1},\n\t{560821241126552884, 2},\n\t{560883684809180368, 2},\n\t{561230303704181607, 2},\n\t{561299513492190691, 2},\n\t{561885852477666278, 3},\n\t{562270698796260005, 3},\n\t{563097743169968319, 0},\n\t{566339162869987490, 3},\n\t{567423005556333805, 3},\n\t{569017886015882980, 1},\n\t{574668401802865252, 2},\n\t{576556559454682664, 0},\n\t{578869688591670360, 2},\n\t{579253224818210416, 2},\n\t{581292904207481109, 0},\n\t{583940302802572395, 2},\n\t{586447346893402641, 0},\n\t{588074243027949133, 2},\n\t{589185089630973955, 3},\n\t{589982625287611892, 0},\n\t{592246222838598113, 2},\n\t{594144723906925562, 3},\n\t{597441805673619566, 2},\n\t{598160347685025644, 3},\n\t{599132234851720624, 0},\n\t{600173457889012769, 1},\n\t{602543600201171412, 3},\n\t{603374183917318025, 3},\n\t{603659977879060406, 0},\n\t{605210368520521965, 1},\n\t{605775777190750153, 2},\n\t{610139844400468507, 1},\n\t{610826417599315750, 2},\n\t{611450649805029532, 3},\n\t{612538775579738495, 0},\n\t{612582765730350509, 0},\n\t{614277641753975970, 1},\n\t{614318399645036152, 1},\n\t{618794779997173932, 1},\n\t{620387221110690724, 3},\n\t{621485790692477322, 3},\n\t{621955212201632827, 0},\n\t{625473651597746964, 3},\n\t{625687413740554443, 3},\n\t{626636361892924637, 0},\n\t{628814986250236191, 2},\n\t{630159090784017938, 3},\n\t{631267881297488377, 0},\n\t{635276331565324742, 0},\n\t{637313904146438327, 2},\n\t{641485642622129968, 1},\n\t{642425508859726606, 2},\n\t{644060931085521154, 0},\n\t{646770974357644912, 2},\n\t{647305426889022548, 2},\n\t{656242490540023065, 2},\n\t{656391054161585752, 2},\n\t{656391515615580827, 2},\n\t{656526856410007710, 3},\n\t{657268804481801140, 3},\n\t{657311090456727485, 3},\n\t{662003116984943022, 3},\n\t{665340772538023648, 2},\n\t{666010218889008350, 3},\n\t{669956786553297627, 
3},\n\t{671730026270554187, 0},\n\t{672030839275917387, 0},\n\t{673620386362313513, 2},\n\t{675056178048049936, 3},\n\t{675854453175001376, 0},\n\t{676687808166080642, 1},\n\t{678599629935822487, 2},\n\t{680445808036521720, 0},\n\t{680467331078527998, 0},\n\t{681967482214983606, 1},\n\t{683755129463380331, 3},\n\t{686398273273041108, 1},\n\t{690124000269978738, 0},\n\t{691252595381399909, 1},\n\t{692294179800897334, 2},\n\t{692444220637995183, 3},\n\t{692644109289117284, 3},\n\t{695168590355871183, 1},\n\t{698916452129699483, 0},\n\t{706461694073357290, 3},\n\t{707535734886170166, 0},\n\t{707711359274050554, 0},\n\t{709688946484886124, 2},\n\t{714400186386000571, 2},\n\t{715892538453103234, 3},\n\t{716342211815932418, 0},\n\t{717654477562387405, 1},\n\t{717690824093382499, 1},\n\t{718495496558931256, 2},\n\t{718719347157522770, 2},\n\t{719916909403588123, 3},\n\t{721411054070512211, 0},\n\t{721724488120173520, 1},\n\t{723201529057232872, 2},\n\t{723504070534086590, 2},\n\t{723631146530792730, 2},\n\t{725626380395340423, 0},\n\t{726651803167631282, 1},\n\t{727221375081964863, 1},\n\t{727449677177446247, 2},\n\t{738914743692549036, 0},\n\t{739463808082457414, 0},\n\t{745308808409863365, 1},\n\t{745519897470161955, 2},\n\t{745841945296493090, 2},\n\t{748623699537764331, 0},\n\t{749095217428427498, 1},\n\t{755642148242486948, 3},\n\t{755681272866141856, 3},\n\t{755726243276216906, 3},\n\t{757425635468859837, 0},\n\t{761253702785240269, 0},\n\t{763392550528597251, 2},\n\t{765708529370205869, 0},\n\t{766925528466831056, 1},\n\t{768783710055022593, 2},\n\t{772555732763092154, 2},\n\t{774599870418544994, 3},\n\t{774756648255550280, 0},\n\t{778985717569275446, 3},\n\t{779502023171210622, 0},\n\t{791075276130010146, 2},\n\t{792700662267082069, 0},\n\t{792907708367489662, 0},\n\t{793428309236441723, 0},\n\t{793815434919859989, 1},\n\t{795600785879462404, 2},\n\t{797294204554062394, 0},\n\t{797500947937199223, 0},\n\t{804391514991094773, 2},\n\t{804555761589970083, 
2},\n\t{805356410022526770, 3},\n\t{811407746428888897, 0},\n\t{812814452165280173, 1},\n\t{813090728865908337, 2},\n\t{813818575717250876, 2},\n\t{814055479504359623, 3},\n\t{817684401878026413, 2},\n\t{819021066271244200, 3},\n\t{819025671360939567, 3},\n\t{819428459126520906, 3},\n\t{821149774937924658, 1},\n\t{824725906020094214, 0},\n\t{827601593006385334, 3},\n\t{828473548921635080, 3},\n\t{829354102130592378, 0},\n\t{835352941317790791, 1},\n\t{839256216425754304, 1},\n\t{839503776742338198, 1},\n\t{839509316896647706, 1},\n\t{840288121521900184, 2},\n\t{840602289338043168, 2},\n\t{841248540516677308, 3},\n\t{842319433657874318, 0},\n\t{843624166482924976, 1},\n\t{848900957253965811, 1},\n\t{848996694706215701, 2},\n\t{850624083803699649, 3},\n\t{851862610389870785, 0},\n\t{854477707945585227, 2},\n\t{857098812080930139, 1},\n\t{858271201847869287, 2},\n\t{858333992539229269, 2},\n\t{859220453935501236, 3},\n\t{859378934821172184, 3},\n\t{859958068469395300, 3},\n\t{860685480897895971, 0},\n\t{861106151214369387, 0},\n\t{861385654789185798, 1},\n\t{863783611178845441, 3},\n\t{864186408820718161, 3},\n\t{864861725479381028, 0},\n\t{875193440960042854, 1},\n\t{875443999704706258, 1},\n\t{876262774402417068, 2},\n\t{878521627235279817, 0},\n\t{879196032588393852, 0},\n\t{879582419694004114, 1},\n\t{881254706009257711, 2},\n\t{882227072856718050, 3},\n\t{882598261022221814, 3},\n\t{883386231004650614, 0},\n\t{883711822615720872, 0},\n\t{884169628483225217, 1},\n\t{885632651921176341, 2},\n\t{888545003464860710, 1},\n\t{892883629136550867, 1},\n\t{893248822015722249, 1},\n\t{895320277096324620, 3},\n\t{896171356496767050, 3},\n\t{899131368000361909, 2},\n\t{902133821765135673, 1},\n\t{902551065938994026, 1},\n\t{904600659399261346, 3},\n\t{905138511533056316, 3},\n\t{906237837296102071, 0},\n\t{908961080316058823, 3},\n\t{911259355609173590, 1},\n\t{915701087126056860, 1},\n\t{925712127334585174, 2},\n\t{925935340277953126, 2},\n\t{928489657013724568, 
0},\n\t{929850752341248740, 1},\n\t{933205556752153021, 0},\n\t{933757557593860159, 1},\n\t{934547105018699582, 2},\n\t{936550465252300093, 3},\n\t{938094085229677744, 1},\n\t{941078103132403724, 3},\n\t{944202864752274667, 2},\n\t{946573670118938997, 0},\n\t{947280115192488894, 1},\n\t{948953595599416460, 2},\n\t{952687947479188423, 2},\n\t{953801513254440744, 3},\n\t{959084680228600613, 3},\n\t{961080277765178814, 1},\n\t{961392430845232914, 1},\n\t{962301735301218435, 2},\n\t{963606859868551133, 3},\n\t{963733347542747706, 3},\n\t{967509558281347036, 3},\n\t{969469723070082297, 1},\n\t{970659689666235934, 2},\n\t{971574222404564943, 2},\n\t{977688457852410587, 0},\n\t{977725620600488043, 0},\n\t{977891417952875348, 0},\n\t{978776852477553230, 1},\n\t{980812901908535790, 3},\n\t{982060729801511801, 0},\n\t{986123554861458148, 3},\n\t{988480590191737773, 1},\n\t{989690216748963637, 3},\n\t{989739120193804727, 3},\n\t{990211916120153389, 3},\n\t{990366869750361980, 3},\n\t{993067487641579939, 2},\n\t{993258725591061993, 2},\n\t{994251186392734587, 3},\n\t{996323108122612311, 0},\n\t{997094925100662137, 1},\n\t{998424394203610944, 2},\n\t{1002957240404445546, 2},\n\t{1004611925672609513, 0},\n\t{1005958133829829425, 1},\n\t{1015303344578987207, 1},\n\t{1015386475308003467, 1},\n\t{1022965241305208866, 0},\n\t{1024824968912746329, 2},\n\t{1024933666632532595, 2},\n\t{1025119456145766159, 2},\n\t{1025802827985629416, 3},\n\t{1028205664953556204, 1},\n\t{1028747619818027600, 1},\n\t{1029293419077591562, 2},\n\t{1032204017291942790, 0},\n\t{1032746349041337455, 1},\n\t{1040562332555460063, 0},\n\t{1041185087788456918, 0},\n\t{1050390365100800311, 0},\n\t{1050906963526943019, 1},\n\t{1054599031332574628, 0},\n\t{1055544259175965535, 1},\n\t{1058939659577574223, 0},\n\t{1061009742995815545, 2},\n\t{1061937175004805191, 3},\n\t{1062065604962796920, 3},\n\t{1063988680536248228, 1},\n\t{1064849522879585016, 1},\n\t{1065471006309631872, 2},\n\t{1065572022482167790, 
2},\n\t{1067489266685813773, 0},\n\t{1068459445868032838, 0},\n\t{1068824419602182764, 1},\n\t{1069524643026181952, 1},\n\t{1070955674568086375, 3},\n\t{1075257707763902225, 3},\n\t{1075274683913189606, 3},\n\t{1078394678117926137, 1},\n\t{1080511552817548049, 3},\n\t{1080828374048567057, 3},\n\t{1083285357439896990, 2},\n\t{1085682992432990874, 0},\n\t{1089021699717885159, 3},\n\t{1095757044016364031, 1},\n\t{1096785435765046959, 2},\n\t{1096976902901982192, 2},\n\t{1097196481343585659, 2},\n\t{1098600186416161004, 3},\n\t{1098971261621454594, 0},\n\t{1101574533919586285, 2},\n\t{1103540907477374444, 0},\n\t{1104458717241815284, 0},\n\t{1105716337238831748, 2},\n\t{1107422145359961753, 3},\n\t{1109695895264635363, 1},\n\t{1109881179802283759, 1},\n\t{1112172449402580714, 3},\n\t{1112780658531095458, 0},\n\t{1112842949509741101, 0},\n\t{1112998305966503665, 0},\n\t{1113042365779287127, 0},\n\t{1113836218512251067, 1},\n\t{1114071050576470904, 1},\n\t{1123959651517445868, 2},\n\t{1125695354111055758, 3},\n\t{1126032946029330507, 0},\n\t{1126630237720891742, 0},\n\t{1129976329047212134, 3},\n\t{1133252146556133090, 2},\n\t{1140539915437060710, 1},\n\t{1143627388781178725, 3},\n\t{1144177852994490447, 0},\n\t{1145556519401723288, 1},\n\t{1147708699943533897, 3},\n\t{1149158270324655872, 0},\n\t{1149259869158207995, 0},\n\t{1151195231591349858, 2},\n\t{1153364818832711412, 0},\n\t{1155035678806979322, 1},\n\t{1155221942760407103, 2},\n\t{1156092594311720747, 2},\n\t{1161917366711740583, 3},\n\t{1162466388048355522, 0},\n\t{1162477377748463608, 0},\n\t{1165449813666723245, 3},\n\t{1166272088269700786, 3},\n\t{1167384217230804995, 0},\n\t{1171761989772819802, 0},\n\t{1172643630703075609, 1},\n\t{1178966281490397405, 3},\n\t{1182216014295731409, 2},\n\t{1189676940708782892, 0},\n\t{1190362085911399083, 1},\n\t{1190892817511656345, 1},\n\t{1190951433392755456, 1},\n\t{1191953785197365653, 2},\n\t{1195709260847563914, 2},\n\t{1198837813983177895, 
0},\n\t{1200078427691073336, 1},\n\t{1200930084466958572, 2},\n\t{1205775714778668544, 2},\n\t{1206507629391066206, 3},\n\t{1207484797551178924, 0},\n\t{1208660885988600982, 1},\n\t{1210375749568901747, 3},\n\t{1210734825750224520, 3},\n\t{1215056712177857526, 3},\n\t{1215729586770795501, 3},\n\t{1217088958690918882, 0},\n\t{1217098229391488706, 1},\n\t{1217610351396086751, 1},\n\t{1218443577247217823, 2},\n\t{1219148923633096877, 2},\n\t{1221418521687777852, 0},\n\t{1222548357771775685, 1},\n\t{1224424985739225678, 3},\n\t{1228491195436101511, 3},\n\t{1228993611345388479, 3},\n\t{1236175304256914233, 1},\n\t{1237331744326455050, 2},\n\t{1239968328321956612, 1},\n\t{1239986683838370037, 1},\n\t{1241529130044755830, 2},\n\t{1242024406094218682, 3},\n\t{1243032970746089399, 0},\n\t{1247548208502798631, 0},\n\t{1250803680837412187, 2},\n\t{1251115187899783177, 3},\n\t{1251289280768314584, 3},\n\t{1255186395986322203, 2},\n\t{1255804769636616025, 3},\n\t{1257623871650212619, 0},\n\t{1259606906059702628, 2},\n\t{1260280508665885255, 3},\n\t{1263647334904245808, 2},\n\t{1271213949009556448, 1},\n\t{1272056078013317084, 1},\n\t{1272092814980824469, 1},\n\t{1273353301268581483, 2},\n\t{1275169597522355515, 0},\n\t{1278365862400192935, 3},\n\t{1279798977652505305, 0},\n\t{1282461421617204637, 3},\n\t{1282693600841221350, 3},\n\t{1283918961184995170, 0},\n\t{1286532647834460686, 2},\n\t{1287354075783632460, 3},\n\t{1288133411826919595, 0},\n\t{1289063085949241225, 0},\n\t{1293656137109978348, 0},\n\t{1293732643300419368, 1},\n\t{1296400053830805208, 3},\n\t{1296653902803072700, 3},\n\t{1298276198882856715, 1},\n\t{1300408848478546790, 2},\n\t{1302015108993484551, 0},\n\t{1304432326433248958, 2},\n\t{1304588447361659055, 2},\n\t{1306169389037231067, 0},\n\t{1307776333242619523, 1},\n\t{1308900095114529310, 2},\n\t{1312266611820928577, 1},\n\t{1318936579949486103, 3},\n\t{1321812264368916196, 2},\n\t{1324220586312640505, 0},\n\t{1325540251631451370, 
1},\n\t{1327529584372033689, 3},\n\t{1328855678098796757, 0},\n\t{1329737504329581185, 1},\n\t{1335175570311062918, 1},\n\t{1339196388087145225, 1},\n\t{1342880564241768251, 0},\n\t{1345926424411966953, 3},\n\t{1347428924487193261, 0},\n\t{1347650998776027241, 0},\n\t{1348917497307860143, 2},\n\t{1350140107075203804, 3},\n\t{1352305920148433607, 1},\n\t{1353726696190194404, 2},\n\t{1355081412324984098, 3},\n\t{1358248941840879590, 2},\n\t{1364409949186090703, 3},\n\t{1367310795188030679, 2},\n\t{1368817758288194117, 3},\n\t{1370123731962541445, 0},\n\t{1371150172551586333, 1},\n\t{1372856580863141020, 3},\n\t{1373051972406704766, 3},\n\t{1373574806823150328, 3},\n\t{1378050135726228161, 3},\n\t{1380259432538718654, 1},\n\t{1381239029642314879, 2},\n\t{1382417967811923706, 3},\n\t{1384236866527101123, 1},\n\t{1384267543871591392, 1},\n\t{1387178854716121004, 0},\n\t{1387397682995301202, 0},\n\t{1394553221563288201, 2},\n\t{1398383378572317671, 2},\n\t{1399792476358252050, 3},\n\t{1401418393805019626, 0},\n\t{1402117131624322900, 1},\n\t{1402879261428322998, 2},\n\t{1403500865619565599, 2},\n\t{1404682894575373777, 3},\n\t{1404806749482079980, 3},\n\t{1406103043401372665, 0},\n\t{1409276309328636773, 3},\n\t{1410668021684646948, 0},\n\t{1413972876688782008, 3},\n\t{1414346690926109613, 0},\n\t{1414928731747542872, 0},\n\t{1416356195229226315, 1},\n\t{1416504293928159276, 2},\n\t{1417189303368880684, 2},\n\t{1420600548151686806, 1},\n\t{1424964360742966635, 1},\n\t{1427522872414722372, 3},\n\t{1431883904866309910, 3},\n\t{1434363926078047252, 1},\n\t{1436417281956555896, 3},\n\t{1441626700086218192, 0},\n\t{1442879627917665690, 1},\n\t{1443818553270615499, 2},\n\t{1444328484773338167, 2},\n\t{1445543524566241608, 3},\n\t{1450125052617487218, 3},\n\t{1451172824164603565, 0},\n\t{1451563952130535978, 1},\n\t{1451564548536605484, 1},\n\t{1454078886646757143, 3},\n\t{1454606448084823166, 3},\n\t{1455229561651469301, 0},\n\t{1455376008647936190, 
0},\n\t{1457864801814536524, 2},\n\t{1458260161054384761, 3},\n\t{1460974090967837316, 1},\n\t{1462482973820468724, 2},\n\t{1462678069761419850, 3},\n\t{1463249364939387841, 3},\n\t{1464285581219537996, 0},\n\t{1469435370711233007, 1},\n\t{1471237502676188461, 2},\n\t{1472107924069382547, 3},\n\t{1474215929724959893, 1},\n\t{1481518018950793244, 3},\n\t{1482972720172878059, 1},\n\t{1485242396008881197, 3},\n\t{1485271721606994716, 3},\n\t{1486276198525983554, 0},\n\t{1488661914267733065, 2},\n\t{1490063910931054670, 3},\n\t{1490076144037419359, 3},\n\t{1491693315398379687, 0},\n\t{1493930193617735057, 2},\n\t{1495024674561591180, 3},\n\t{1497244913564476265, 1},\n\t{1503076669528790858, 3},\n\t{1514318061541755182, 0},\n\t{1514825661871054352, 1},\n\t{1515254787027393594, 1},\n\t{1519313438571860645, 1},\n\t{1527623267955373855, 0},\n\t{1529800249090541766, 2},\n\t{1531466454557379986, 0},\n\t{1538024964073812442, 2},\n\t{1538305633461972611, 2},\n\t{1538420655160639269, 2},\n\t{1540776694169428903, 0},\n\t{1541257968642167797, 0},\n\t{1541423753282510129, 1},\n\t{1542256467787600283, 1},\n\t{1542859235863580185, 2},\n\t{1544038191617474684, 3},\n\t{1544869788517694714, 0},\n\t{1546856695214145176, 1},\n\t{1548798594257444732, 3},\n\t{1549309662595324668, 0},\n\t{1556792364019312007, 2},\n\t{1557581171296230318, 3},\n\t{1559474092243938650, 1},\n\t{1561179576101225726, 2},\n\t{1562267537644065439, 3},\n\t{1562964254403371708, 0},\n\t{1563485741465461080, 0},\n\t{1567146212701192752, 3},\n\t{1568228349793808253, 0},\n\t{1569039468256256562, 1},\n\t{1569907446894739842, 2},\n\t{1582523014668799630, 1},\n\t{1582934409324620386, 1},\n\t{1589234639648872732, 3},\n\t{1592150242269670743, 2},\n\t{1594340452056863036, 0},\n\t{1596045719363032197, 1},\n\t{1597289653962341551, 2},\n\t{1598695035110472052, 3},\n\t{1599576409118296865, 0},\n\t{1599852532912659023, 0},\n\t{1601509798243168151, 2},\n\t{1605422202025241430, 1},\n\t{1609763743127140922, 
1},\n\t{1611532661557778301, 3},\n\t{1612074294355160128, 3},\n\t{1612364221800206638, 0},\n\t{1616918073161810848, 0},\n\t{1618313144051551912, 1},\n\t{1621300337206651673, 0},\n\t{1621584661605830430, 0},\n\t{1624261576589018403, 2},\n\t{1625991241159469153, 0},\n\t{1629449308394550959, 3},\n\t{1631280909760658597, 0},\n\t{1631569970688956569, 1},\n\t{1631981332465356379, 1},\n\t{1635396583089667468, 0},\n\t{1636190769590009107, 1},\n\t{1636331153397267766, 1},\n\t{1637321391733825015, 2},\n\t{1642157344648127337, 2},\n\t{1645963756409166326, 1},\n\t{1647876473784447240, 3},\n\t{1649364507682110219, 0},\n\t{1654943980327399792, 1},\n\t{1655875831343947428, 2},\n\t{1657005815545785647, 3},\n\t{1663427164777559453, 1},\n\t{1664757933041586988, 2},\n\t{1665187587282623339, 2},\n\t{1665798635809173785, 3},\n\t{1676223687738663955, 0},\n\t{1676576784339641631, 1},\n\t{1677713773937543754, 2},\n\t{1679605669637525582, 3},\n\t{1680113678999614981, 0},\n\t{1683114076245164749, 2},\n\t{1684883469382854259, 0},\n\t{1685413085635042295, 0},\n\t{1685636095767489365, 1},\n\t{1687960475414833568, 3},\n\t{1688370358417382632, 3},\n\t{1688916584333141057, 0},\n\t{1689051313101462494, 0},\n\t{1690061097090002333, 1},\n\t{1690594162622540928, 1},\n\t{1690674022256346341, 1},\n\t{1691204926006975703, 2},\n\t{1693473161040945604, 0},\n\t{1696109183115885576, 2},\n\t{1697562721464187664, 3},\n\t{1698135496890692840, 0},\n\t{1699114282818758467, 1},\n\t{1700510067828427876, 2},\n\t{1700514329078996739, 2},\n\t{1701319489031149236, 3},\n\t{1704291712998945652, 1},\n\t{1704839074194920736, 2},\n\t{1705240537533440738, 2},\n\t{1706111423045900983, 3},\n\t{1706118807388848261, 3},\n\t{1710906493552684399, 3},\n\t{1712650752558735340, 1},\n\t{1713721224870738440, 2},\n\t{1715062791537722713, 3},\n\t{1715325788738796308, 3},\n\t{1717569349928912994, 1},\n\t{1722462405438661312, 1},\n\t{1724069996931357019, 3},\n\t{1726695619902932254, 1},\n\t{1732086766094171258, 
2},\n\t{1743456421918443792, 0},\n\t{1743919402225504573, 0},\n\t{1744421355624303707, 1},\n\t{1745478346290347545, 2},\n\t{1749350372373441488, 1},\n\t{1751643584586604687, 3},\n\t{1752522540869410670, 0},\n\t{1753545218943935422, 1},\n\t{1753966982237063458, 1},\n\t{1754341998378326322, 2},\n\t{1754611850019271074, 2},\n\t{1754733336441909569, 2},\n\t{1755754271040716255, 3},\n\t{1756675024418822077, 0},\n\t{1757286671389717993, 0},\n\t{1758880903514880813, 2},\n\t{1759218920988292014, 2},\n\t{1762008893023215563, 0},\n\t{1762657471677942089, 1},\n\t{1763984786690737875, 2},\n\t{1765669432992014693, 0},\n\t{1767668741349669867, 2},\n\t{1768934859242607960, 3},\n\t{1772154580420775975, 1},\n\t{1773288756206266287, 2},\n\t{1774457240205898202, 0},\n\t{1777429312201247201, 2},\n\t{1778557028194634154, 3},\n\t{1780836547286869999, 1},\n\t{1783041429131962847, 3},\n\t{1783886775919968269, 0},\n\t{1786432829603837256, 2},\n\t{1789144601278502758, 1},\n\t{1791840132956910642, 3},\n\t{1792623996530670744, 0},\n\t{1793963495385336209, 1},\n\t{1794559612137751486, 1},\n\t{1795121974086810382, 2},\n\t{1797071271526879895, 0},\n\t{1798228779848436515, 1},\n\t{1800989224958926691, 3},\n\t{1808323284489291203, 2},\n\t{1809794557324136694, 3},\n\t{1812860392084017672, 2},\n\t{1813175122091933019, 2},\n\t{1815005475251028405, 0},\n\t{1815835533330475455, 0},\n\t{1817513196550999235, 2},\n\t{1822986072271453107, 3},\n\t{1823729927791719392, 3},\n\t{1824026578138183898, 0},\n\t{1834516432588114945, 1},\n\t{1836674077893607520, 3},\n\t{1838460038563224348, 0},\n\t{1839190532130847758, 1},\n\t{1840847394184144433, 3},\n\t{1845449733049728993, 3},\n\t{1845812409014775527, 3},\n\t{1850197817508690483, 3},\n\t{1850234532054411344, 3},\n\t{1850295190310069365, 3},\n\t{1850489936829771291, 3},\n\t{1854780136859916723, 3},\n\t{1855000518509739830, 3},\n\t{1855277795554843688, 3},\n\t{1857091294769821287, 1},\n\t{1860348451187850559, 0},\n\t{1868540031370038668, 
3},\n\t{1873009988545680134, 3},\n\t{1873578126483341325, 0},\n\t{1874079854203843783, 0},\n\t{1877537482152735761, 3},\n\t{1877578913641367408, 3},\n\t{1884954267934626405, 2},\n\t{1885247068441945414, 2},\n\t{1886595055962931020, 3},\n\t{1888420607623793057, 1},\n\t{1889042245272934087, 1},\n\t{1890850676047892556, 3},\n\t{1895587662480657826, 3},\n\t{1895795960219106941, 3},\n\t{1896313334551158327, 0},\n\t{1897257524438295283, 1},\n\t{1902610113851977991, 1},\n\t{1903751297035882168, 2},\n\t{1905593435891678536, 0},\n\t{1906062485406968362, 0},\n\t{1908972624752386690, 3},\n\t{1909416093687517012, 3},\n\t{1910502098048984037, 0},\n\t{1913063781993275919, 3},\n\t{1913739619631529557, 3},\n\t{1914784270455993301, 0},\n\t{1916323113388973127, 2},\n\t{1916691605488781670, 2},\n\t{1917536737215622089, 3},\n\t{1920852431231078915, 2},\n\t{1921157041622943651, 2},\n\t{1921245036485593838, 2},\n\t{1922217526967870291, 3},\n\t{1923492356840387470, 0},\n\t{1924914014411934442, 1},\n\t{1926075864558563321, 2},\n\t{1927706707312966382, 0},\n\t{1927829596674533023, 0},\n\t{1929517350415868639, 1},\n\t{1929925893751789604, 2},\n\t{1930385169294511045, 2},\n\t{1931843700584769296, 3},\n\t{1932327528167692711, 0},\n\t{1932463622659522205, 0},\n\t{1933308397046355035, 1},\n\t{1934682859155462898, 2},\n\t{1937239815325678608, 0},\n\t{1940449000618018928, 3},\n\t{1942075580176447377, 0},\n\t{1944659937569877049, 3},\n\t{1946481587806225383, 0},\n\t{1946646054528896893, 0},\n\t{1946762991795262422, 1},\n\t{1947652641404803424, 1},\n\t{1948879964504746475, 2},\n\t{1951421442121855393, 1},\n\t{1956294880541934048, 1},\n\t{1959092776460949437, 0},\n\t{1959388868105189237, 0},\n\t{1959924580575848220, 0},\n\t{1961532401774648744, 2},\n\t{1962722263775069931, 3},\n\t{1965360840295464456, 1},\n\t{1966627651009049412, 2},\n\t{1967544277611116532, 3},\n\t{1967618869007279712, 3},\n\t{1969789791121359174, 1},\n\t{1973522326953101916, 0},\n\t{1974119876556401010, 
1},\n\t{1977827082687434826, 0},\n\t{1979474482463219887, 2},\n\t{1982111314202016634, 0},\n\t{1982778271857132210, 1},\n\t{1988018297876870438, 1},\n\t{1990204698749745565, 3},\n\t{1991073766335982620, 0},\n\t{1994032527043715463, 3},\n\t{1994818308703674801, 3},\n\t{1995061051082325577, 3},\n\t{1997113348408442310, 1},\n\t{1999078881456502566, 3},\n\t{2000394933619766711, 0},\n\t{2001649524261917768, 1},\n\t{2002220664428654473, 2},\n\t{2010755535977040659, 1},\n\t{2011611258746293387, 2},\n\t{2017059831021637818, 3},\n\t{2017361971194385422, 3},\n\t{2018600927603271389, 0},\n\t{2019018434705797619, 1},\n\t{2019767793884827698, 1},\n\t{2023856286326487762, 1},\n\t{2025476593669189764, 2},\n\t{2026248167944608660, 3},\n\t{2027230776019468468, 0},\n\t{2028336964576645850, 1},\n\t{2029726497975634795, 2},\n\t{2030121649743892300, 3},\n\t{2032427348072681901, 1},\n\t{2033859291499173203, 2},\n\t{2037364473579107846, 1},\n\t{2037731274149687583, 1},\n\t{2038909866534233882, 2},\n\t{2042947124590187001, 2},\n\t{2045126817847517348, 0},\n\t{2045608862713241073, 0},\n\t{2047470413794678109, 2},\n\t{2047526262087379413, 2},\n\t{2048149343483780953, 3},\n\t{2050724059043279348, 1},\n\t{2051573162235110655, 2},\n\t{2052292098375195633, 2},\n\t{2055159237887132923, 1},\n\t{2055935761964482527, 2},\n\t{2057351520428769677, 3},\n\t{2057968463543241521, 3},\n\t{2060458934549147135, 2},\n\t{2061021775852935788, 2},\n\t{2062030053579463457, 3},\n\t{2062466841257187767, 3},\n\t{2062958173290906988, 0},\n\t{2063770370078969743, 0},\n\t{2064812986130755772, 1},\n\t{2065009300959985242, 2},\n\t{2068222904914639301, 0},\n\t{2070047604173407488, 2},\n\t{2071368820688747993, 3},\n\t{2072234148640698596, 0},\n\t{2072745192389883601, 0},\n\t{2073538256022502110, 1},\n\t{2074645637035817522, 2},\n\t{2076413005833620879, 0},\n\t{2078985226503809537, 2},\n\t{2079729471625994340, 3},\n\t{2080637927402485838, 3},\n\t{2081350534423472833, 0},\n\t{2085756405478394474, 
0},\n\t{2086000883676150572, 0},\n\t{2089245836534955715, 3},\n\t{2089353920849553554, 3},\n\t{2089793043006101152, 0},\n\t{2094163138138120504, 3},\n\t{2095619800171161797, 1},\n\t{2097741731246290187, 3},\n\t{2099310436397692136, 0},\n\t{2099759775829597507, 0},\n\t{2100671181442074773, 1},\n\t{2102243580106409111, 3},\n\t{2102458379346049041, 3},\n\t{2108040728240666622, 0},\n\t{2108630806204455492, 0},\n\t{2111506113223537678, 3},\n\t{2113609219352315034, 1},\n\t{2114358141468174562, 1},\n\t{2114446445393174009, 2},\n\t{2115495849030367870, 2},\n\t{2115835362754613108, 3},\n\t{2117449501413282848, 0},\n\t{2118425314004844420, 1},\n\t{2122540397581077008, 1},\n\t{2124412794412385511, 2},\n\t{2124760302722833621, 3},\n\t{2125392675097197794, 3},\n\t{2125540223666299288, 3},\n\t{2132067412399680819, 1},\n\t{2133790780277651741, 3},\n\t{2133824145787987614, 3},\n\t{2137711397017115015, 2},\n\t{2137785611590843728, 2},\n\t{2141088885788980201, 1},\n\t{2142691165443140155, 3},\n\t{2144758273000772011, 0},\n\t{2145747129304148247, 1},\n\t{2146987675126980373, 2},\n\t{2152877175356655509, 0},\n\t{2154770295320264124, 1},\n\t{2155969914125641507, 2},\n\t{2156571083762545331, 3},\n\t{2160904111632154517, 3},\n\t{2162854547064751339, 1},\n\t{2163756529306353658, 1},\n\t{2164289638652118406, 2},\n\t{2165498862684946919, 3},\n\t{2167255763453934226, 0},\n\t{2169277897073662877, 2},\n\t{2171016813670421951, 0},\n\t{2171395783435526075, 0},\n\t{2173277617212055909, 2},\n\t{2177911771237386394, 2},\n\t{2178724703213895395, 3},\n\t{2181335670246505309, 1},\n\t{2185005309073901372, 0},\n\t{2185067350786174253, 0},\n\t{2190703127780560028, 1},\n\t{2194465897849084042, 1},\n\t{2196759984502755922, 3},\n\t{2198012497691454757, 0},\n\t{2198738782269933396, 0},\n\t{2199547456013532204, 1},\n\t{2201207357179402530, 3},\n\t{2203106756112700978, 0},\n\t{2203353888936245906, 0},\n\t{2207063012462218110, 0},\n\t{2207467518381119303, 0},\n\t{2208121136162133502, 
1},\n\t{2210655146895577907, 3},\n\t{2213680692102252663, 2},\n\t{2214710054544757991, 3},\n\t{2215568872963026502, 3},\n\t{2216271935634098934, 0},\n\t{2217996514155256405, 1},\n\t{2220946260301041719, 0},\n\t{2221527757268392661, 1},\n\t{2227522142602323759, 2},\n\t{2228398927083210419, 3},\n\t{2228747564084586798, 3},\n\t{2233044195768004797, 3},\n\t{2234559833801756429, 0},\n\t{2234726675445484237, 0},\n\t{2234928115260701999, 1},\n\t{2238752974981530712, 0},\n\t{2238801118171206190, 0},\n\t{2241120924748579541, 2},\n\t{2243142255386054977, 0},\n\t{2244071294080495237, 1},\n\t{2245377241549771619, 2},\n\t{2248114341304047594, 0},\n\t{2250380280226333243, 2},\n\t{2251708078441275933, 3},\n\t{2253454098611817689, 1},\n\t{2253991235881223902, 1},\n\t{2255741720806302139, 3},\n\t{2257557576713061641, 1},\n\t{2259501396667277667, 2},\n\t{2264241181899746198, 3},\n\t{2264749090329145115, 3},\n\t{2266012157266024767, 0},\n\t{2268462385797138312, 2},\n\t{2273029876326768443, 2},\n\t{2274777688076642652, 0},\n\t{2276824438056155480, 2},\n\t{2278519180919047240, 3},\n\t{2279023830500096268, 0},\n\t{2282213406662803980, 3},\n\t{2282273797294037842, 3},\n\t{2282976378897651913, 3},\n\t{2283504484428765791, 0},\n\t{2291546503161845062, 3},\n\t{2291572111288603505, 3},\n\t{2292409904601552080, 0},\n\t{2293113280352873494, 0},\n\t{2294122475708981176, 1},\n\t{2294234201942509742, 1},\n\t{2296279427432905932, 3},\n\t{2298861869255406419, 1},\n\t{2299216679127802466, 2},\n\t{2299757314951607684, 2},\n\t{2303303803946912624, 1},\n\t{2303905763870364788, 2},\n\t{2305786485934368835, 3},\n\t{2307091314388146200, 1},\n\t{2307165296271925968, 1},\n\t{2314769323078349734, 3},\n\t{2323324255863457448, 3},\n\t{2325797337688793315, 1},\n\t{2328086925039006711, 3},\n\t{2328689956156821661, 0},\n\t{2329107869150758172, 0},\n\t{2329624156079839349, 1},\n\t{2329668520797451478, 1},\n\t{2330630090949070351, 2},\n\t{2331290873189060905, 2},\n\t{2331712960409419724, 
2},\n\t{2336835159949449510, 3},\n\t{2339438951562803527, 1},\n\t{2341552435338439783, 3},\n\t{2342666698370816160, 0},\n\t{2346375421856884525, 0},\n\t{2348335974982666089, 1},\n\t{2350079285614523595, 3},\n\t{2351673035477543941, 0},\n\t{2353236410327271024, 2},\n\t{2353334692556161997, 2},\n\t{2354847158095309636, 3},\n\t{2357503292681763718, 1},\n\t{2358962825949288247, 3},\n\t{2362012222781705014, 1},\n\t{2362192470525375733, 2},\n\t{2363548503534177471, 3},\n\t{2364964208926739349, 0},\n\t{2364984617763080065, 0},\n\t{2366292172941057674, 1},\n\t{2369233438093683600, 0},\n\t{2369661455122381900, 0},\n\t{2373288232406530506, 3},\n\t{2374846948125402513, 1},\n\t{2376964498278064864, 3},\n\t{2381247964487272646, 2},\n\t{2382338625359378117, 3},\n\t{2386580687654686883, 3},\n\t{2387304192572085767, 0},\n\t{2391914954532445471, 0},\n\t{2394058413024949413, 2},\n\t{2395585502358415412, 3},\n\t{2397902786314999500, 1},\n\t{2401452743152199348, 0},\n\t{2403815083795836298, 3},\n\t{2404491811373910779, 3},\n\t{2405011667061971612, 0},\n\t{2406196883562974963, 1},\n\t{2406471351344320384, 1},\n\t{2406871345258971158, 1},\n\t{2408429439933862720, 3},\n\t{2414862267573462573, 0},\n\t{2420068100786391340, 1},\n\t{2421568807384469382, 2},\n\t{2422547369165411062, 3},\n\t{2424066531948784486, 1},\n\t{2426577126045088353, 3},\n\t{2428836792551506375, 1},\n\t{2429276828787153458, 1},\n\t{2430529664464207989, 2},\n\t{2430896623150313760, 3},\n\t{2431367072608388491, 3},\n\t{2431683881917828749, 3},\n\t{2431879362670788688, 3},\n\t{2432283942664407013, 0},\n\t{2432326168490032839, 0},\n\t{2433954011467600146, 1},\n\t{2434199041146110021, 2},\n\t{2434466184696633782, 2},\n\t{2435773018803773701, 3},\n\t{2436923351665174906, 0},\n\t{2437808689521092135, 1},\n\t{2438823054316013357, 2},\n\t{2438946896501257890, 2},\n\t{2440906430462170926, 3},\n\t{2444598071916309839, 3},\n\t{2445093168862303470, 3},\n\t{2451147558538712382, 1},\n\t{2455583005749532232, 
0},\n\t{2455662914092736100, 1},\n\t{2456020378434434293, 1},\n\t{2462724369151433378, 3},\n\t{2462834811602523909, 3},\n\t{2463424786223129095, 3},\n\t{2465307774524670296, 1},\n\t{2465760423296170162, 2},\n\t{2466122210378917951, 2},\n\t{2467044285986958421, 3},\n\t{2470904541582627672, 2},\n\t{2471999810922273791, 3},\n\t{2477639286958802538, 0},\n\t{2478525812782775062, 1},\n\t{2483718328863080542, 1},\n\t{2496360792245722041, 1},\n\t{2497143467543750984, 1},\n\t{2499884280123589992, 0},\n\t{2500028924844180328, 0},\n\t{2500283785880651154, 0},\n\t{2501454724139367664, 1},\n\t{2504448627158541538, 0},\n\t{2504847052864715234, 0},\n\t{2505551581785959673, 1},\n\t{2508933052051901263, 0},\n\t{2509655595311508378, 1},\n\t{2510664853925284771, 1},\n\t{2514838403717412536, 1},\n\t{2515940280332096506, 2},\n\t{2517591514635144927, 0},\n\t{2520497157330179474, 2},\n\t{2527061984955192280, 0},\n\t{2529680322869109038, 2},\n\t{2529790129620983307, 2},\n\t{2530203331469325359, 3},\n\t{2530355359206443934, 3},\n\t{2534200853592486176, 2},\n\t{2534303282622218582, 2},\n\t{2535164582684345706, 3},\n\t{2535366566823422233, 3},\n\t{2535705539768053478, 0},\n\t{2535775646972511588, 0},\n\t{2536153696112825851, 0},\n\t{2538618081453212263, 2},\n\t{2542816730339936264, 2},\n\t{2545627011064020299, 0},\n\t{2546462063511371390, 1},\n\t{2547037579654373658, 2},\n\t{2549750940817655706, 0},\n\t{2549778925621307832, 0},\n\t{2553498590878748960, 3},\n\t{2556144963661568652, 2},\n\t{2558463272079048142, 0},\n\t{2558723170797460449, 0},\n\t{2559228007059623930, 1},\n\t{2562486897432441957, 3},\n\t{2562770906079014119, 0},\n\t{2564817434086127843, 2},\n\t{2564951538593907755, 2},\n\t{2566222442735478578, 3},\n\t{2567517694216761028, 0},\n\t{2568015560315586994, 0},\n\t{2569320940991805551, 2},\n\t{2569420726395644236, 2},\n\t{2570441294823249065, 3},\n\t{2574434797060318240, 2},\n\t{2578241662485981646, 1},\n\t{2579545784127663565, 3},\n\t{2580461106497514381, 
3},\n\t{2581695239002458057, 1},\n\t{2581909222804939588, 1},\n\t{2583007097462047814, 2},\n\t{2587112489049674511, 1},\n\t{2590170021285918711, 0},\n\t{2593640489718234785, 3},\n\t{2595624544247016648, 1},\n\t{2596351564679594002, 2},\n\t{2599759378792195557, 1},\n\t{2601165307394814877, 2},\n\t{2601815154755466835, 2},\n\t{2605598839509508371, 2},\n\t{2606692450807190811, 3},\n\t{2610951885650626733, 2},\n\t{2614692013380081203, 2},\n\t{2616826221426928716, 0},\n\t{2617690121682005379, 0},\n\t{2622490204131674973, 1},\n\t{2625575217743043684, 3},\n\t{2634461472579444998, 3},\n\t{2635815124457594140, 1},\n\t{2636194650119474953, 1},\n\t{2638253349147643366, 3},\n\t{2647035632560182683, 3},\n\t{2648058577250918701, 3},\n\t{2648950295752540988, 0},\n\t{2650074027117480961, 1},\n\t{2652049559061213827, 3},\n\t{2653202925648673114, 0},\n\t{2655671937238131365, 2},\n\t{2657520682757578172, 0},\n\t{2666212218483767809, 0},\n\t{2667137763686944838, 0},\n\t{2669102904289985338, 2},\n\t{2674475996987390602, 3},\n\t{2675585244415597173, 0},\n\t{2677199513735423099, 1},\n\t{2679566211267948232, 3},\n\t{2680416926292157197, 0},\n\t{2680528703268640208, 0},\n\t{2680811156203768409, 1},\n\t{2680915574305481678, 1},\n\t{2683456476753756613, 3},\n\t{2685623252105967824, 1},\n\t{2686359949176722197, 1},\n\t{2690961240338209707, 2},\n\t{2691181709153216317, 2},\n\t{2691711657162475044, 2},\n\t{2693526104072385190, 0},\n\t{2694010528783099020, 0},\n\t{2695083755138402305, 1},\n\t{2697674984229467416, 0},\n\t{2700734775711949772, 2},\n\t{2705807250171775702, 3},\n\t{2706057281606222296, 3},\n\t{2712266804197956772, 0},\n\t{2713623080116796607, 2},\n\t{2719198033500690999, 3},\n\t{2723450134372576974, 2},\n\t{2723909033824677888, 3},\n\t{2724287676937014168, 3},\n\t{2724387710015603466, 3},\n\t{2724759159169259940, 0},\n\t{2737261301435517004, 3},\n\t{2738823627901346124, 0},\n\t{2741398628174060450, 2},\n\t{2743994062301557593, 1},\n\t{2749376327638446782, 
1},\n\t{2750862636124883597, 3},\n\t{2751251495790745295, 3},\n\t{2753197909957120256, 1},\n\t{2754185900235614523, 2},\n\t{2756310561073741291, 0},\n\t{2759431311846195548, 2},\n\t{2760133346949424209, 3},\n\t{2761617674770708877, 0},\n\t{2765733986876946854, 0},\n\t{2766679642997634862, 1},\n\t{2767197662113150553, 1},\n\t{2767771878270971276, 2},\n\t{2769522714752031731, 3},\n\t{2771175863079776853, 1},\n\t{2776622740325377108, 2},\n\t{2780015829871747136, 1},\n\t{2780054321163820754, 1},\n\t{2780430617716026729, 1},\n\t{2780633109268615078, 1},\n\t{2782880515675979817, 3},\n\t{2784433032889329891, 1},\n\t{2785103624996640646, 1},\n\t{2786997073576183085, 3},\n\t{2789885466669654417, 1},\n\t{2790457233879669706, 2},\n\t{2791090177013426788, 2},\n\t{2791209212034089992, 3},\n\t{2794486033072035993, 2},\n\t{2794974019087075836, 2},\n\t{2799025748014484496, 2},\n\t{2804071962427612307, 2},\n\t{2805510684508922545, 3},\n\t{2805529119175269965, 3},\n\t{2806001319051376402, 0},\n\t{2807444582417683367, 1},\n\t{2807991225952968108, 1},\n\t{2808402241710171507, 2},\n\t{2808512605043590064, 2},\n\t{2808867127988664760, 2},\n\t{2808907409659557936, 2},\n\t{2809766678327956313, 3},\n\t{2810116705398371018, 3},\n\t{2810632065720403207, 0},\n\t{2812722291926807750, 2},\n\t{2813517957255188132, 2},\n\t{2814838091645615265, 0},\n\t{2814864010004493567, 0},\n\t{2815094356895474885, 0},\n\t{2817919826003254115, 2},\n\t{2818515408154101807, 3},\n\t{2819245983651230926, 3},\n\t{2819285527875791334, 0},\n\t{2821516635140975869, 2},\n\t{2825282157329577481, 1},\n\t{2827040147131093658, 2},\n\t{2829356330824726587, 0},\n\t{2830433674949438859, 1},\n\t{2833367299815299138, 0},\n\t{2833879988422799546, 0},\n\t{2833986686334959987, 1},\n\t{2836343550366458287, 3},\n\t{2836393609560694794, 3},\n\t{2836910113093083321, 3},\n\t{2838919278929826733, 1},\n\t{2839035817307118471, 1},\n\t{2840406940647032152, 2},\n\t{2844615017042294501, 2},\n\t{2845888405618126475, 
3},\n\t{2847788564242379871, 1},\n\t{2849686826586096145, 3},\n\t{2851615259847550363, 0},\n\t{2852163895967989861, 1},\n\t{2853285246650312372, 2},\n\t{2856555073458733324, 1},\n\t{2861278927883096247, 1},\n\t{2865144123732194841, 0},\n\t{2868019371222510553, 3},\n\t{2868326802248882075, 3},\n\t{2870357086968320399, 1},\n\t{2874302010494084279, 0},\n\t{2875038447036413171, 1},\n\t{2876873153177572860, 3},\n\t{2879928191030473629, 1},\n\t{2880549659795797383, 2},\n\t{2882142330292643005, 3},\n\t{2883235815820045521, 0},\n\t{2884016388364485602, 1},\n\t{2888979797852181944, 1},\n\t{2890472934831498655, 3},\n\t{2890803238165579622, 3},\n\t{2894403265184929473, 2},\n\t{2898718228184616335, 2},\n\t{2900447284570352596, 0},\n\t{2900718917138543594, 0},\n\t{2901012764493815344, 0},\n\t{2901286729252361434, 0},\n\t{2906222920763114369, 1},\n\t{2906291151471163111, 1},\n\t{2907468202673010362, 2},\n\t{2908752424625161566, 3},\n\t{2910603749229313474, 1},\n\t{2910715460713191325, 1},\n\t{2911182164286716385, 1},\n\t{2914531349475574883, 0},\n\t{2914897181900369267, 0},\n\t{2915779556717987728, 1},\n\t{2916878765340049397, 2},\n\t{2921726276898584519, 3},\n\t{2922223716818812445, 3},\n\t{2923187916591191368, 0},\n\t{2930898086084142673, 3},\n\t{2935453779507915759, 3},\n\t{2940394065321057825, 3},\n\t{2942249056508750688, 1},\n\t{2944988364721981116, 3},\n\t{2946215746790627520, 0},\n\t{2946234501266189024, 0},\n\t{2947762591807146427, 2},\n\t{2950441601736726626, 0},\n\t{2953590053689388406, 3},\n\t{2955083219635037676, 0},\n\t{2956369695319909764, 1},\n\t{2957187483705351084, 2},\n\t{2958383540293237445, 3},\n\t{2959248584834778099, 0},\n\t{2959983536238765557, 0},\n\t{2960089759082884888, 1},\n\t{2960687625428737204, 1},\n\t{2962170707839957521, 2},\n\t{2962497156445981797, 3},\n\t{2965958019637853471, 2},\n\t{2967337895372970972, 3},\n\t{2967813052208817537, 3},\n\t{2968644679462056262, 0},\n\t{2969122328473140919, 1},\n\t{2969887650706214656, 
1},\n\t{2970262718495597768, 2},\n\t{2975454360642530618, 2},\n\t{2978567385517976844, 1},\n\t{2979472944889523117, 2},\n\t{2980443149539340869, 3},\n\t{2981032829354224147, 3},\n\t{2981053333496081480, 3},\n\t{2982941799858270243, 1},\n\t{2983276221309770349, 1},\n\t{2984039284535689107, 2},\n\t{2984359113848658706, 2},\n\t{2985519757225120843, 3},\n\t{2985774419674241746, 3},\n\t{2992670909726052831, 2},\n\t{2994380778306300579, 3},\n\t{2994472560058517684, 3},\n\t{2994929426316354364, 0},\n\t{2995351020165597450, 0},\n\t{2996400569037062551, 1},\n\t{2996460494598025727, 1},\n\t{2996946015951864503, 1},\n\t{2997121096690421793, 1},\n\t{2998675450639619662, 3},\n\t{2999777588514915565, 0},\n\t{3001013256844921801, 1},\n\t{3001470156336614070, 1},\n\t{3005280564450022975, 1},\n\t{3005412098938467178, 1},\n\t{3007773003675651623, 3},\n\t{3010330133131778156, 1},\n\t{3012142671577145491, 3},\n\t{3014803894911548373, 1},\n\t{3017075361970622737, 3},\n\t{3018861256983852596, 1},\n\t{3022615273023457947, 0},\n\t{3023045781943514175, 1},\n\t{3023208805483103069, 1},\n\t{3023843951636426506, 1},\n\t{3025073437638076106, 2},\n\t{3025271286408490614, 2},\n\t{3025793919253347393, 3},\n\t{3026667474164639726, 0},\n\t{3026817131257607710, 0},\n\t{3026903947826973844, 0},\n\t{3033255605659601991, 2},\n\t{3033782327290942742, 2},\n\t{3035085710878911110, 3},\n\t{3036996057423969516, 1},\n\t{3039219789250287978, 3},\n\t{3040265781862434326, 0},\n\t{3041613303364073358, 1},\n\t{3046274336834390459, 1},\n\t{3049325758373576761, 0},\n\t{3049675863200393531, 0},\n\t{3050953930570256518, 1},\n\t{3051150236398028196, 1},\n\t{3053184759058143180, 3},\n\t{3057326349519192987, 3},\n\t{3061310152151845604, 2},\n\t{3062147342587982776, 3},\n\t{3062285889017609928, 3},\n\t{3064506194388069949, 1},\n\t{3064764910448561709, 2},\n\t{3064931524548739208, 2},\n\t{3066475996325074886, 3},\n\t{3066687788824831459, 3},\n\t{3067162026500984106, 0},\n\t{3070166643663101695, 
2},\n\t{3071448976285722904, 3},\n\t{3072978486010636476, 1},\n\t{3073165006734413870, 1},\n\t{3073335772734840531, 1},\n\t{3074524141632442126, 2},\n\t{3075576965655105265, 3},\n\t{3079680861869243315, 3},\n\t{3079824004392180327, 3},\n\t{3079929730930687472, 3},\n\t{3080090586309517552, 3},\n\t{3084063011640714994, 3},\n\t{3086895493337929669, 1},\n\t{3088326304239481997, 2},\n\t{3091964165303019487, 2},\n\t{3092515539583624002, 2},\n\t{3092640703412310837, 2},\n\t{3092720873175528484, 2},\n\t{3092779397784027407, 2},\n\t{3097122257072133677, 2},\n\t{3099999133873938068, 1},\n\t{3107145493620563786, 3},\n\t{3108966224162852822, 1},\n\t{3116350475142345464, 3},\n\t{3117050603386086052, 0},\n\t{3118262049827060432, 1},\n\t{3118784934153119073, 2},\n\t{3120117449135127598, 3},\n\t{3121383291838196664, 0},\n\t{3123576538713838778, 2},\n\t{3127449853255374688, 1},\n\t{3127679911412836645, 1},\n\t{3129172992179304375, 3},\n\t{3130338849051603144, 0},\n\t{3131305273839156054, 1},\n\t{3134253197458850223, 3},\n\t{3136914495729410564, 2},\n\t{3137515800845216379, 2},\n\t{3137786968001789387, 2},\n\t{3140985220404782640, 1},\n\t{3141689570859898046, 2},\n\t{3143468150308699157, 3},\n\t{3148802513167201267, 0},\n\t{3149509305275409516, 1},\n\t{3150327677611071457, 2},\n\t{3150341634535621630, 2},\n\t{3151374456746403597, 2},\n\t{3154665029035864193, 1},\n\t{3154973020790924649, 2},\n\t{3155002465120584160, 2},\n\t{3155788632933354250, 2},\n\t{3156731606556372716, 3},\n\t{3163758970508620307, 1},\n\t{3164468701080362455, 2},\n\t{3170784045582909952, 0},\n\t{3175263316462616166, 0},\n\t{3176841324862592322, 1},\n\t{3176851520232777914, 1},\n\t{3178880178851110560, 3},\n\t{3178890916332075338, 3},\n\t{3183536397839160912, 3},\n\t{3183569897407931926, 3},\n\t{3184184069617293743, 0},\n\t{3186950263224909931, 2},\n\t{3188323067916717418, 3},\n\t{3190893012851929825, 2},\n\t{3193648970489939740, 0},\n\t{3195941797030440996, 2},\n\t{3199120263433932749, 
1},\n\t{3201877037286286734, 3},\n\t{3202043479278758297, 3},\n\t{3202208009093727212, 0},\n\t{3204922450327338704, 2},\n\t{3207386309853249556, 0},\n\t{3208589314514124571, 1},\n\t{3211999944184082143, 0},\n\t{3217733251093768338, 1},\n\t{3217863986972508187, 2},\n\t{3218548089531638887, 2},\n\t{3220804580361818619, 0},\n\t{3221802948653988300, 1},\n\t{3222063330534997880, 1},\n\t{3222175500858351947, 1},\n\t{3224465617760416517, 3},\n\t{3224741691711482653, 0},\n\t{3226563848048972505, 1},\n\t{3228029113764896033, 3},\n\t{3231465918077371556, 2},\n\t{3231571389956958076, 2},\n\t{3234974950106046903, 1},\n\t{3235267709230474881, 1},\n\t{3242634779375207474, 0},\n\t{3244287871640915516, 1},\n\t{3244964180557920894, 2},\n\t{3246011120346063165, 3},\n\t{3249690133482337629, 2},\n\t{3253049886197329853, 1},\n\t{3257072904735968364, 0},\n\t{3257317678812554333, 1},\n\t{3257768525261421241, 1},\n\t{3258376282833299565, 2},\n\t{3259233422798502340, 2},\n\t{3260429946026406252, 3},\n\t{3263546291230689838, 2},\n\t{3264284458288962534, 3},\n\t{3264424389909851169, 3},\n\t{3264578604791845705, 3},\n\t{3265895083621174065, 0},\n\t{3266946485108558087, 1},\n\t{3269417575203973678, 3},\n\t{3273830015016831383, 3},\n\t{3275732804591488302, 1},\n\t{3277082567430551111, 2},\n\t{3279515506911227209, 0},\n\t{3284828307592935660, 1},\n\t{3287870391570586907, 0},\n\t{3292344906188978752, 0},\n\t{3293947362427444949, 1},\n\t{3294454008376456139, 2},\n\t{3297882651585997978, 1},\n\t{3298238903573193961, 1},\n\t{3301289118056825704, 0},\n\t{3301782510674936808, 0},\n\t{3306536363753618949, 0},\n\t{3307962975880785118, 2},\n\t{3309285424640852885, 3},\n\t{3309416672368239775, 3},\n\t{3310437209857045449, 0},\n\t{3313776886417598794, 3},\n\t{3314281500545949801, 3},\n\t{3317021177907224951, 2},\n\t{3319622102877662317, 0},\n\t{3319676467645533751, 0},\n\t{3319970391256777243, 0},\n\t{3320530057836912424, 1},\n\t{3320960569131767991, 1},\n\t{3322142703911304233, 
2},\n\t{3323008055807222291, 3},\n\t{3324083974684121063, 0},\n\t{3326427649153763488, 2},\n\t{3328030213895667979, 3},\n\t{3328140311310404631, 3},\n\t{3329125301053562422, 0},\n\t{3330847793778759595, 2},\n\t{3331517044972778552, 2},\n\t{3332452810496252574, 3},\n\t{3334527675420580836, 1},\n\t{3336096106867739679, 3},\n\t{3336599700766073286, 3},\n\t{3337249569639359343, 0},\n\t{3338368369527709137, 1},\n\t{3340053641981531746, 2},\n\t{3341357743610388613, 3},\n\t{3342148444878768725, 0},\n\t{3343490232046443767, 1},\n\t{3348871922887773525, 2},\n\t{3350795023322554551, 0},\n\t{3350890578639598952, 0},\n\t{3358825200737704089, 3},\n\t{3359625966394357101, 3},\n\t{3365163691435824575, 0},\n\t{3365623598786557873, 1},\n\t{3369291342486901160, 0},\n\t{3371015855601698125, 2},\n\t{3372799154120294706, 3},\n\t{3375172859758531368, 1},\n\t{3377570736592840034, 3},\n\t{3378900635555028143, 1},\n\t{3382771364660397555, 0},\n\t{3384278816025362963, 1},\n\t{3384641555434123017, 2},\n\t{3385927231462661363, 3},\n\t{3385949748923062607, 3},\n\t{3393511459427227979, 2},\n\t{3394040364420833623, 2},\n\t{3395176423875915810, 3},\n\t{3400231132561870717, 0},\n\t{3400654021045008797, 0},\n\t{3402341723441174246, 1},\n\t{3404167802524527383, 3},\n\t{3405336529876290803, 0},\n\t{3405549463917682984, 0},\n\t{3406912909405219078, 1},\n\t{3408834567672756721, 3},\n\t{3410789605807983473, 1},\n\t{3412253279524323644, 2},\n\t{3414696525182993885, 0},\n\t{3415060565186217713, 1},\n\t{3418288057764216389, 0},\n\t{3419316785903181906, 0},\n\t{3419575153848582609, 1},\n\t{3420991500973281217, 2},\n\t{3421687766899199032, 3},\n\t{3424568666608217341, 1},\n\t{3425085947139233646, 2},\n\t{3425106180157587034, 2},\n\t{3426456360631049002, 3},\n\t{3427401221269977567, 0},\n\t{3427768995971014979, 0},\n\t{3434914334740252543, 2},\n\t{3442580573368532953, 1},\n\t{3443415068849031212, 2},\n\t{3444094278596479740, 2},\n\t{3444268318713026918, 3},\n\t{3445625117847349025, 
0},\n\t{3446088173854709479, 0},\n\t{3448719174844355383, 3},\n\t{3450621967679654082, 0},\n\t{3455797360821141357, 1},\n\t{3457091088867165690, 2},\n\t{3457586404617178462, 2},\n\t{3458315490264497140, 3},\n\t{3458458015549211121, 3},\n\t{3459191261743684297, 0},\n\t{3459812483789493723, 0},\n\t{3460061258007799893, 1},\n\t{3460265397357412575, 1},\n\t{3463767737635578020, 0},\n\t{3467805865330742488, 0},\n\t{3468681945290060782, 0},\n\t{3468737288955317317, 0},\n\t{3469795510945911509, 1},\n\t{3471423705702659702, 3},\n\t{3476312622997853824, 3},\n\t{3477410019648264994, 0},\n\t{3477442615928188451, 0},\n\t{3478241422217936377, 1},\n\t{3479735472453172523, 2},\n\t{3482746684043594291, 1},\n\t{3487995151857953274, 1},\n\t{3488086052715463409, 2},\n\t{3492740592602224243, 2},\n\t{3494214930778272897, 3},\n\t{3494428041786844621, 3},\n\t{3494880947890395274, 0},\n\t{3497604170893077495, 2},\n\t{3497612405961978004, 2},\n\t{3498569149403497938, 3},\n\t{3501177282332406663, 1},\n\t{3502323868188590458, 2},\n\t{3504541985946816063, 0},\n\t{3506358164898880496, 2},\n\t{3507120463825948685, 2},\n\t{3507732774077893748, 3},\n\t{3510114892549258397, 1},\n\t{3511106316788034793, 2},\n\t{3514080040669417175, 1},\n\t{3516646378455763007, 3},\n\t{3516761896967631485, 3},\n\t{3520854102340628868, 3},\n\t{3520872192915917837, 3},\n\t{3521257899633950205, 3},\n\t{3521799362172050991, 3},\n\t{3521852862802554421, 0},\n\t{3522319553184296812, 0},\n\t{3522696854222685961, 0},\n\t{3524061387476484150, 1},\n\t{3525260012624043263, 3},\n\t{3525563889804129061, 3},\n\t{3526417797738331074, 0},\n\t{3530369890416864029, 3},\n\t{3531255192550073268, 0},\n\t{3531323920441231508, 0},\n\t{3532111012207150269, 1},\n\t{3532469600858993458, 1},\n\t{3533335534300421405, 2},\n\t{3534488379889405903, 3},\n\t{3534504829654522014, 3},\n\t{3535492279485348316, 0},\n\t{3536400960578622726, 0},\n\t{3538541575764076013, 2},\n\t{3538939524801473512, 3},\n\t{3540177800567489843, 
0},\n\t{3540291248898659226, 0},\n\t{3543192317036527904, 2},\n\t{3544040957624287579, 3},\n\t{3544554587749245559, 0},\n\t{3544944537238854167, 0},\n\t{3546557558454058285, 1},\n\t{3546998724591072949, 2},\n\t{3547523196152872568, 2},\n\t{3549861946200661824, 0},\n\t{3553208949221422214, 3},\n\t{3556054382896127550, 2},\n\t{3557522066982752521, 3},\n\t{3559263536239483440, 1},\n\t{3559541691150269773, 1},\n\t{3563137547515512016, 0},\n\t{3563311411162191670, 0},\n\t{3563457774152001005, 0},\n\t{3563947446204916589, 1},\n\t{3564735890730929393, 2},\n\t{3565822656754228038, 3},\n\t{3567166437103167732, 0},\n\t{3570762708270278732, 3},\n\t{3570988386760280138, 3},\n\t{3571774117613710771, 0},\n\t{3576140393212308006, 0},\n\t{3579151368734717565, 2},\n\t{3580275710537049939, 3},\n\t{3584478334247225873, 3},\n\t{3587470442257930096, 2},\n\t{3590523500237223815, 1},\n\t{3592539750235322553, 2},\n\t{3593230499398004079, 3},\n\t{3593822167955022995, 3},\n\t{3599281523282481104, 0},\n\t{3600518568259788129, 1},\n\t{3608881755988378458, 1},\n\t{3609227581859355055, 1},\n\t{3610894554733909060, 3},\n\t{3611679908339769462, 3},\n\t{3613260097952833779, 1},\n\t{3620098203098193944, 3},\n\t{3620596341949629820, 3},\n\t{3623291613527916602, 2},\n\t{3626929346831188746, 1},\n\t{3627481885228464690, 1},\n\t{3628040400799658137, 2},\n\t{3630328156596193780, 0},\n\t{3631914826509687130, 1},\n\t{3633063731971052520, 2},\n\t{3633897910461780026, 3},\n\t{3635811329352621842, 1},\n\t{3636390896730554893, 1},\n\t{3640766542788071497, 1},\n\t{3642602882574037270, 3},\n\t{3643492202426060648, 0},\n\t{3645794409631552373, 2},\n\t{3648330362918116727, 0},\n\t{3649116146636274587, 1},\n\t{3649785396446530538, 1},\n\t{3650442172715673365, 2},\n\t{3651371035634788826, 3},\n\t{3658519123974785665, 1},\n\t{3660095251041707355, 2},\n\t{3660796605905121511, 3},\n\t{3661865313874199816, 0},\n\t{3663458205346560006, 1},\n\t{3668206424626899983, 2},\n\t{3668452545570955132, 
2},\n\t{3670167886812139369, 3},\n\t{3674431379564295101, 3},\n\t{3676736434189457996, 1},\n\t{3679463588532074771, 0},\n\t{3680430992460167359, 0},\n\t{3682985586739338276, 3},\n\t{3683026768694760047, 3},\n\t{3683183701083191683, 3},\n\t{3692426893503511525, 3},\n\t{3692956231688524469, 0},\n\t{3693923710225235172, 0},\n\t{3699095987413124564, 1},\n\t{3699108661131349678, 1},\n\t{3701289680420196718, 3},\n\t{3702383771433211743, 0},\n\t{3704197338834317936, 1},\n\t{3704325018567667916, 2},\n\t{3706287250482303233, 3},\n\t{3708224727088848632, 1},\n\t{3710094639260022653, 3},\n\t{3711228216373241105, 0},\n\t{3711787365308887563, 0},\n\t{3717079396006304506, 1},\n\t{3717346412895298497, 1},\n\t{3720408511379922161, 0},\n\t{3725677000565467539, 1},\n\t{3727307155600968656, 2},\n\t{3729045703710123409, 0},\n\t{3729406694865154095, 0},\n\t{3733670582342095796, 0},\n\t{3737282002778387339, 3},\n\t{3738806909016626924, 0},\n\t{3738888816087743813, 0},\n\t{3740111730958133259, 1},\n\t{3740485131202212521, 2},\n\t{3740953439630422273, 2},\n\t{3741668280832208194, 3},\n\t{3742049504614130172, 3},\n\t{3744422277423831786, 1},\n\t{3745470634921412838, 2},\n\t{3747403294562400056, 0},\n\t{3750042998345869908, 2},\n\t{3750823044385427937, 3},\n\t{3754014957565525773, 2},\n\t{3754339837234337643, 2},\n\t{3757846239892471449, 1},\n\t{3760789624501693217, 0},\n\t{3761875988909903864, 1},\n\t{3762603033372856812, 1},\n\t{3764587415682140748, 3},\n\t{3767982218425468500, 2},\n\t{3769684809007969085, 0},\n\t{3775166842895598343, 1},\n\t{3775371198888880116, 1},\n\t{3775400080492610631, 1},\n\t{3775624659603382373, 1},\n\t{3782164914503545999, 3},\n\t{3794594537772422114, 2},\n\t{3796099210206534219, 3},\n\t{3800356556378005398, 3},\n\t{3807897159754889665, 2},\n\t{3809204305155973345, 3},\n\t{3809863304156015307, 3},\n\t{3811873633409373068, 1},\n\t{3813276357869736000, 2},\n\t{3814012717524745174, 3},\n\t{3816076643826914771, 1},\n\t{3816321983009539824, 
1},\n\t{3816765179413625270, 1},\n\t{3820483357394399656, 1},\n\t{3820695838052232588, 1},\n\t{3822842191150026769, 3},\n\t{3827250750161666405, 3},\n\t{3829563167393779045, 1},\n\t{3830773796201512734, 2},\n\t{3833787416136649737, 1},\n\t{3834459321133540598, 1},\n\t{3834491402049297702, 1},\n\t{3835646448997603415, 2},\n\t{3835736964506116520, 2},\n\t{3837450667218903460, 0},\n\t{3839005380043059441, 1},\n\t{3839405360082607448, 2},\n\t{3842991141959275361, 1},\n\t{3843708474933466400, 1},\n\t{3844175977720473233, 2},\n\t{3845511580298498800, 3},\n\t{3847319562720531361, 1},\n\t{3847370602713004898, 1},\n\t{3849041616232956592, 2},\n\t{3851821461774294693, 1},\n\t{3852902096373246345, 2},\n\t{3853399314309319060, 2},\n\t{3853963984663747560, 3},\n\t{3854414633075004229, 3},\n\t{3854961727627545794, 3},\n\t{3855391086496690429, 0},\n\t{3858417253466079056, 2},\n\t{3867703381053458367, 3},\n\t{3871141862912096701, 2},\n\t{3881667645729113184, 3},\n\t{3882715310440714530, 0},\n\t{3886348859361293381, 3},\n\t{3886462746493074033, 3},\n\t{3886983501134915989, 0},\n\t{3887530602515149862, 0},\n\t{3888645901922483869, 1},\n\t{3890192598422858353, 3},\n\t{3893158542707915774, 1},\n\t{3895052363097057093, 3},\n\t{3896279843382136853, 0},\n\t{3899178627199881866, 3},\n\t{3902857616982333808, 2},\n\t{3903348986409735079, 2},\n\t{3904616851077318955, 3},\n\t{3907411251170194821, 2},\n\t{3908067384187416216, 3},\n\t{3910172642246414953, 0},\n\t{3911924442046280011, 2},\n\t{3914629055443757067, 0},\n\t{3914687928472992962, 0},\n\t{3915912422497280120, 2},\n\t{3918504009612176256, 0},\n\t{3919713094923188547, 1},\n\t{3921867695903152677, 3},\n\t{3923036737577991382, 0},\n\t{3925389614098068805, 2},\n\t{3929983933420866047, 2},\n\t{3930663389065252136, 3},\n\t{3934664741955250253, 2},\n\t{3935379837280669818, 3},\n\t{3939738952946211468, 3},\n\t{3941266145607839873, 0},\n\t{3942653437261378352, 1},\n\t{3942708050817898237, 1},\n\t{3943331085977611435, 
2},\n\t{3955235529576855139, 0},\n\t{3962171787912742985, 3},\n\t{3962267080775814446, 3},\n\t{3965775119979934618, 2},\n\t{3966694967605869539, 3},\n\t{3967292051942296453, 3},\n\t{3967871692469729144, 0},\n\t{3971298435888564339, 3},\n\t{3973581327224630979, 1},\n\t{3974649540307096128, 2},\n\t{3975172206678360143, 2},\n\t{3976946633654649803, 0},\n\t{3979489501819051454, 2},\n\t{3982940468398677194, 1},\n\t{3983472013755653788, 2},\n\t{3988711897561767956, 2},\n\t{3989574836468683891, 3},\n\t{3990491472899213363, 0},\n\t{3991687508973326983, 1},\n\t{3994882358972358038, 0},\n\t{3995282693026336612, 0},\n\t{3998112572292518263, 3},\n\t{3999174617671892594, 3},\n\t{4000335446025022173, 1},\n\t{4000677556064600331, 1},\n\t{4004863465709064415, 1},\n\t{4007356622358193867, 3},\n\t{4007620176263083653, 3},\n\t{4008049298037574045, 3},\n\t{4008135238438316185, 3},\n\t{4008596704999700118, 0},\n\t{4014570779590664441, 1},\n\t{4017207421232736338, 3},\n\t{4018794596916066769, 1},\n\t{4019786401907810189, 2},\n\t{4023399780843775178, 1},\n\t{4023579473392880122, 1},\n\t{4025916246056896634, 3},\n\t{4026611831864293788, 0},\n\t{4029463273814040416, 2},\n\t{4029769970549945375, 3},\n\t{4030047188732658749, 3},\n\t{4031461840136407195, 0},\n\t{4034108831927568507, 3},\n\t{4036140174675410377, 0},\n\t{4036155043838823339, 0},\n\t{4037488136120735940, 2},\n\t{4039748562278911826, 0},\n\t{4039839263981285036, 0},\n\t{4042809266750045821, 2},\n\t{4047002183599186241, 2},\n\t{4051144076385966950, 2},\n\t{4051435199394590559, 2},\n\t{4053542738079040904, 0},\n\t{4058641639347956663, 0},\n\t{4058710371274696507, 0},\n\t{4058953711288785976, 1},\n\t{4059753249276406084, 1},\n\t{4061732836629713871, 3},\n\t{4065316560054005162, 2},\n\t{4067049258094733719, 0},\n\t{4069687774164023258, 2},\n\t{4069726820510509076, 2},\n\t{4070421742064056072, 3},\n\t{4071026963584354399, 3},\n\t{4072992750323761485, 1},\n\t{4073398000013760870, 1},\n\t{4074013023834326537, 
2},\n\t{4074358061401992581, 2},\n\t{4075444716783500002, 3},\n\t{4078067350932430715, 2},\n\t{4079407433748321684, 3},\n\t{4080362368825921658, 0},\n\t{4080605510419996680, 0},\n\t{4081577243356923571, 1},\n\t{4081874992284134063, 1},\n\t{4084159049673583340, 3},\n\t{4085808899332415373, 0},\n\t{4086070055667105127, 1},\n\t{4086293576162062623, 1},\n\t{4087383513506933787, 2},\n\t{4090509023216334693, 1},\n\t{4091331940996397706, 1},\n\t{4092103645012042064, 2},\n\t{4093524614129681152, 3},\n\t{4094524895025015503, 0},\n\t{4097200583223325082, 3},\n\t{4098028207903386869, 3},\n\t{4098535237370634151, 0},\n\t{4099718473540772968, 1},\n\t{4100052832006923140, 1},\n\t{4100403733343314546, 1},\n\t{4102246936373802832, 3},\n\t{4103950165083988717, 1},\n\t{4105446093666545097, 2},\n\t{4105781610309485995, 2},\n\t{4105912217244982945, 2},\n\t{4107340692516268735, 0},\n\t{4107404229534147611, 0},\n\t{4107779968603309180, 0},\n\t{4108494353370581022, 1},\n\t{4112560867431172737, 0},\n\t{4112969548700678556, 1},\n\t{4114828487522653933, 2},\n\t{4117413154263071090, 0},\n\t{4127147208675095402, 1},\n\t{4129594543959819157, 3},\n\t{4132654503788589657, 2},\n\t{4132847311328934341, 2},\n\t{4136976049689221447, 2},\n\t{4137093739393576753, 2},\n\t{4137802831106456963, 3},\n\t{4140384341325907953, 1},\n\t{4148711550192472662, 0},\n\t{4151842681389864831, 3},\n\t{4155098726177504188, 2},\n\t{4155162701325730854, 2},\n\t{4162488291467794839, 1},\n\t{4168433306041676279, 2},\n\t{4168524701517771289, 2},\n\t{4169637528288810948, 3},\n\t{4175420030711711663, 0},\n\t{4176113290521033807, 1},\n\t{4181557178749791542, 1},\n\t{4181638102234558992, 2},\n\t{4183031819903524480, 3},\n\t{4190822072781723144, 2},\n\t{4194226935635648454, 1},\n\t{4195843299843838210, 2},\n\t{4196152880012939252, 2},\n\t{4198610512390402121, 1},\n\t{4198652634018520191, 1},\n\t{4198806991931071556, 1},\n\t{4199356891124370137, 1},\n\t{4201440921743049408, 3},\n\t{4201737956141639634, 
3},\n\t{4202204022206657675, 0},\n\t{4203563950705319014, 1},\n\t{4207864467824639439, 1},\n\t{4211973036218958877, 0},\n\t{4212538061537490787, 1},\n\t{4212570490280843406, 1},\n\t{4213066816651251479, 1},\n\t{4213114454162833008, 1},\n\t{4214280081409801576, 3},\n\t{4221678943584874984, 1},\n\t{4222863083323635825, 2},\n\t{4223434480951563634, 3},\n\t{4225151885415047278, 0},\n\t{4231675061215499455, 2},\n\t{4232075085936644864, 2},\n\t{4234685188140782266, 1},\n\t{4234977875860653851, 1},\n\t{4239498270479015899, 1},\n\t{4240717939192446187, 2},\n\t{4241272655392087042, 3},\n\t{4242745513879421346, 0},\n\t{4245606366034171577, 2},\n\t{4246360038993955761, 3},\n\t{4247485632252233198, 0},\n\t{4247821529223044905, 0},\n\t{4255093375872483992, 3},\n\t{4258333482713322347, 2},\n\t{4260100378124976925, 3},\n\t{4260235115626198195, 3},\n\t{4262880255963671183, 2},\n\t{4263873765372520803, 3},\n\t{4264202171752493827, 3},\n\t{4265915327220129986, 0},\n\t{4266571832959345212, 1},\n\t{4268401498199734875, 3},\n\t{4268683392432019129, 3},\n\t{4271018635259475028, 1},\n\t{4271317270611693194, 1},\n\t{4274070304884805399, 0},\n\t{4277500833365948321, 3},\n\t{4279815704399828246, 1},\n\t{4282067632568351563, 3},\n\t{4286456726370828605, 3},\n\t{4286484836527869207, 3},\n\t{4286567281180745730, 3},\n\t{4290590974711478182, 2},\n\t{4293270974448917560, 1},\n\t{4294130091132237411, 1},\n\t{4294644967413544768, 2},\n\t{4294689139559561361, 2},\n\t{4294975134159802258, 2},\n\t{4295884438938417808, 3},\n\t{4298098431456382531, 1},\n\t{4303123581155419075, 1},\n\t{4304879529194916096, 3},\n\t{4305700249485256639, 0},\n\t{4305801574933914853, 0},\n\t{4307948391858959988, 2},\n\t{4308395744793787909, 2},\n\t{4311478765271984555, 1},\n\t{4312364037786171261, 2},\n\t{4312737232224553726, 2},\n\t{4314512480330162710, 0},\n\t{4315614309488988836, 1},\n\t{4315645344355216048, 1},\n\t{4318492003438926187, 3},\n\t{4318863376591322449, 3},\n\t{4319056282139368728, 
0},\n\t{4320398825752592450, 1},\n\t{4320641382345463203, 1},\n\t{4320910175804898776, 1},\n\t{4321813541271190397, 2},\n\t{4322176341878479004, 2},\n\t{4326866713461145269, 3},\n\t{4328069235021348020, 0},\n\t{4330020623329261664, 1},\n\t{4333303690604123667, 0},\n\t{4337180967959206113, 0},\n\t{4337202360298294847, 0},\n\t{4337645417391057264, 0},\n\t{4338593314990170992, 1},\n\t{4339299590257585119, 2},\n\t{4341152643303837774, 3},\n\t{4342823957303595245, 1},\n\t{4345640021199834354, 3},\n\t{4346965713544996044, 0},\n\t{4346990655856082576, 0},\n\t{4350928710000396476, 0},\n\t{4356290058915506601, 1},\n\t{4357166167545067111, 1},\n\t{4358169467415338441, 2},\n\t{4359511541240631231, 0},\n\t{4360163811964981187, 0},\n\t{4360465726695100726, 0},\n\t{4365849463800639529, 1},\n\t{4371531406331620622, 2},\n\t{4371844763637625288, 2},\n\t{4371910118609494561, 3},\n\t{4372447903401004582, 3},\n\t{4374934430987491529, 1},\n\t{4379833353501708295, 2},\n\t{4380506802599149948, 2},\n\t{4380751983948700139, 2},\n\t{4381809749507910219, 3},\n\t{4382503413450104797, 0},\n\t{4386885436289188920, 0},\n\t{4387329278182876093, 0},\n\t{4387334672402418589, 0},\n\t{4387579971497194873, 0},\n\t{4389497305771939934, 2},\n\t{4389912366724602955, 3},\n\t{4391955501222044874, 0},\n\t{4394666730808823492, 3},\n\t{4395195543059936138, 3},\n\t{4397588700925961068, 1},\n\t{4397893725148086245, 2},\n\t{4398274354358792020, 2},\n\t{4399272937683746226, 3},\n\t{4401080892004746940, 0},\n\t{4404673828024017676, 0},\n\t{4404674177098943646, 0},\n\t{4405800069647510650, 1},\n\t{4406044491408168756, 1},\n\t{4416117682778071794, 2},\n\t{4417908078744575814, 3},\n\t{4418212938605670879, 0},\n\t{4420362459136515802, 2},\n\t{4422746115213201426, 0},\n\t{4423487070419591392, 0},\n\t{4425311685012809366, 2},\n\t{4428251222441512290, 1},\n\t{4428421811957306371, 1},\n\t{4429635069053893733, 2},\n\t{4430918384357940506, 3},\n\t{4430921023273731046, 3},\n\t{4432441342157380838, 
0},\n\t{4434501036008037865, 2},\n\t{4441834568409235516, 1},\n\t{4442717115954061144, 1},\n\t{4443395479346919445, 2},\n\t{4445625198531294060, 0},\n\t{4447140088244231077, 1},\n\t{4452320211860523109, 2},\n\t{4453426910232948821, 3},\n\t{4454510055856860362, 0},\n\t{4455518748157763437, 1},\n\t{4461138832457625470, 2},\n\t{4462394142030084413, 3},\n\t{4464912847214848896, 1},\n\t{4465307408552077066, 1},\n\t{4466575070771838604, 3},\n\t{4468636342493201051, 0},\n\t{4470469924501424498, 2},\n\t{4470808308182890245, 2},\n\t{4472841647567749958, 0},\n\t{4473023916199574949, 0},\n\t{4476312986290230264, 3},\n\t{4484094530921797029, 2},\n\t{4485471962149390644, 3},\n\t{4490011968133246644, 3},\n\t{4491028635415076630, 0},\n\t{4493810190645762231, 3},\n\t{4494645612473207725, 0},\n\t{4498203145894764222, 3},\n\t{4499722267277331801, 0},\n\t{4501648888377589489, 2},\n\t{4504576325414883659, 0},\n\t{4504781445951758814, 1},\n\t{4505328504773043058, 1},\n\t{4506063999763780684, 2},\n\t{4506826908446397835, 2},\n\t{4508496411470895032, 0},\n\t{4508787705476765999, 0},\n\t{4509612966313299122, 1},\n\t{4509742270943717214, 1},\n\t{4509861008909884804, 1},\n\t{4510232384704750004, 1},\n\t{4513642194594339997, 0},\n\t{4517435895676201878, 0},\n\t{4518990156846352197, 1},\n\t{4522246016857748250, 0},\n\t{4523242346400757752, 1},\n\t{4526682621395233424, 0},\n\t{4527371203522142219, 1},\n\t{4528133704485662667, 1},\n\t{4528237122279081609, 1},\n\t{4528261756117349346, 1},\n\t{4529055998164350569, 2},\n\t{4530309512795405867, 3},\n\t{4542378763557348020, 2},\n\t{4545998597828667430, 1},\n\t{4546337370802775832, 1},\n\t{4551159572398093441, 2},\n\t{4551428509467093765, 2},\n\t{4552105221451826075, 3},\n\t{4555898122075658922, 2},\n\t{4557448425965485281, 3},\n\t{4558387103419915204, 0},\n\t{4560035818513021188, 2},\n\t{4560832403453779258, 2},\n\t{4566514606316688001, 3},\n\t{4572945960485160758, 1},\n\t{4573265524643710815, 1},\n\t{4576985991534266370, 
1},\n\t{4578741505534189287, 2},\n\t{4579516026675118940, 3},\n\t{4580926831025299071, 0},\n\t{4581307146170945933, 1},\n\t{4582411051625008194, 1},\n\t{4585644113427068340, 0},\n\t{4591754433046665581, 2},\n\t{4592809417129004420, 3},\n\t{4596626538913327426, 2},\n\t{4596842778895355516, 2},\n\t{4598004149896252307, 3},\n\t{4598596709426157489, 0},\n\t{4603342802438875334, 0},\n\t{4605461897371648319, 2},\n\t{4606431397026319709, 3},\n\t{4606667919645777725, 3},\n\t{4607729303870605987, 0},\n\t{4607742700442095049, 0},\n\t{4609014250743618605, 1},\n\t{4609115557529912027, 1},\n\t{4609965960642071389, 2},\n\t{4614525953016249244, 2},\n\t{4615422750080385728, 3},\n\t{4617870967148906005, 1},\n\t{4623590127836296969, 2},\n\t{4623861527357913475, 2},\n\t{4625778785022473905, 0},\n\t{4626083587949318407, 0},\n\t{4626153638625696712, 0},\n\t{4626541177050590883, 1},\n\t{4627027997713994024, 1},\n\t{4632330137991349005, 2},\n\t{4632832204072700910, 2},\n\t{4632965691878551148, 2},\n\t{4634977905973810772, 0},\n\t{4639184833692510328, 0},\n\t{4640200910948042988, 1},\n\t{4642082297628719040, 2},\n\t{4642794063147231353, 3},\n\t{4643695729006736183, 0},\n\t{4645859158182311996, 2},\n\t{4645935184879786362, 2},\n\t{4646982108586023767, 3},\n\t{4652909896747551924, 0},\n\t{4654640801175269934, 2},\n\t{4655096173434740867, 2},\n\t{4656264876561753490, 3},\n\t{4659658767762973267, 2},\n\t{4659837368793564411, 2},\n\t{4662466422842468219, 1},\n\t{4662656414915949380, 1},\n\t{4662823757669349174, 1},\n\t{4663804752143940598, 2},\n\t{4666224250217451623, 0},\n\t{4666584102712571476, 0},\n\t{4668763195823976134, 2},\n\t{4671560191488344985, 1},\n\t{4672606292076135622, 2},\n\t{4674313909237939448, 3},\n\t{4675303189195412883, 0},\n\t{4679388320727791829, 0},\n\t{4680640147777962699, 1},\n\t{4681672918497503552, 2},\n\t{4683063637475430161, 3},\n\t{4683604576323107996, 3},\n\t{4685148825761210619, 1},\n\t{4685355580370409023, 1},\n\t{4688174618514206676, 
3},\n\t{4691777993314033338, 3},\n\t{4694676608878349271, 1},\n\t{4696227134379354105, 3},\n\t{4697795398838582577, 0},\n\t{4697853389731267232, 0},\n\t{4698012282164299400, 0},\n\t{4699286635329755154, 1},\n\t{4700985640211419295, 3},\n\t{4701424914602714087, 3},\n\t{4706454348405760990, 0},\n\t{4708159633961387308, 1},\n\t{4709047791322641225, 2},\n\t{4710055268587892087, 3},\n\t{4710356657637817554, 3},\n\t{4710475172446328060, 3},\n\t{4711102455189341275, 0},\n\t{4711335470218805037, 0},\n\t{4713338471426843886, 2},\n\t{4714620120676009927, 3},\n\t{4715217283852863572, 3},\n\t{4721997034143454027, 1},\n\t{4723827331775500204, 3},\n\t{4725790797486714284, 1},\n\t{4726308198697736594, 1},\n\t{4730657650938952964, 1},\n\t{4730676826326532743, 1},\n\t{4731320002684578692, 2},\n\t{4731411243179682880, 2},\n\t{4732566604973505961, 3},\n\t{4732837534729894939, 3},\n\t{4734724237629032237, 1},\n\t{4735598873606395713, 2},\n\t{4736048498404477401, 2},\n\t{4736873958708156464, 3},\n\t{4750785486035266554, 3},\n\t{4753264361840060391, 1},\n\t{4755049775316837721, 3},\n\t{4755565901877939278, 3},\n\t{4759861770386782250, 3},\n\t{4761322082113681058, 0},\n\t{4761492625331305661, 1},\n\t{4763398529909883960, 2},\n\t{4763705824037745188, 3},\n\t{4763799995967278652, 3},\n\t{4763921138761791592, 3},\n\t{4764423203653469544, 3},\n\t{4764662223273860098, 3},\n\t{4764724249766552002, 3},\n\t{4771431246549649971, 1},\n\t{4774082315795350689, 0},\n\t{4775301829330002085, 1},\n\t{4778660205365725418, 0},\n\t{4784060845229932118, 1},\n\t{4784771488476611570, 1},\n\t{4785100707580612364, 2},\n\t{4785135161137249400, 2},\n\t{4790867956573360421, 3},\n\t{4791484965290613382, 3},\n\t{4791770612666791816, 3},\n\t{4803883262772341180, 2},\n\t{4804542519931337348, 3},\n\t{4807193342499930919, 1},\n\t{4810351814219213441, 0},\n\t{4811326575884976869, 1},\n\t{4811916457865324500, 1},\n\t{4811951055614954591, 1},\n\t{4814232727989294249, 3},\n\t{4814765320965186006, 
0},\n\t{4816314067596903573, 1},\n\t{4816499433967787512, 1},\n\t{4818621667505205106, 3},\n\t{4820801140120590588, 1},\n\t{4822177363554112757, 2},\n\t{4822563410741587479, 3},\n\t{4822925749212740571, 3},\n\t{4824993638457688971, 1},\n\t{4826375050158826071, 2},\n\t{4827135955581798500, 3},\n\t{4827145232764294973, 3},\n\t{4827710597206137509, 3},\n\t{4827752650438645980, 3},\n\t{4828147645412676478, 0},\n\t{4828374200991313146, 0},\n\t{4834338356026154578, 1},\n\t{4836490692398687228, 3},\n\t{4836642471471124607, 3},\n\t{4840702622796026614, 3},\n\t{4846845369697312836, 0},\n\t{4847135386038226657, 1},\n\t{4853398826803151846, 2},\n\t{4856756360929629707, 1},\n\t{4856862837566493800, 1},\n\t{4865689445468590912, 1},\n\t{4866255000342319343, 2},\n\t{4867430443499164545, 3},\n\t{4868405234108622042, 0},\n\t{4871110459491023148, 2},\n\t{4873891153684616151, 0},\n\t{4877445711586267412, 0},\n\t{4877865308237244713, 0},\n\t{4883811170642746476, 1},\n\t{4883987295373808754, 1},\n\t{4884254600867910929, 2},\n\t{4884826517972830460, 2},\n\t{4886451157532784501, 0},\n\t{4887237179728240206, 0},\n\t{4887899728450826740, 1},\n\t{4888552065955222106, 1},\n\t{4889039581186323004, 2},\n\t{4889388838852999771, 2},\n\t{4889444339040233398, 2},\n\t{4895748719320067500, 0},\n\t{4897600432647424304, 1},\n\t{4897965661593441979, 2},\n\t{4900655692610992483, 0},\n\t{4903326372140063612, 3},\n\t{4904402148578853891, 3},\n\t{4909533814277507741, 0},\n\t{4911172522553355927, 1},\n\t{4911296292641477437, 2},\n\t{4914179048375045259, 0},\n\t{4917043476129554431, 3},\n\t{4917081505880857851, 3},\n\t{4917816396729393466, 3},\n\t{4918516731475821334, 0},\n\t{4920556627127998010, 2},\n\t{4927245708134209391, 0},\n\t{4927537437400004580, 0},\n\t{4930690386842076936, 3},\n\t{4932662171743140088, 1},\n\t{4935085422742336192, 3},\n\t{4937071755239609763, 1},\n\t{4938483907335880510, 2},\n\t{4938998642446160421, 2},\n\t{4939223474926943340, 2},\n\t{4940619397068197921, 
0},\n\t{4941448395249681727, 0},\n\t{4942228445088312775, 1},\n\t{4947492273515807874, 2},\n\t{4949052031132606117, 3},\n\t{4951259319200620945, 1},\n\t{4953374126295207645, 3},\n\t{4954527202160740467, 0},\n\t{4955439240988112042, 1},\n\t{4955536471998953935, 1},\n\t{4957934045209707462, 3},\n\t{4957936744355999916, 3},\n\t{4958177027904990201, 3},\n\t{4960705398776100262, 1},\n\t{4963334015510756465, 0},\n\t{4964224426917209909, 1},\n\t{4965186263719972797, 1},\n\t{4967124260056354031, 3},\n\t{4967719218548115658, 0},\n\t{4969018299189815090, 1},\n\t{4969301591451505233, 1},\n\t{4969452548970307194, 1},\n\t{4971125319468801164, 3},\n\t{4975257627302405972, 2},\n\t{4982300245606942924, 1},\n\t{4983278286450825185, 2},\n\t{4986386615437008783, 0},\n\t{4986426006648018767, 0},\n\t{4986573257038743121, 0},\n\t{4987660877167053353, 1},\n\t{4988901539269028919, 3},\n\t{4989879236996068847, 3},\n\t{4992840222235583342, 2},\n\t{4993402647102982576, 3},\n\t{4996330289558081631, 1},\n\t{4997258442294642375, 2},\n\t{4997913558819332340, 3},\n\t{5002386213932090824, 3},\n\t{5005306002763728037, 1},\n\t{5006918986793807370, 3},\n\t{5009320385667943976, 1},\n\t{5011198940530855928, 2},\n\t{5017959014693951694, 0},\n\t{5019182003346871452, 1},\n\t{5020549801909908937, 3},\n\t{5020627306790702830, 3},\n\t{5021826760299570324, 0},\n\t{5022301246076898771, 0},\n\t{5022557299624258526, 0},\n\t{5025759456252832533, 3},\n\t{5027276941892019879, 1},\n\t{5027780260290056568, 1},\n\t{5029275401699182514, 2},\n\t{5030141205857508929, 3},\n\t{5030808872929617636, 0},\n\t{5032491047817258450, 1},\n\t{5036114313678579516, 0},\n\t{5041261757433008568, 1},\n\t{5041969000933394975, 2},\n\t{5044044228461491950, 0},\n\t{5044332123083883064, 0},\n\t{5045478692947684251, 1},\n\t{5045837408496175829, 1},\n\t{5049989617464209972, 1},\n\t{5051299213018644107, 2},\n\t{5051474465505738216, 2},\n\t{5054458983929196605, 1},\n\t{5055158178475434613, 1},\n\t{5061172491247036491, 
3},\n\t{5061443207718496911, 3},\n\t{5061506370787253855, 3},\n\t{5064532705984935657, 2},\n\t{5065223257927420641, 2},\n\t{5065944527817500432, 3},\n\t{5066667527574657971, 0},\n\t{5068781183678639974, 1},\n\t{5069167938285654289, 2},\n\t{5072003657260887376, 0},\n\t{5074604964083325216, 3},\n\t{5074792888300783460, 3},\n\t{5075443650483675072, 3},\n\t{5075683239049511674, 0},\n\t{5075843762173610541, 0},\n\t{5076548910214537045, 0},\n\t{5077257332259966629, 1},\n\t{5079825478165990430, 3},\n\t{5083596125992700219, 3},\n\t{5084738784362190200, 0},\n\t{5085215506052118728, 0},\n\t{5085592541579854779, 0},\n\t{5086113127873727514, 1},\n\t{5086623420417937602, 1},\n\t{5087413380788406742, 2},\n\t{5088185476759662799, 3},\n\t{5093596061254334995, 0},\n\t{5095185892327720809, 1},\n\t{5095256235272522505, 1},\n\t{5100847701820752721, 2},\n\t{5102404847767701675, 3},\n\t{5107501425678891473, 0},\n\t{5110092139593063625, 2},\n\t{5110184317740855949, 2},\n\t{5110470421819550906, 3},\n\t{5110876266441521022, 3},\n\t{5114055723007644212, 2},\n\t{5114392665787098064, 2},\n\t{5115288744682723611, 3},\n\t{5117513801589728581, 1},\n\t{5119825786931863673, 3},\n\t{5119999924028115441, 3},\n\t{5121417458675517519, 0},\n\t{5123913157698447750, 2},\n\t{5125305090342253986, 0},\n\t{5129289734515056627, 3},\n\t{5129782070235847765, 0},\n\t{5132151549722367549, 2},\n\t{5132309507049257444, 2},\n\t{5132724110856492904, 2},\n\t{5133726932296528298, 3},\n\t{5137685453139314562, 3},\n\t{5138481315496082845, 3},\n\t{5139006657580811945, 0},\n\t{5139920630388363235, 1},\n\t{5141481437678832124, 2},\n\t{5142813697692518431, 3},\n\t{5147066129010480696, 3},\n\t{5148231641765292900, 0},\n\t{5148768339771467255, 1},\n\t{5150084268544396173, 2},\n\t{5152665162221002820, 0},\n\t{5155665742012954969, 3},\n\t{5156111835245822056, 3},\n\t{5157386893157139071, 0},\n\t{5160187386572538248, 3},\n\t{5160229923167042013, 3},\n\t{5162620815817568032, 1},\n\t{5165512984063187190, 
3},\n\t{5166636191298090923, 0},\n\t{5167345670581326328, 1},\n\t{5169391259894938753, 3},\n\t{5170130859547001180, 3},\n\t{5170816192054543637, 0},\n\t{5170975006998919100, 0},\n\t{5171061510663414419, 0},\n\t{5171200977532351160, 0},\n\t{5171501683615437294, 1},\n\t{5174172187388117374, 3},\n\t{5174245592407126220, 3},\n\t{5175328938201196362, 0},\n\t{5176165713826472413, 1},\n\t{5178457700345953931, 3},\n\t{5181836564695499330, 2},\n\t{5182126052869149839, 2},\n\t{5182300501837375548, 2},\n\t{5185616658080613864, 1},\n\t{5185965562261976096, 2},\n\t{5187821408630013099, 3},\n\t{5188978468548117582, 0},\n\t{5191290004517922537, 2},\n\t{5194096108043317765, 1},\n\t{5197244565029609609, 0},\n\t{5197993986781284558, 0},\n\t{5199367254834564646, 1},\n\t{5200032066390826211, 2},\n\t{5200469370597353979, 2},\n\t{5202286657487295929, 0},\n\t{5203335883917466884, 1},\n\t{5203753286180234064, 1},\n\t{5206635425062398492, 0},\n\t{5208047849586124334, 1},\n\t{5208176755076094513, 1},\n\t{5208337061417025237, 1},\n\t{5208862567796559524, 2},\n\t{5210157180149027365, 3},\n\t{5210630917530276905, 3},\n\t{5212076795549357088, 1},\n\t{5214378004406198025, 3},\n\t{5214971047340771068, 3},\n\t{5215489351996227030, 0},\n\t{5216256677837851673, 0},\n\t{5216557811577154934, 1},\n\t{5217864211014532589, 2},\n\t{5218304975230707610, 2},\n\t{5220423579847634126, 0},\n\t{5222117968085716350, 2},\n\t{5223292073235656166, 3},\n\t{5225087823073871263, 0},\n\t{5225832811170381846, 1},\n\t{5231308380820441885, 2},\n\t{5231369518814927797, 2},\n\t{5232521048456725176, 3},\n\t{5238362466286834695, 0},\n\t{5239505523496759622, 1},\n\t{5241002751972814731, 2},\n\t{5241482574188012448, 3},\n\t{5247422817107148154, 0},\n\t{5249540393303816406, 2},\n\t{5250621709200175511, 3},\n\t{5252042788550533595, 0},\n\t{5253189622825989544, 1},\n\t{5254264810680249608, 2},\n\t{5257751805608968016, 1},\n\t{5259685158061024500, 3},\n\t{5261426710953031556, 1},\n\t{5261978218402465607, 
1},\n\t{5264984245994912360, 0},\n\t{5265876699330129961, 1},\n\t{5267490243091689752, 2},\n\t{5269824970691159395, 0},\n\t{5270861814216687703, 1},\n\t{5277904115728455574, 3},\n\t{5278090506148678661, 3},\n\t{5283412353181216262, 0},\n\t{5283721431596032999, 0},\n\t{5285747163236290906, 2},\n\t{5288656209377196564, 1},\n\t{5289629493076738098, 2},\n\t{5293067387163208742, 1},\n\t{5294220432506349521, 2},\n\t{5298510533316376600, 2},\n\t{5300625938308666333, 3},\n\t{5302421023832977918, 1},\n\t{5307384845725235700, 1},\n\t{5308285099033996136, 2},\n\t{5308477017995902336, 2},\n\t{5309730369061303839, 3},\n\t{5310262456090108628, 0},\n\t{5310620293105169683, 0},\n\t{5312803779880318313, 2},\n\t{5313566716693168683, 3},\n\t{5313980950867061955, 3},\n\t{5314641904515467393, 0},\n\t{5314863119542651747, 0},\n\t{5315768645311989520, 1},\n\t{5316598437438941386, 2},\n\t{5318785750123503095, 0},\n\t{5320333336971412686, 1},\n\t{5320931671057094735, 1},\n\t{5325469192603864870, 1},\n\t{5326211632469556782, 2},\n\t{5328081077282856667, 0},\n\t{5330439121097336676, 2},\n\t{5334546398074582023, 2},\n\t{5334889134629760794, 2},\n\t{5335313711731296734, 2},\n\t{5337552475843548684, 0},\n\t{5339237247565323941, 2},\n\t{5339313204689921558, 2},\n\t{5340052316165944993, 2},\n\t{5340105747485270137, 2},\n\t{5340754031208992697, 3},\n\t{5340806515349917309, 3},\n\t{5344003472635565642, 2},\n\t{5346678715748914959, 0},\n\t{5354076756077903475, 3},\n\t{5355140252737527174, 0},\n\t{5358091431815683117, 2},\n\t{5358288507998569912, 3},\n\t{5360472559932191114, 1},\n\t{5361375617673564203, 1},\n\t{5365298111426864003, 1},\n\t{5366732353181449163, 2},\n\t{5367477352308987192, 3},\n\t{5367650967143077226, 3},\n\t{5370015230213194185, 1},\n\t{5372036858675718257, 3},\n\t{5372175854099971837, 3},\n\t{5373627574743775687, 0},\n\t{5374760057685605024, 1},\n\t{5378282967678948725, 0},\n\t{5378713720892223612, 1},\n\t{5381991033301799965, 0},\n\t{5382587342637287980, 
0},\n\t{5383038597524815200, 1},\n\t{5386986676364246694, 0},\n\t{5387534825684117903, 1},\n\t{5387816655688304549, 1},\n\t{5388039705862531898, 1},\n\t{5389147631384212546, 2},\n\t{5396256934093195501, 0},\n\t{5396594273001355862, 1},\n\t{5397267601992050775, 1},\n\t{5398821294500614516, 3},\n\t{5398904296090869085, 3},\n\t{5401922154093398942, 1},\n\t{5402145050820902743, 2},\n\t{5402625268775633201, 2},\n\t{5411258977078864518, 2},\n\t{5411430830797636639, 2},\n\t{5411684524875350561, 2},\n\t{5414611494982231140, 1},\n\t{5416384295959621740, 2},\n\t{5418147842493895598, 0},\n\t{5423840223930386044, 1},\n\t{5424704405629557023, 2},\n\t{5424754465269744976, 2},\n\t{5428150109722672297, 1},\n\t{5431358152855351800, 0},\n\t{5434083628447509688, 2},\n\t{5438876701733939490, 2},\n\t{5441197739472982802, 0},\n\t{5444824850433653783, 3},\n\t{5445773406457778279, 0},\n\t{5451411693041324724, 1},\n\t{5451498116519307453, 1},\n\t{5454983624615005533, 0},\n\t{5464324730613043688, 1},\n\t{5464851355370116090, 1},\n\t{5465963393053222041, 2},\n\t{5467858766798773985, 0},\n\t{5469698478261554546, 2},\n\t{5470191125939500724, 2},\n\t{5470329115833206971, 2},\n\t{5472766592156284614, 0},\n\t{5475113429636126331, 2},\n\t{5475523126916735511, 3},\n\t{5475757310035669107, 3},\n\t{5476441358410004787, 0},\n\t{5478217042504188081, 1},\n\t{5483684178527121355, 2},\n\t{5484009735751954875, 2},\n\t{5484242430731999589, 2},\n\t{5484581543861348811, 3},\n\t{5487106108064320655, 1},\n\t{5489456112044439842, 3},\n\t{5493009156210739183, 2},\n\t{5495704941419121487, 1},\n\t{5496577539008792744, 1},\n\t{5503554740685621784, 0},\n\t{5505885652536540904, 2},\n\t{5509229971908437103, 1},\n\t{5511929706326463383, 3},\n\t{5515742330999243810, 2},\n\t{5525087477367550914, 3},\n\t{5527022843348957122, 0},\n\t{5527470044523789889, 1},\n\t{5527686191628381440, 1},\n\t{5528099273924769884, 1},\n\t{5529762448605642876, 3},\n\t{5534173125011668675, 3},\n\t{5534736649456897768, 
3},\n\t{5535034658964436225, 0},\n\t{5535694753985095986, 0},\n\t{5536054957490105426, 1},\n\t{5539298496204212382, 3},\n\t{5539866376424053110, 0},\n\t{5540629805963289781, 1},\n\t{5543909360696220406, 3},\n\t{5545692870339640299, 1},\n\t{5545776115390534095, 1},\n\t{5549652416340106599, 1},\n\t{5550717519437583450, 2},\n\t{5551952880451002991, 3},\n\t{5552293080845556451, 3},\n\t{5552500544518286006, 3},\n\t{5557461581900923593, 0},\n\t{5558647678584467055, 1},\n\t{5561894926260306543, 3},\n\t{5563402149727298772, 1},\n\t{5564181368184751441, 1},\n\t{5564272761655244547, 2},\n\t{5565827131973446900, 3},\n\t{5569932225605518378, 3},\n\t{5570157314455677844, 3},\n\t{5571180560343076075, 0},\n\t{5574614822369682841, 3},\n\t{5581187861763756627, 1},\n\t{5588883247237350142, 3},\n\t{5589989617017297855, 0},\n\t{5591575530922043019, 2},\n\t{5593248062062490204, 3},\n\t{5594694671407749856, 1},\n\t{5596453447708376055, 2},\n\t{5599052917505667126, 0},\n\t{5602788298316848210, 0},\n\t{5603611641650957536, 1},\n\t{5605261298933584824, 2},\n\t{5605720598793172068, 2},\n\t{5605990881456505001, 3},\n\t{5609318103594359885, 2},\n\t{5610112989782748853, 2},\n\t{5614191066974593226, 2},\n\t{5614916120490694090, 3},\n\t{5616126918430448465, 0},\n\t{5616982847073435450, 0},\n\t{5618196078348891224, 1},\n\t{5619273548659354372, 2},\n\t{5620143520882968278, 3},\n\t{5621044355402783562, 0},\n\t{5628635039315296643, 3},\n\t{5631357405729667701, 1},\n\t{5632112822524222997, 2},\n\t{5636552775428556408, 2},\n\t{5642541513911093222, 3},\n\t{5642560193812812134, 3},\n\t{5643667115226238462, 0},\n\t{5646150186849220744, 2},\n\t{5646876774228107253, 3},\n\t{5646885467038692598, 3},\n\t{5650244400587544233, 2},\n\t{5650708381527038012, 2},\n\t{5659044649877083512, 2},\n\t{5668884047615732234, 2},\n\t{5672246409822670239, 1},\n\t{5672803631571147574, 2},\n\t{5673944371932224005, 3},\n\t{5674814865818505418, 0},\n\t{5674874101538950251, 0},\n\t{5681000536612620681, 
1},\n\t{5682169763478627694, 2},\n\t{5684840287572717555, 1},\n\t{5689008616437926686, 0},\n\t{5689299834122844892, 1},\n\t{5690388833517576536, 2},\n\t{5692784649816683588, 0},\n\t{5694690511641025455, 1},\n\t{5694706522518456466, 1},\n\t{5695114308909491554, 2},\n\t{5697820497935439887, 0},\n\t{5702655393406857645, 0},\n\t{5703557541438931407, 1},\n\t{5705052394500430451, 3},\n\t{5707490043659715344, 1},\n\t{5708843082444023028, 2},\n\t{5711080394427217024, 0},\n\t{5711469012336954066, 0},\n\t{5712205398085475358, 1},\n\t{5713058750773305809, 2},\n\t{5715346222313540162, 0},\n\t{5719109514244952779, 3},\n\t{5719399961600514596, 3},\n\t{5724222551111960876, 0},\n\t{5724808230358393468, 0},\n\t{5726366895871925173, 2},\n\t{5729353537456241534, 0},\n\t{5732158411015517832, 3},\n\t{5733001640670984283, 3},\n\t{5733329722696273852, 0},\n\t{5734148225387327587, 0},\n\t{5734562419656389892, 1},\n\t{5734564927161283671, 1},\n\t{5734979283453716410, 1},\n\t{5735023614813146351, 1},\n\t{5735128837404022405, 1},\n\t{5738987165214302334, 1},\n\t{5745094748599987629, 2},\n\t{5745879168965268052, 3},\n\t{5747280852075057967, 0},\n\t{5748765096341349284, 1},\n\t{5749069486797737308, 2},\n\t{5750321217558946957, 3},\n\t{5750530948867933631, 3},\n\t{5752377584195281679, 1},\n\t{5754135142102698396, 2},\n\t{5754141215120657404, 2},\n\t{5754161032399784708, 2},\n\t{5756577500464099055, 0},\n\t{5760967949084190153, 0},\n\t{5762216204706857136, 1},\n\t{5763230390607302845, 2},\n\t{5763636572391427847, 3},\n\t{5764327681635464419, 3},\n\t{5766201301595272499, 1},\n\t{5770259951300036707, 1},\n\t{5771499437167321993, 2},\n\t{5773117727216912340, 3},\n\t{5773197998879590541, 3},\n\t{5774040740574203143, 0},\n\t{5779684294013646436, 1},\n\t{5780576653537512260, 2},\n\t{5781058708920075948, 2},\n\t{5781341806700341357, 2},\n\t{5782846243911586692, 0},\n\t{5783206880988196787, 0},\n\t{5785370868086884710, 2},\n\t{5790391197292669737, 2},\n\t{5792228128150606682, 
0},\n\t{5795067060348336314, 3},\n\t{5795950684759610669, 3},\n\t{5801203864258835941, 0},\n\t{5802462469982435629, 1},\n\t{5802727632941258874, 1},\n\t{5804218088454302267, 3},\n\t{5811177927283699815, 1},\n\t{5815119544485160615, 0},\n\t{5817799680232837531, 3},\n\t{5818367628875738393, 3},\n\t{5821582883787141913, 2},\n\t{5821798480688131836, 2},\n\t{5823184859398895357, 0},\n\t{5826293914945155216, 2},\n\t{5830486920177450014, 2},\n\t{5834478884249128812, 2},\n\t{5838803962297500671, 1},\n\t{5840109917744571347, 3},\n\t{5841595600013439183, 0},\n\t{5841624773115866106, 0},\n\t{5842296351831473664, 1},\n\t{5848329897597105938, 2},\n\t{5849177798851483954, 3},\n\t{5849988590903117786, 3},\n\t{5852336778449893475, 1},\n\t{5858946537431787868, 3},\n\t{5859031802533991188, 3},\n\t{5860127672264329149, 0},\n\t{5861204710401776331, 1},\n\t{5861655919086685061, 2},\n\t{5863041279877354917, 3},\n\t{5865076344787967516, 1},\n\t{5865282376022291876, 1},\n\t{5867725680010554018, 3},\n\t{5868022036547348833, 3},\n\t{5871523016211180167, 2},\n\t{5871875652937347193, 3},\n\t{5872175399176938256, 3},\n\t{5875471472322896507, 2},\n\t{5875998582143461504, 2},\n\t{5878764945670735354, 1},\n\t{5882544220237810885, 0},\n\t{5883134132619865231, 1},\n\t{5884244557096105407, 2},\n\t{5884576639938545486, 2},\n\t{5884770198902752062, 2},\n\t{5886036649267748150, 3},\n\t{5886429341456316798, 0},\n\t{5888453648163016687, 1},\n\t{5889677671655366065, 3},\n\t{5891986365680277476, 1},\n\t{5895898488114922923, 0},\n\t{5896919425508176411, 1},\n\t{5897010191228218112, 1},\n\t{5898958420998408220, 3},\n\t{5900438256841097508, 0},\n\t{5900440729764803928, 0},\n\t{5900644173091914339, 0},\n\t{5905886090218860787, 1},\n\t{5908061747521851771, 3},\n\t{5908457648889689851, 3},\n\t{5909825488629464766, 0},\n\t{5910749027408295985, 1},\n\t{5912593925142193100, 3},\n\t{5915893265983788067, 2},\n\t{5916690496427687937, 3},\n\t{5917630859212994989, 3},\n\t{5919881539868190462, 
1},\n\t{5919960865051000947, 1},\n\t{5921533224426475625, 3},\n\t{5921675359725231066, 3},\n\t{5923509507998152543, 1},\n\t{5923562550658557358, 1},\n\t{5924086573840147598, 1},\n\t{5925567521729145601, 2},\n\t{5926277358860557767, 3},\n\t{5926399781825307655, 3},\n\t{5926695537077934893, 3},\n\t{5927563035494783927, 0},\n\t{5928398409709101067, 1},\n\t{5930411513967899407, 3},\n\t{5933544452357079463, 2},\n\t{5934819353989008103, 3},\n\t{5935518435633989961, 3},\n\t{5937776929663843468, 1},\n\t{5938903631748588984, 2},\n\t{5939879043437390658, 3},\n\t{5941107027876487545, 0},\n\t{5942285387105998158, 1},\n\t{5944461224572228857, 3},\n\t{5944533998008963067, 3},\n\t{5951451141452555345, 1},\n\t{5952886658068805767, 3},\n\t{5956046812507990479, 2},\n\t{5958455145755976025, 0},\n\t{5960875912967598168, 2},\n\t{5963857618060953505, 0},\n\t{5964332495315212369, 1},\n\t{5965145966159559910, 2},\n\t{5965401571834328643, 2},\n\t{5965822791742173964, 2},\n\t{5967488125545125351, 0},\n\t{5967641626159858020, 0},\n\t{5968995069020434004, 1},\n\t{5972431745092005787, 0},\n\t{5975127485867439603, 2},\n\t{5975951468793138120, 3},\n\t{5976490485567514829, 0},\n\t{5977703805573813682, 1},\n\t{5977825382621992762, 1},\n\t{5979495468358239508, 2},\n\t{5981316720147719291, 0},\n\t{5981433242759101927, 0},\n\t{5981481201971817455, 0},\n\t{5983437098839967556, 2},\n\t{5986004342540184458, 0},\n\t{5986631055611506339, 1},\n\t{5987538565381743512, 2},\n\t{5991077970781953007, 1},\n\t{5991579190797406686, 1},\n\t{5994056894112965282, 3},\n\t{5997876861350164350, 3},\n\t{5999457689184080059, 0},\n\t{5999794870051781393, 0},\n\t{6003075537061947958, 3},\n\t{6008373848958907892, 0},\n\t{6009776305142720380, 1},\n\t{6014072881293007974, 1},\n\t{6015124335724434305, 2},\n\t{6016856162373329938, 0},\n\t{6023249228070662898, 1},\n\t{6025076405471890982, 3},\n\t{6025405556926247316, 3},\n\t{6028364596262610309, 2},\n\t{6029838592583645418, 3},\n\t{6031967723784501006, 
1},\n\t{6032476336505668300, 1},\n\t{6032596160577222731, 2},\n\t{6033959993614341560, 3},\n\t{6034902086591430202, 0},\n\t{6038391263799673845, 3},\n\t{6047935930327696175, 3},\n\t{6047966067772297568, 3},\n\t{6048068773693130223, 3},\n\t{6051382165973076291, 2},\n\t{6051477376189385506, 2},\n\t{6051647781143817645, 2},\n\t{6063992524488708148, 1},\n\t{6065091895811047567, 2},\n\t{6066870936876204501, 0},\n\t{6068611349734545154, 2},\n\t{6069399764936722621, 2},\n\t{6071781873644733518, 0},\n\t{6073405732130312254, 2},\n\t{6077208026181981379, 1},\n\t{6077521702592749061, 1},\n\t{6082639332908688390, 2},\n\t{6084996491991026937, 0},\n\t{6085813761108972722, 1},\n\t{6086235712301620506, 1},\n\t{6090430536268187635, 1},\n\t{6095079551468488019, 1},\n\t{6102345850265102109, 3},\n\t{6108315417651728414, 1},\n\t{6109771431634001926, 2},\n\t{6115134509772405659, 3},\n\t{6116765195909036845, 0},\n\t{6119191167328657795, 2},\n\t{6119579316393674740, 3},\n\t{6123774431894963545, 3},\n\t{6124608172964355163, 3},\n\t{6127153158731884302, 2},\n\t{6131610501138562162, 1},\n\t{6134187486777293836, 0},\n\t{6134314300766418716, 0},\n\t{6134400653315257343, 0},\n\t{6136948087630233293, 2},\n\t{6138403369537094202, 3},\n\t{6140057245082557227, 1},\n\t{6143632021610759489, 0},\n\t{6147466985794885918, 0},\n\t{6148036928326343247, 0},\n\t{6148176598295051437, 0},\n\t{6150072540825356051, 2},\n\t{6155563179859386266, 3},\n\t{6155897702451041573, 3},\n\t{6156199747249882571, 3},\n\t{6159876576253458546, 3},\n\t{6161106184311731369, 0},\n\t{6161112063000825194, 0},\n\t{6165127559449762599, 3},\n\t{6165210419450510560, 3},\n\t{6167126982351788425, 1},\n\t{6172829736563079578, 2},\n\t{6172909545330958572, 2},\n\t{6173258135680319539, 2},\n\t{6177232914834115043, 2},\n\t{6182353070948635511, 3},\n\t{6188040208851326788, 0},\n\t{6190047898087242296, 1},\n\t{6190402662236539307, 2},\n\t{6191736030350384664, 3},\n\t{6192756744504177638, 0},\n\t{6194387790775418325, 
1},\n\t{6200294301836121631, 2},\n\t{6203970557641311367, 2},\n\t{6204338953925081452, 2},\n\t{6205215187428923981, 3},\n\t{6209851116027589949, 3},\n\t{6213495178446608696, 2},\n\t{6213539664855418938, 2},\n\t{6213760582930620451, 2},\n\t{6214263173325033535, 3},\n\t{6214860907720473405, 3},\n\t{6215316143016206280, 0},\n\t{6219705095347325001, 0},\n\t{6220883700437834872, 1},\n\t{6222519303659280032, 2},\n\t{6225782284654721600, 1},\n\t{6229425203757653430, 0},\n\t{6231917861339168530, 3},\n\t{6235143923732198996, 1},\n\t{6235357697378193554, 2},\n\t{6243123472568756928, 1},\n\t{6243384755453250365, 1},\n\t{6244903592279532795, 2},\n\t{6247002265065472602, 0},\n\t{6247674542187150856, 1},\n\t{6254527648346406492, 3},\n\t{6254935197330393670, 3},\n\t{6256912335391495321, 1},\n\t{6257418547801095851, 1},\n\t{6258550220946004240, 2},\n\t{6262617326664750512, 2},\n\t{6265337058590076123, 0},\n\t{6265499345599331933, 0},\n\t{6267987406909863904, 3},\n\t{6270588902191461484, 1},\n\t{6271241683126966001, 1},\n\t{6275337614212473581, 1},\n\t{6275691143553196975, 1},\n\t{6276238049166074060, 2},\n\t{6276414091215806402, 2},\n\t{6277455165838069356, 3},\n\t{6280007987743022117, 1},\n\t{6281869290951077282, 3},\n\t{6286920648480758693, 3},\n\t{6291064665695856366, 3},\n\t{6293140157010510172, 1},\n\t{6295153634661381476, 3},\n\t{6295889714920073455, 3},\n\t{6296259944339757264, 0},\n\t{6297694989208149147, 1},\n\t{6298451878271795410, 2},\n\t{6300702901155699370, 0},\n\t{6302959378814186636, 2},\n\t{6303289194247064089, 2},\n\t{6304036215107367687, 3},\n\t{6304180685354229568, 3},\n\t{6304860634716431833, 3},\n\t{6310786657494619396, 1},\n\t{6311267664765788452, 1},\n\t{6311471374851890786, 1},\n\t{6313753082039806849, 3},\n\t{6314877998564939764, 0},\n\t{6317173990983901503, 2},\n\t{6318531582570286362, 3},\n\t{6318735763405858808, 0},\n\t{6327208352735702853, 3},\n\t{6327461209077959096, 3},\n\t{6328050678249600098, 0},\n\t{6328640556688344140, 
0},\n\t{6329287246059299298, 1},\n\t{6332931762125578266, 0},\n\t{6338788246150630834, 1},\n\t{6339332436125338553, 2},\n\t{6339552262186734353, 2},\n\t{6340165549676494714, 3},\n\t{6345424278858511768, 3},\n\t{6346161439984660045, 0},\n\t{6347132025298623648, 1},\n\t{6347317235792506193, 1},\n\t{6348479691146191845, 2},\n\t{6349814744430797613, 3},\n\t{6351159001044564997, 0},\n\t{6351404903750165989, 1},\n\t{6354044112399122303, 3},\n\t{6355084174957498977, 0},\n\t{6356337941786855197, 1},\n\t{6358128114482472442, 3},\n\t{6364038900013539061, 0},\n\t{6365304760698956368, 1},\n\t{6365490670159653020, 1},\n\t{6365975146866071978, 2},\n\t{6366764058887095264, 2},\n\t{6372633554289373704, 0},\n\t{6373017170543749126, 0},\n\t{6382987668969528334, 1},\n\t{6384342465580185074, 2},\n\t{6385612614133994521, 3},\n\t{6390754474888739185, 0},\n\t{6392613481708594538, 1},\n\t{6394088785275140196, 3},\n\t{6395088351674394165, 3},\n\t{6395449304949476129, 0},\n\t{6395526381257113789, 0},\n\t{6403480026744478822, 3},\n\t{6407742969376326161, 3},\n\t{6407748328625641965, 3},\n\t{6408039341317554191, 3},\n\t{6411375575653125895, 2},\n\t{6413487176261234586, 0},\n\t{6416377191741043014, 2},\n\t{6418473772287630058, 0},\n\t{6420173698063757539, 2},\n\t{6426223178894842892, 3},\n\t{6430364776481296549, 3},\n\t{6432212859772189168, 0},\n\t{6432665909801428724, 1},\n\t{6433060575139722432, 1},\n\t{6433845591617665892, 2},\n\t{6433847858658428903, 2},\n\t{6439117613849223651, 3},\n\t{6440769501979416210, 0},\n\t{6446448118479560352, 1},\n\t{6449242029365869333, 0},\n\t{6451861772190237669, 2},\n\t{6455554781665509847, 1},\n\t{6458371606854624535, 0},\n\t{6460382601597530030, 1},\n\t{6460468494307623821, 2},\n\t{6461969387707806485, 3},\n\t{6464934952008544599, 2},\n\t{6466353849823715358, 3},\n\t{6467824141313664225, 0},\n\t{6467859518052474985, 0},\n\t{6468041038977561794, 0},\n\t{6468539894193205668, 1},\n\t{6469097553668056929, 1},\n\t{6469842929497027370, 
2},\n\t{6470800581779840980, 3},\n\t{6471777879989944167, 0},\n\t{6476212172344459936, 0},\n\t{6476510301989108412, 0},\n\t{6482495367522857668, 1},\n\t{6483281663848018009, 2},\n\t{6483620731662674482, 2},\n\t{6484004892088506310, 2},\n\t{6486057519980161334, 0},\n\t{6487683737924422245, 2},\n\t{6487997743235837627, 2},\n\t{6488675643744836451, 3},\n\t{6491429418837563364, 1},\n\t{6494027301439304772, 3},\n\t{6495356252147892915, 1},\n\t{6495660670364579430, 1},\n\t{6496150217043857754, 1},\n\t{6497374663506126127, 2},\n\t{6500581737028138053, 1},\n\t{6501826721167309978, 2},\n\t{6503462129580500811, 0},\n\t{6505108068106878116, 1},\n\t{6509309368986578527, 1},\n\t{6510956180808542130, 2},\n\t{6518931419936010607, 1},\n\t{6520600751076202295, 3},\n\t{6527985924303547936, 2},\n\t{6528703369887053040, 2},\n\t{6531236654821947146, 0},\n\t{6532234643741018032, 1},\n\t{6534269957520710106, 3},\n\t{6539954014528808682, 0},\n\t{6539997341028816240, 0},\n\t{6540172930042703891, 0},\n\t{6542250843073855307, 2},\n\t{6543910944682596200, 0},\n\t{6544450762467484691, 0},\n\t{6548215167569192562, 3},\n\t{6550516805811294984, 2},\n\t{6551305884531541725, 2},\n\t{6553365778044715946, 0},\n\t{6554544128231145118, 1},\n\t{6554958188845837039, 1},\n\t{6559151970632684225, 1},\n\t{6559881835642967957, 2},\n\t{6560017648844307004, 2},\n\t{6561210495529215889, 3},\n\t{6562319384115714703, 0},\n\t{6570887929632950909, 0},\n\t{6572130873226439976, 1},\n\t{6574457238973492227, 3},\n\t{6575604826054365000, 0},\n\t{6576907269598258784, 1},\n\t{6580829947241367053, 0},\n\t{6583057682466835019, 2},\n\t{6583436886249917290, 3},\n\t{6585345545757085720, 0},\n\t{6586538759272518384, 2},\n\t{6586593063465539185, 2},\n\t{6587701269019651503, 3},\n\t{6589882056855857990, 0},\n\t{6592290513260960794, 3},\n\t{6592778596056533482, 3},\n\t{6593816055611080311, 0},\n\t{6594332904671476016, 0},\n\t{6594595749475039055, 1},\n\t{6597518441366298266, 3},\n\t{6597556665498094721, 
3},\n\t{6597584186783312391, 3},\n\t{6597598438219715445, 3},\n\t{6599077620671871350, 1},\n\t{6601281181601482005, 3},\n\t{6601495475543548638, 3},\n\t{6602192606518511600, 3},\n\t{6603298061623698105, 0},\n\t{6606759604426632276, 3},\n\t{6607765128979025622, 0},\n\t{6608510724790257085, 1},\n\t{6609448262200057475, 2},\n\t{6610166225087857678, 3},\n\t{6611103914183686804, 3},\n\t{6611559696264760600, 0},\n\t{6613551438743261199, 2},\n\t{6614587697959255231, 2},\n\t{6615135800457248259, 3},\n\t{6618136082274067543, 2},\n\t{6618936114894621728, 2},\n\t{6621087346703464851, 0},\n\t{6624898969056576212, 0},\n\t{6626748552710298833, 1},\n\t{6626873139033783632, 1},\n\t{6630493727057694906, 1},\n\t{6631219579097519351, 1},\n\t{6632445588045569957, 2},\n\t{6636933745239403690, 2},\n\t{6644767978969745184, 1},\n\t{6647145021473000986, 3},\n\t{6647487660255629863, 0},\n\t{6651433721354068515, 3},\n\t{6652267273357888469, 0},\n\t{6653207797624116130, 1},\n\t{6654995460779929909, 2},\n\t{6659380794685690615, 2},\n\t{6660927669970104245, 0},\n\t{6662203287674694997, 1},\n\t{6662714084989294155, 1},\n\t{6664036881919519538, 2},\n\t{6665215989353244102, 3},\n\t{6665877352386445182, 0},\n\t{6667100729189311142, 1},\n\t{6671890554178081965, 1},\n\t{6676942655145773373, 2},\n\t{6677473693977314017, 2},\n\t{6679568149966524458, 0},\n\t{6680132605600041215, 1},\n\t{6682185696023661728, 2},\n\t{6682918577259585619, 3},\n\t{6683494392105064449, 0},\n\t{6686894214493403307, 3},\n\t{6687835344397365981, 3},\n\t{6687865944396060885, 0},\n\t{6688526805765165174, 0},\n\t{6690639712525564549, 2},\n\t{6694635133914923803, 2},\n\t{6695333324556035883, 2},\n\t{6695678096430593124, 2},\n\t{6696264161854610643, 3},\n\t{6697929934631098206, 0},\n\t{6698592544414388456, 1},\n\t{6700335413607770750, 3},\n\t{6701404741738755804, 0},\n\t{6703225196536791996, 1},\n\t{6705160525169732528, 3},\n\t{6708759644857525705, 2},\n\t{6711608163130472641, 1},\n\t{6713813763704636578, 
3},\n\t{6714755817127414755, 3},\n\t{6717018220341283065, 1},\n\t{6720690402514811950, 1},\n\t{6722150246009876005, 2},\n\t{6723295749910667690, 3},\n\t{6723721730541776127, 3},\n\t{6727384882306888541, 3},\n\t{6728066752261338427, 3},\n\t{6728752938726286885, 0},\n\t{6728780905164492206, 0},\n\t{6729110101973475982, 0},\n\t{6732301768602618645, 3},\n\t{6733525801870766243, 0},\n\t{6734310940621491651, 1},\n\t{6736425787958235904, 3},\n\t{6739054957632534211, 1},\n\t{6740427543588866406, 2},\n\t{6741354565349208059, 3},\n\t{6742565357409368266, 0},\n\t{6743361077598008674, 1},\n\t{6750616850088853509, 3},\n\t{6752026187796057243, 1},\n\t{6753124560502613091, 1},\n\t{6753269370634114232, 2},\n\t{6754198366028747832, 2},\n\t{6755705544634180287, 0},\n\t{6758125802653040540, 2},\n\t{6765696922558919483, 1},\n\t{6767321517567487250, 2},\n\t{6771281185099087456, 2},\n\t{6772188647095675234, 2},\n\t{6776371455752558421, 2},\n\t{6777064052516098499, 3},\n\t{6778947233728609393, 0},\n\t{6780695881372496451, 2},\n\t{6784478599185599894, 1},\n\t{6786922396611765972, 3},\n\t{6787111338529276268, 0},\n\t{6788688208397593164, 1},\n\t{6791075702297112545, 3},\n\t{6797872609731343778, 1},\n\t{6798010389213349124, 1},\n\t{6801035063334417143, 0},\n\t{6802483736108368654, 1},\n\t{6803393609938389154, 2},\n\t{6806503814488633889, 1},\n\t{6808866330576144754, 3},\n\t{6810584639803548389, 1},\n\t{6812182102140845367, 2},\n\t{6812890342201098953, 3},\n\t{6814314430821799652, 0},\n\t{6814482623607729294, 0},\n\t{6816683091071279382, 2},\n\t{6816860026843275861, 2},\n\t{6817007918265302588, 2},\n\t{6820522522910544850, 1},\n\t{6821601391216486242, 2},\n\t{6824235076006457579, 1},\n\t{6830811579751924374, 2},\n\t{6833682489435403224, 1},\n\t{6833824694803940187, 1},\n\t{6835536512041679926, 3},\n\t{6836242331166410785, 3},\n\t{6838184657405461716, 1},\n\t{6840286870109587224, 3},\n\t{6844202795544180284, 2},\n\t{6844361489718293122, 3},\n\t{6845341054127907539, 
3},\n\t{6846392402982579383, 0},\n\t{6861239562394765963, 2},\n\t{6865491212194485137, 1},\n\t{6867450729920789292, 3},\n\t{6868969042393466055, 0},\n\t{6874643495759733482, 1},\n\t{6875685699004955174, 2},\n\t{6878696115042386533, 1},\n\t{6879129824056958868, 1},\n\t{6880635068513631658, 3},\n\t{6882965098398331561, 1},\n\t{6884162701291018287, 2},\n\t{6889788661252486804, 3},\n\t{6890892228019165723, 0},\n\t{6892332220476511424, 1},\n\t{6893106971966404234, 2},\n\t{6894221354962172456, 3},\n\t{6896735827746825260, 1},\n\t{6898194410841208017, 2},\n\t{6900779261232705109, 1},\n\t{6901346070537206780, 1},\n\t{6902017742415621221, 2},\n\t{6902054849283586006, 2},\n\t{6903197425397102486, 3},\n\t{6903322212119345913, 3},\n\t{6903438386933387315, 3},\n\t{6903798721442365664, 3},\n\t{6906976492484241699, 2},\n\t{6907082225818826618, 2},\n\t{6910138644331558936, 1},\n\t{6910283610102036964, 1},\n\t{6911534242619099897, 2},\n\t{6914728625341019896, 1},\n\t{6915021225271241590, 1},\n\t{6917125994264349757, 3},\n\t{6917870237627358305, 0},\n\t{6918112376917038303, 0},\n\t{6918366013419546390, 0},\n\t{6919963461368826549, 2},\n\t{6920051263156088230, 2},\n\t{6920475083242951046, 2},\n\t{6921310980554948555, 3},\n\t{6922770820529218226, 0},\n\t{6924260787226518972, 1},\n\t{6928241615584285380, 1},\n\t{6928478234842745003, 1},\n\t{6929766733662645727, 2},\n\t{6929989534315585593, 3},\n\t{6930234159075987068, 3},\n\t{6932794420811503340, 1},\n\t{6934358934427842116, 2},\n\t{6936780389476756271, 1},\n\t{6937508073552055508, 1},\n\t{6938372515710882062, 2},\n\t{6941109171231709763, 0},\n\t{6942805915844354326, 2},\n\t{6943762346250726795, 3},\n\t{6944500316115619299, 3},\n\t{6948779285788493109, 3},\n\t{6949432918879492730, 0},\n\t{6954327906021638485, 0},\n\t{6960117946768357389, 1},\n\t{6960427156745953594, 2},\n\t{6962282411017498113, 3},\n\t{6965915839719520234, 2},\n\t{6974540124911396009, 2},\n\t{6975294715127385000, 3},\n\t{6977247307767273835, 
1},\n\t{6977265916937695331, 1},\n\t{6978295192930028604, 1},\n\t{6980319128666416781, 3},\n\t{6981872716714081277, 1},\n\t{6982286752010508994, 1},\n\t{6983820876336442304, 2},\n\t{6984058398646912514, 3},\n\t{6984433246166513497, 3},\n\t{6986144720146655772, 0},\n\t{6987964923555803968, 2},\n\t{6988158263302920400, 2},\n\t{6990371111415938791, 0},\n\t{6991714888220760968, 1},\n\t{6992823134322374992, 2},\n\t{6998946183504526992, 0},\n\t{7001301327074520702, 2},\n\t{7001427934049115139, 2},\n\t{7004943074148511173, 1},\n\t{7005344199955618068, 1},\n\t{7007531782531791260, 3},\n\t{7013433913089786874, 1},\n\t{7013660359284554411, 1},\n\t{7018061137570227434, 1},\n\t{7019167410475637663, 2},\n\t{7020359324405856401, 3},\n\t{7021238746980193221, 0},\n\t{7023168536638914862, 1},\n\t{7024926148044640952, 3},\n\t{7031017602220309808, 0},\n\t{7033400685464683640, 2},\n\t{7034173210600912549, 3},\n\t{7036478515432020726, 1},\n\t{7036597359216271270, 1},\n\t{7041155319222071786, 1},\n\t{7043791495660968780, 0},\n\t{7045870668790663315, 1},\n\t{7047273848495108896, 3},\n\t{7049099923383314296, 0},\n\t{7050041168063665779, 1},\n\t{7052355472085295025, 3},\n\t{7052519164086853688, 3},\n\t{7052565325474925091, 3},\n\t{7052844633402620283, 0},\n\t{7053622598273641699, 0},\n\t{7054601105583186872, 1},\n\t{7058599742353379340, 1},\n\t{7062447948835092842, 0},\n\t{7064286146105437853, 2},\n\t{7064394616805395114, 2},\n\t{7064944202465335759, 2},\n\t{7064952825921069700, 2},\n\t{7065476455957413848, 3},\n\t{7065987284657550756, 3},\n\t{7066278076896759570, 0},\n\t{7071801491804119563, 1},\n\t{7073192079694939763, 2},\n\t{7084295117068442472, 0},\n\t{7089303965668082514, 0},\n\t{7090442245819037108, 1},\n\t{7090890649216270393, 1},\n\t{7091800773799401599, 2},\n\t{7092461874792753304, 3},\n\t{7093173105132434680, 0},\n\t{7095566182179465279, 2},\n\t{7095702131778021243, 2},\n\t{7098178601251586829, 0},\n\t{7099862990455358511, 1},\n\t{7101053585573745315, 
3},\n\t{7103027081340390090, 0},\n\t{7105324110230993503, 2},\n\t{7105394272641749291, 2},\n\t{7105744320170434115, 3},\n\t{7106916584125047412, 0},\n\t{7108603184994970302, 1},\n\t{7110074794815049047, 3},\n\t{7119378345542052422, 3},\n\t{7120655015737065803, 0},\n\t{7122646175965063552, 2},\n\t{7123364808376610479, 2},\n\t{7124661461133590789, 3},\n\t{7124946103666626693, 0},\n\t{7125095977738666891, 0},\n\t{7125384924038271243, 0},\n\t{7125940952594982585, 1},\n\t{7126547711992422524, 1},\n\t{7127774681272783785, 2},\n\t{7128969656120329717, 3},\n\t{7134333243636657799, 0},\n\t{7135896737480962619, 1},\n\t{7136902074504964746, 2},\n\t{7139041838124169939, 0},\n\t{7139478370992174694, 1},\n\t{7142110783349586663, 3},\n\t{7146308211326387385, 3},\n\t{7149658409710091125, 2},\n\t{7155335360813044004, 3},\n\t{7163393164083175949, 2},\n\t{7164952265600023573, 3},\n\t{7168025695898353792, 2},\n\t{7168130160027886120, 2},\n\t{7169709315728876647, 3},\n\t{7170870277106080415, 1},\n\t{7171698304806323789, 1},\n\t{7175107104187691807, 0},\n\t{7176864863562416647, 2},\n\t{7177319266616357498, 2},\n\t{7178226904803203911, 3},\n\t{7179565624328348226, 0},\n\t{7179961509515838763, 1},\n\t{7183506378306968474, 0},\n\t{7184334163225346667, 0},\n\t{7187204757923255264, 3},\n\t{7188374961227716913, 0},\n\t{7189355544912629138, 1},\n\t{7189685262353961111, 1},\n\t{7191881241846497627, 3},\n\t{7193027733300463056, 0},\n\t{7194536295825327391, 2},\n\t{7194985784261031533, 2},\n\t{7196470201113812026, 3},\n\t{7197806261284467474, 0},\n\t{7198352879225966467, 1},\n\t{7200206507090296676, 3},\n\t{7200361664008914655, 3},\n\t{7201153796910597584, 3},\n\t{7201575974044987427, 0},\n\t{7202180134611819820, 0},\n\t{7202570924120676906, 1},\n\t{7204126629913410753, 2},\n\t{7206905386711677650, 1},\n\t{7208292598615153393, 2},\n\t{7208856502996428926, 2},\n\t{7209648521707408713, 3},\n\t{7210229576238625932, 3},\n\t{7210628618393637426, 0},\n\t{7211986723926748547, 
1},\n\t{7215712733239957294, 0},\n\t{7218873278686856379, 3},\n\t{7219393958510681817, 0},\n\t{7219680682165998798, 0},\n\t{7221375503734142850, 1},\n\t{7221919141618461319, 2},\n\t{7222629197027679753, 2},\n\t{7223209421064911282, 3},\n\t{7225240017859523365, 1},\n\t{7227374327375717145, 3},\n\t{7228885388802091009, 0},\n\t{7229623976457130652, 1},\n\t{7229771440799825811, 1},\n\t{7233599705422359159, 0},\n\t{7236295874596800565, 3},\n\t{7237957733188836615, 0},\n\t{7238413782554188982, 1},\n\t{7239097712875169141, 1},\n\t{7239270257624363753, 1},\n\t{7240776843671397080, 3},\n\t{7248633343905641856, 2},\n\t{7252551528077763742, 1},\n\t{7257067421969924559, 1},\n\t{7257999471252980417, 2},\n\t{7261025840513502295, 1},\n\t{7266316199927331899, 1},\n\t{7266903213081534058, 2},\n\t{7267856476405651302, 3},\n\t{7269434110184785823, 0},\n\t{7269868563076491509, 0},\n\t{7272938235546774024, 3},\n\t{7274307917466821142, 0},\n\t{7275021190744928671, 1},\n\t{7278336424371856510, 0},\n\t{7278866640734801419, 0},\n\t{7279653745853892362, 1},\n\t{7280358552543519605, 2},\n\t{7280856749579170805, 2},\n\t{7281235708946845532, 3},\n\t{7283929871426656989, 1},\n\t{7287956661078160484, 1},\n\t{7288682558444362985, 1},\n\t{7288725358117308959, 1},\n\t{7295186748708413175, 3},\n\t{7295411429099009647, 3},\n\t{7297900474404722507, 1},\n\t{7299500175052133608, 3},\n\t{7301112408297190440, 0},\n\t{7302025001939906183, 1},\n\t{7308448122173287170, 3},\n\t{7312040765035319296, 2},\n\t{7312168070320905349, 2},\n\t{7312840938177699261, 3},\n\t{7313787417623846931, 3},\n\t{7314126598737812066, 0},\n\t{7314305316987786237, 0},\n\t{7316562600173631109, 2},\n\t{7316973040316656489, 2},\n\t{7320928064703134051, 2},\n\t{7323171488422789044, 0},\n\t{7327708731476913271, 0},\n\t{7328403656896533466, 0},\n\t{7330032302130196934, 2},\n\t{7330944146100148831, 3},\n\t{7330963716307289410, 3},\n\t{7333830344899870083, 1},\n\t{7334068083911983716, 1},\n\t{7336432494229422124, 
0},\n\t{7341789377645603337, 0},\n\t{7343575467015822305, 2},\n\t{7347239504969594266, 1},\n\t{7348230346633952577, 2},\n\t{7349241713871838003, 3},\n\t{7349509080699159596, 3},\n\t{7350196715995343304, 0},\n\t{7351116418752114848, 1},\n\t{7354780518608274257, 0},\n\t{7355339260645083026, 0},\n\t{7357607957509610821, 2},\n\t{7364472823391304124, 0},\n\t{7367631014443237592, 3},\n\t{7368016961128177490, 0},\n\t{7369852078285730525, 1},\n\t{7371383742260189342, 3},\n\t{7372375369646376972, 3},\n\t{7373003138115210561, 0},\n\t{7379228672333209682, 2},\n\t{7390589864994393234, 0},\n\t{7391684575887756702, 1},\n\t{7394073663540864511, 3},\n\t{7395786617882768714, 0},\n\t{7397049098057846028, 1},\n\t{7397293237452388291, 2},\n\t{7398488117637152561, 3},\n\t{7401531579500151117, 1},\n\t{7403370486281751861, 3},\n\t{7403661802090299012, 3},\n\t{7405320234200932966, 1},\n\t{7408227722422288148, 3},\n\t{7408592440427451153, 0},\n\t{7409020824362372432, 0},\n\t{7409521290932946537, 0},\n\t{7409921781539621458, 1},\n\t{7412762165528730729, 3},\n\t{7412770047606544270, 3},\n\t{7415959818851535873, 2},\n\t{7416190936850734706, 2},\n\t{7417792560505781181, 0},\n\t{7420734259054336977, 2},\n\t{7423137450690217676, 1},\n\t{7424243105371924102, 2},\n\t{7424722187067171475, 2},\n\t{7425241802840101985, 2},\n\t{7428621099896750673, 1},\n\t{7430655249244235155, 3},\n\t{7432268642192510184, 1},\n\t{7432788943998390121, 1},\n\t{7436258247402762339, 0},\n\t{7436987560392188270, 1},\n\t{7437709632165226978, 2},\n\t{7437990485752451845, 2},\n\t{7439189085888461008, 3},\n\t{7439424701861229218, 3},\n\t{7439875764032774174, 3},\n\t{7442827645393067535, 2},\n\t{7445746365484159330, 1},\n\t{7445945997701075015, 1},\n\t{7455395077332822751, 1},\n\t{7455711159137506446, 2},\n\t{7456543703610124212, 2},\n\t{7456993575851823021, 3},\n\t{7457673075235534113, 3},\n\t{7458909869773902539, 0},\n\t{7460005751295694055, 1},\n\t{7463238873162541036, 0},\n\t{7466888064101991020, 
3},\n\t{7467565986518722980, 0},\n\t{7469539149218206237, 2},\n\t{7471218314392972046, 3},\n\t{7471427676523744959, 3},\n\t{7475839232706068587, 3},\n\t{7478195536928595989, 1},\n\t{7479210621040232841, 2},\n\t{7479764188711520656, 3},\n\t{7481998999771059374, 1},\n\t{7482997990506633994, 2},\n\t{7484356765896361466, 3},\n\t{7486386340404728192, 1},\n\t{7487288570296382004, 2},\n\t{7488365668922210884, 3},\n\t{7490744772892375182, 1},\n\t{7493435673354912311, 3},\n\t{7494797619800367049, 0},\n\t{7498497291659472813, 0},\n\t{7499537676279182123, 0},\n\t{7501851182521229852, 2},\n\t{7502549730266342353, 3},\n\t{7503397669176812596, 0},\n\t{7504646752239251429, 1},\n\t{7507167475991248089, 3},\n\t{7507849424940725753, 0},\n\t{7510890252866121269, 3},\n\t{7512257993655592879, 0},\n\t{7514830027188638334, 2},\n\t{7515201663637533715, 2},\n\t{7515800613550627514, 3},\n\t{7519171246316366715, 2},\n\t{7519204787539612918, 2},\n\t{7520803781162979002, 3},\n\t{7521915577811263064, 0},\n\t{7522420879754112866, 1},\n\t{7523001979036737432, 1},\n\t{7524220499179743015, 2},\n\t{7524257096244191954, 2},\n\t{7524262656362780517, 2},\n\t{7525161513121738447, 3},\n\t{7525384991465306789, 3},\n\t{7525693762054162095, 0},\n\t{7525804604860049374, 0},\n\t{7526207308150848816, 0},\n\t{7526276945439953795, 0},\n\t{7529423254268956083, 3},\n\t{7530694134958348786, 0},\n\t{7531958722824493877, 1},\n\t{7535438747501653666, 0},\n\t{7541663839312047462, 2},\n\t{7548012888911632022, 3},\n\t{7549054062615437255, 0},\n\t{7556636951473808595, 3},\n\t{7557494776210986959, 0},\n\t{7559856193248004262, 2},\n\t{7562381332488722906, 0},\n\t{7563273342522911558, 1},\n\t{7563316446082013456, 1},\n\t{7563541689196308786, 1},\n\t{7564042667975700684, 2},\n\t{7568297265294929005, 1},\n\t{7572711061405647226, 1},\n\t{7574986525129671854, 3},\n\t{7581162217598167913, 1},\n\t{7581804508867389294, 1},\n\t{7581927675910089808, 2},\n\t{7582864254704168664, 2},\n\t{7584597602392575590, 
0},\n\t{7586941089109921862, 2},\n\t{7589467555657100834, 0},\n\t{7594064531137424074, 0},\n\t{7595687779929300358, 2},\n\t{7597325528548056917, 3},\n\t{7598224545919599078, 0},\n\t{7602032796792896374, 3},\n\t{7603135208899796422, 0},\n\t{7605002061153211570, 2},\n\t{7606538071756711856, 3},\n\t{7610080492160398699, 3},\n\t{7611708958842202712, 0},\n\t{7616697451730582879, 0},\n\t{7618476995187479398, 2},\n\t{7620434768286963612, 0},\n\t{7622299091237408309, 1},\n\t{7628193369260722142, 3},\n\t{7629642355145508873, 0},\n\t{7630147525901331008, 0},\n\t{7632184426473130631, 2},\n\t{7633826188516882863, 0},\n\t{7636153660322038675, 2},\n\t{7636708588660666164, 2},\n\t{7637449762267549991, 3},\n\t{7638149053070171084, 0},\n\t{7639273380053847724, 1},\n\t{7645393233901876368, 2},\n\t{7646192222623705183, 3},\n\t{7647805648263210852, 0},\n\t{7648445694823926647, 1},\n\t{7650110799757109786, 2},\n\t{7652533045352382061, 0},\n\t{7654689226197494963, 2},\n\t{7655895846812006245, 3},\n\t{7656757486410079232, 0},\n\t{7658637714416868424, 2},\n\t{7659114414227472151, 2},\n\t{7659190747591199755, 2},\n\t{7659211961168155554, 2},\n\t{7659881771467593434, 3},\n\t{7659965379261088583, 3},\n\t{7663432869594987511, 2},\n\t{7663555093867406897, 2},\n\t{7663830647764290931, 2},\n\t{7667945174887062992, 2},\n\t{7668298798740451921, 2},\n\t{7669712721401839420, 0},\n\t{7670097299873345458, 0},\n\t{7672040023932327118, 2},\n\t{7675843952489193310, 1},\n\t{7677044163150681064, 2},\n\t{7677757320459238313, 3},\n\t{7678609718799028127, 3},\n\t{7678628540505374498, 3},\n\t{7681036646203293629, 2},\n\t{7683177126248571812, 0},\n\t{7685147536786511122, 1},\n\t{7685491040834708987, 2},\n\t{7685632478651800498, 2},\n\t{7686157800065484724, 2},\n\t{7686420229543185761, 2},\n\t{7687183771696139583, 3},\n\t{7687340176602374842, 3},\n\t{7694125092571277285, 1},\n\t{7694244283536735072, 1},\n\t{7695582423516131386, 3},\n\t{7696482424698998506, 3},\n\t{7699443445059737843, 
2},\n\t{7700624525155729014, 3},\n\t{7702363340767833046, 1},\n\t{7702400849774105385, 1},\n\t{7709905394052239019, 3},\n\t{7711314621507022039, 1},\n\t{7713000169299889739, 2},\n\t{7713665210637642883, 3},\n\t{7713735287283453950, 3},\n\t{7717028668400501155, 2},\n\t{7718439172730920981, 3},\n\t{7719372945942160203, 0},\n\t{7722013505397540712, 2},\n\t{7722166155296996937, 2},\n\t{7728628555655437625, 0},\n\t{7729167210875532700, 0},\n\t{7730887540482148756, 2},\n\t{7732766727550172684, 0},\n\t{7734618335535508664, 1},\n\t{7735832410459952027, 2},\n\t{7738380247857777778, 1},\n\t{7738560876489338119, 1},\n\t{7738865789466429036, 1},\n\t{7739687571797392783, 2},\n\t{7746988243378123128, 0},\n\t{7751428670649677689, 0},\n\t{7757386355416624973, 1},\n\t{7759162310409033896, 3},\n\t{7761767087771149001, 1},\n\t{7761796199095889610, 1},\n\t{7761844112910606231, 1},\n\t{7766562184437244103, 2},\n\t{7768072682167455325, 3},\n\t{7768299659469800808, 3},\n\t{7769531691621578276, 0},\n\t{7771033327731920422, 2},\n\t{7773602322060718678, 0},\n\t{7775447509222303391, 1},\n\t{7776073033296487292, 2},\n\t{7777269599966622050, 3},\n\t{7777608478693522503, 3},\n\t{7784978665353641505, 2},\n\t{7786671038905821035, 3},\n\t{7787501422546285311, 0},\n\t{7787825394145981362, 0},\n\t{7788013780855444042, 1},\n\t{7790128764198277724, 3},\n\t{7793699494633816414, 2},\n\t{7796508089859680180, 0},\n\t{7798508262176956668, 2},\n\t{7804245339745398730, 3},\n\t{7804292828423376847, 3},\n\t{7804697185817466663, 3},\n\t{7806184518073542201, 1},\n\t{7807193308434877287, 2},\n\t{7810006636724842419, 0},\n\t{7811606728357896063, 2},\n\t{7814456200566403330, 0},\n\t{7815010142181657955, 1},\n\t{7816232931943818338, 2},\n\t{7819249849757855277, 0},\n\t{7820448160347709551, 1},\n\t{7820738530412289482, 2},\n\t{7822809523555648231, 0},\n\t{7824365774894920217, 1},\n\t{7826500289029853507, 3},\n\t{7828301911132263813, 0},\n\t{7829245629508416240, 1},\n\t{7830105168492723735, 
2},\n\t{7830536893839575650, 2},\n\t{7830555835118351487, 2},\n\t{7836607938660911840, 0},\n\t{7838498121769741893, 1},\n\t{7838570772287921909, 2},\n\t{7840193433173960272, 3},\n\t{7840218237062559512, 3},\n\t{7845977419549595339, 0},\n\t{7851872040397647620, 1},\n\t{7865802779016932570, 2},\n\t{7866725776382550926, 3},\n\t{7866739997801656088, 3},\n\t{7868026101056726149, 0},\n\t{7868785034312199345, 0},\n\t{7871341695597306000, 3},\n\t{7872692084102529250, 0},\n\t{7872735095149838690, 0},\n\t{7877098103765737445, 0},\n\t{7878483534039941040, 1},\n\t{7880657672791846765, 3},\n\t{7884921840951818892, 3},\n\t{7887893019310725469, 1},\n\t{7888041909585437246, 1},\n\t{7888568112933994110, 2},\n\t{7892511457337125662, 1},\n\t{7892585062413955993, 2},\n\t{7892904780940957366, 2},\n\t{7894355649186152220, 3},\n\t{7894906962099957943, 0},\n\t{7896866936128933995, 1},\n\t{7897322321665698311, 2},\n\t{7900675393672177992, 1},\n\t{7902525274662638678, 2},\n\t{7904776081144655982, 0},\n\t{7905971573245125981, 1},\n\t{7906429611623763098, 2},\n\t{7908574732851492526, 0},\n\t{7910397049559299062, 1},\n\t{7910543121058072843, 1},\n\t{7916900029627956377, 3},\n\t{7918251789239587924, 0},\n\t{7920395432722775052, 2},\n\t{7923241027053446765, 1},\n\t{7925996189143207295, 3},\n\t{7930036644656234168, 3},\n\t{7933541605213508048, 2},\n\t{7935892591257799039, 0},\n\t{7936846275621542535, 1},\n\t{7937165684711102897, 1},\n\t{7937522819004970784, 1},\n\t{7938300659289518204, 2},\n\t{7939810405693602405, 3},\n\t{7941456982881705013, 1},\n\t{7941473489857558922, 1},\n\t{7943050077784833975, 2},\n\t{7943590278347595839, 3},\n\t{7943628515727020426, 3},\n\t{7944644844553292248, 0},\n\t{7952461408807785160, 3},\n\t{7956051128385237182, 2},\n\t{7959578859012041936, 1},\n\t{7960748682733728124, 2},\n\t{7960966384169805946, 2},\n\t{7962802374442934622, 0},\n\t{7963442496738206995, 0},\n\t{7976949707798235096, 0},\n\t{7976961532036847394, 0},\n\t{7978659501208544399, 
2},\n\t{7979991647296020792, 3},\n\t{7981785194652236377, 1},\n\t{7983537489297478989, 2},\n\t{7985440763466884054, 0},\n\t{7986008985985153196, 1},\n\t{7988746105053037247, 3},\n\t{7991768502960008939, 2},\n\t{7991769871323277036, 2},\n\t{7992685670286689864, 2},\n\t{7995029359670810079, 1},\n\t{7997707579522562490, 3},\n\t{7999187680449258857, 0},\n\t{7999538436692306174, 1},\n\t{8000579701042449737, 1},\n\t{8001837399712796664, 3},\n\t{8002115532738795782, 3},\n\t{8002845887140222485, 3},\n\t{8005434673764166358, 2},\n\t{8006143067573694566, 2},\n\t{8007912654530563351, 0},\n\t{8008146817946886729, 0},\n\t{8009056293937277215, 1},\n\t{8013407540783403010, 1},\n\t{8013722145101233616, 1},\n\t{8014064930370196043, 1},\n\t{8015257563164973674, 2},\n\t{8015573398072388195, 3},\n\t{8016315096859492355, 3},\n\t{8017298722492154410, 0},\n\t{8017570683142408687, 1},\n\t{8018968190168991793, 2},\n\t{8021871494822600548, 0},\n\t{8024288693677815549, 3},\n\t{8024754498264663892, 3},\n\t{8026518538630837114, 0},\n\t{8030122709039660892, 0},\n\t{8033933975522942330, 3},\n\t{8034283130585146137, 3},\n\t{8037449966729422089, 2},\n\t{8040049958223863035, 0},\n\t{8040842597125808739, 1},\n\t{8049030052757711178, 0},\n\t{8049987854820755522, 1},\n\t{8050558802346193395, 2},\n\t{8050767334343762975, 2},\n\t{8051180131220781164, 2},\n\t{8052794010964433406, 0},\n\t{8053176062063680388, 0},\n\t{8057652967642298928, 0},\n\t{8057762172349735371, 0},\n\t{8058462327052922861, 1},\n\t{8060353270406235677, 3},\n\t{8060594257477753589, 3},\n\t{8064900350891301971, 3},\n\t{8068799182823062338, 2},\n\t{8071217097716235170, 0},\n\t{8071767619558522363, 1},\n\t{8077479003342000764, 2},\n\t{8077917963821992728, 2},\n\t{8079851019091869509, 0},\n\t{8083758699706230480, 3},\n\t{8093389695832876496, 0},\n\t{8093849070284364175, 0},\n\t{8096378545251979595, 3},\n\t{8097190511588148847, 3},\n\t{8097731688522175882, 0},\n\t{8098452001442713018, 0},\n\t{8100005875358742155, 
2},\n\t{8104012627918881656, 1},\n\t{8104044978575683630, 1},\n\t{8105000221026489976, 2},\n\t{8107003132196123439, 0},\n\t{8112174023821410231, 1},\n\t{8112391766500359021, 1},\n\t{8114123380673531032, 2},\n\t{8114220363642010454, 2},\n\t{8115227508893082397, 3},\n\t{8116085666296804061, 0},\n\t{8116520938587518703, 0},\n\t{8117192677013899845, 1},\n\t{8119186999576360262, 3},\n\t{8120606322757578876, 0},\n\t{8121582467031152655, 1},\n\t{8127278127031400694, 2},\n\t{8148532541786155008, 1},\n\t{8150408349802589233, 3},\n\t{8150540179861677648, 3},\n\t{8150767762100444853, 3},\n\t{8152412964826781423, 0},\n\t{8152990242110475952, 1},\n\t{8153787183861665735, 2},\n\t{8154414151803687635, 2},\n\t{8155477488662249843, 3},\n\t{8158011799424467301, 1},\n\t{8159449394921010764, 3},\n\t{8162742975497611968, 1},\n\t{8164659839968985714, 3},\n\t{8167597398835967075, 2},\n\t{8169781764168957256, 0},\n\t{8170702944460419345, 1},\n\t{8172412269494145896, 2},\n\t{8173827239868549200, 3},\n\t{8174715496050449028, 0},\n\t{8175040411359102000, 0},\n\t{8175187430564476476, 1},\n\t{8186231891014030513, 2},\n\t{8186376010078994326, 2},\n\t{8188134322433589614, 0},\n\t{8188345717467169156, 0},\n\t{8192429663926731500, 0},\n\t{8192737022918494591, 0},\n\t{8195232928372100184, 2},\n\t{8196718743995802332, 0},\n\t{8196905169693750275, 0},\n\t{8198415869918801761, 1},\n\t{8198691710932751020, 1},\n\t{8200153012376469678, 3},\n\t{8200745560817025027, 3},\n\t{8204827305969967008, 3},\n\t{8209042066664402518, 3},\n\t{8209177014858882481, 3},\n\t{8210418866863760924, 0},\n\t{8213890257173031708, 3},\n\t{8214344340547193955, 3},\n\t{8215267037619602661, 0},\n\t{8216504310108223117, 1},\n\t{8216790780351461713, 1},\n\t{8218262732660825538, 3},\n\t{8218691563815976182, 3},\n\t{8219033348783266781, 3},\n\t{8219671150731276827, 0},\n\t{8223253715000814767, 3},\n\t{8223299554338339812, 3},\n\t{8225034440210636948, 1},\n\t{8225462898255113758, 1},\n\t{8230689709517881055, 
2},\n\t{8231674002626480405, 3},\n\t{8233231122304316262, 0},\n\t{8234445411125250618, 1},\n\t{8239757209519363373, 2},\n\t{8242843673717123613, 1},\n\t{8243329526725075127, 1},\n\t{8243577931036888576, 1},\n\t{8249610846206486299, 3},\n\t{8249979185474747226, 3},\n\t{8252216759801775061, 1},\n\t{8253828339881301963, 2},\n\t{8254217932357719749, 3},\n\t{8254467331171144598, 3},\n\t{8258253391793462438, 2},\n\t{8259523488489007419, 3},\n\t{8261738544790768524, 1},\n\t{8264230449965386730, 0},\n\t{8264570083381650043, 0},\n\t{8267848172783910380, 3},\n\t{8268339861393164434, 3},\n\t{8269465132232115011, 0},\n\t{8270451738745783659, 1},\n\t{8271403501926865248, 2},\n\t{8274364529672028137, 1},\n\t{8275054463686978127, 1},\n\t{8282933589830002528, 0},\n\t{8283925976356639036, 1},\n\t{8284820931879263419, 2},\n\t{8284905071952546849, 2},\n\t{8285594760221674332, 3},\n\t{8288524003519431923, 1},\n\t{8289509785634189113, 2},\n\t{8294661371673442165, 3},\n\t{8295840290968281616, 0},\n\t{8296863956415889693, 1},\n\t{8298002286009257927, 2},\n\t{8300467309605536203, 0},\n\t{8303998530869801891, 3},\n\t{8304460675037950677, 3},\n\t{8305635340369162653, 0},\n\t{8308609594381779176, 3},\n\t{8309060277817474292, 3},\n\t{8311485755051557458, 2},\n\t{8312390233679676667, 2},\n\t{8312681205186471327, 3},\n\t{8314442920082478415, 0},\n\t{8314572585579527932, 0},\n\t{8315968292214743930, 2},\n\t{8317786831718601862, 3},\n\t{8318749796678087881, 0},\n\t{8321752454968208888, 3},\n\t{8323210176224501595, 0},\n\t{8323736503057789090, 0},\n\t{8324689917188530009, 1},\n\t{8326556165409224098, 3},\n\t{8326850041978247691, 3},\n\t{8327405947415640490, 0},\n\t{8328030160131413359, 0},\n\t{8329125965493322687, 1},\n\t{8333983164812244028, 2},\n\t{8335703151636916218, 3},\n\t{8336524928779035034, 0},\n\t{8340451866762529289, 3},\n\t{8340466806472593292, 3},\n\t{8340501542589775986, 3},\n\t{8345443520379316135, 0},\n\t{8346373975982879110, 1},\n\t{8347442908956980596, 
2},\n\t{8348339482229214830, 2},\n\t{8356367532342220296, 1},\n\t{8356634985691482293, 2},\n\t{8357309323187017107, 2},\n\t{8357347202607980608, 2},\n\t{8358093563230912442, 3},\n\t{8362061795228126637, 3},\n\t{8362636407604295011, 3},\n\t{8363448751146694535, 0},\n\t{8364094316323883521, 0},\n\t{8364173254635099791, 0},\n\t{8364786835497513340, 1},\n\t{8365118422387876449, 1},\n\t{8366608556645181664, 3},\n\t{8366772594534571305, 3},\n\t{8369708033552873953, 1},\n\t{8370932988997749760, 2},\n\t{8375282569020175184, 2},\n\t{8375515940093304751, 2},\n\t{8375537075439695812, 2},\n\t{8376327975948524419, 3},\n\t{8376912416720573217, 0},\n\t{8377046140202950266, 0},\n\t{8377645206378399231, 0},\n\t{8379825368832325487, 2},\n\t{8380001625780181272, 2},\n\t{8380305185285746081, 3},\n\t{8382239768817927538, 0},\n\t{8385742438717847245, 0},\n\t{8387198822739989280, 1},\n\t{8387755233473232669, 1},\n\t{8394104195021561325, 3},\n\t{8394239784655208493, 3},\n\t{8395087522858079634, 0},\n\t{8397548768711828341, 2},\n\t{8401012731861844796, 1},\n\t{8405349729907987605, 1},\n\t{8406533919303458484, 2},\n\t{8408817949690271703, 0},\n\t{8410348456151395456, 1},\n\t{8410689808519203064, 2},\n\t{8412111066543962230, 3},\n\t{8413333178039810164, 0},\n\t{8414024643162677267, 1},\n\t{8416228101465709428, 3},\n\t{8416562430108537754, 3},\n\t{8417354384712485677, 0},\n\t{8417361497177812416, 0},\n\t{8417822991022764153, 0},\n\t{8418858047195550638, 1},\n\t{8420467047752901998, 2},\n\t{8421156346043822582, 3},\n\t{8423231468811497619, 1},\n\t{8433903182351023529, 2},\n\t{8435060098192054799, 3},\n\t{8441895623112146792, 1},\n\t{8442133403548049952, 2},\n\t{8442352739916070476, 2},\n\t{8442476405010842960, 2},\n\t{8444726840151372871, 0},\n\t{8446548310840205412, 2},\n\t{8448394379130782129, 3},\n\t{8448644485505501221, 3},\n\t{8449972942657550434, 1},\n\t{8450560086303552475, 1},\n\t{8452285475470254786, 3},\n\t{8454769060124752019, 1},\n\t{8455429094869338842, 
1},\n\t{8455893963989903062, 2},\n\t{8461369245586699838, 3},\n\t{8462112073354606517, 3},\n\t{8462519140778323204, 0},\n\t{8463476691614102718, 1},\n\t{8465112964251593447, 2},\n\t{8466561070444308455, 3},\n\t{8467552360600473088, 0},\n\t{8469683935699482732, 2},\n\t{8470076913949444496, 2},\n\t{8471825296952223937, 0},\n\t{8472779201767331645, 1},\n\t{8474117312471289817, 2},\n\t{8474738760877740811, 3},\n\t{8477513015469080731, 1},\n\t{8477624942326090040, 1},\n\t{8480080070321033636, 3},\n\t{8485644207730875954, 0},\n\t{8486931316666653249, 1},\n\t{8488493833253468072, 3},\n\t{8489018748877322875, 3},\n\t{8491365252673729907, 1},\n\t{8496438962971037053, 2},\n\t{8496832031861980794, 2},\n\t{8496978267868974125, 2},\n\t{8500553243240043440, 2},\n\t{8502152999219918403, 3},\n\t{8503669106105849810, 0},\n\t{8506446547879894731, 3},\n\t{8507004865978969668, 3},\n\t{8508138533330561846, 0},\n\t{8509042663487713835, 1},\n\t{8512814699604634573, 0},\n\t{8513531237152180855, 1},\n\t{8514052162454177352, 1},\n\t{8514659706212537498, 2},\n\t{8521003260352419801, 0},\n\t{8522154100195149199, 1},\n\t{8533960865939415426, 3},\n\t{8535809010608235857, 1},\n\t{8537322905046267597, 2},\n\t{8544122822063491271, 0},\n\t{8544885153945275019, 1},\n\t{8544911790224905489, 1},\n\t{8545516859409474333, 1},\n\t{8545775432839522402, 2},\n\t{8546535698405135703, 2},\n\t{8547008824798062105, 3},\n\t{8547989919210321567, 0},\n\t{8548076619168712509, 0},\n\t{8550328057917393919, 2},\n\t{8555278161020220667, 2},\n\t{8556948632399490607, 0},\n\t{8558580816609305738, 1},\n\t{8561391129339216835, 0},\n\t{8564523321601239224, 2},\n\t{8566052373820288742, 0},\n\t{8567541237672157618, 1},\n\t{8571481787597486391, 1},\n\t{8574151222711593392, 3},\n\t{8576217433180579550, 1},\n\t{8579242458966266897, 3},\n\t{8580374922128271797, 0},\n\t{8580761042547102222, 1},\n\t{8582496345134531470, 2},\n\t{8584418972140144704, 0},\n\t{8585007933030237871, 1},\n\t{8586049714738708795, 
1},\n\t{8590875638897003940, 2},\n\t{8592939006759521450, 0},\n\t{8594301318465192797, 1},\n\t{8594642978790378262, 1},\n\t{8595631671972097217, 2},\n\t{8596665809222571807, 3},\n\t{8597466005107645107, 0},\n\t{8600538900061309458, 2},\n\t{8609176568420185285, 2},\n\t{8611217186722961122, 0},\n\t{8611858997570073702, 0},\n\t{8614101850578394994, 2},\n\t{8615760036491987252, 0},\n\t{8618040894131847401, 2},\n\t{8620260678423448456, 0},\n\t{8624209230042000497, 3},\n\t{8627275366674772141, 2},\n\t{8627718897629466854, 2},\n\t{8628154862165225998, 3},\n\t{8632710955286493506, 3},\n\t{8633939701530389654, 0},\n\t{8635277321471886835, 1},\n\t{8635931891440204479, 2},\n\t{8641417180659258718, 3},\n\t{8642414224426603014, 0},\n\t{8642866632967975820, 0},\n\t{8644333995379892951, 1},\n\t{8645604651829730437, 2},\n\t{8647065247314230933, 0},\n\t{8648038679748613307, 1},\n\t{8651903194700517689, 0},\n\t{8652532604961058742, 0},\n\t{8656246648258445426, 0},\n\t{8659442245666248376, 3},\n\t{8660677265384323485, 0},\n\t{8661303031436092144, 0},\n\t{8665267678920996401, 0},\n\t{8666246142891793156, 1},\n\t{8667258594251879200, 2},\n\t{8668095087516913662, 2},\n\t{8677579648883816304, 3},\n\t{8678839712041446770, 0},\n\t{8679009859724199473, 0},\n\t{8679017287582235659, 0},\n\t{8683318678005844006, 0},\n\t{8686027461725544893, 2},\n\t{8687114629496478087, 3},\n\t{8687207514058569133, 3},\n\t{8692667920219205255, 0},\n\t{8695981218061639335, 3},\n\t{8696534312732566375, 0},\n\t{8697249482057670604, 0},\n\t{8698263393278667265, 1},\n\t{8698847479686058492, 2},\n\t{8699306102726856007, 2},\n\t{8705513475384084192, 0},\n\t{8706337311054859069, 0},\n\t{8706816447812868578, 1},\n\t{8707394252073633978, 1},\n\t{8711033790046141813, 0},\n\t{8713158956803646846, 2},\n\t{8713261564830468140, 2},\n\t{8714705443675092535, 0},\n\t{8715602625519057054, 1},\n\t{8718819306984602173, 3},\n\t{8728621314047940311, 0},\n\t{8740419509414506976, 3},\n\t{8741901311570877859, 
0},\n\t{8744428362506489542, 2},\n\t{8744943512276441891, 3},\n\t{8746156782384384467, 0},\n\t{8750183031380003170, 3},\n\t{8752066522595958822, 1},\n\t{8752219747961560742, 1},\n\t{8753253583694883309, 2},\n\t{8753824859801159210, 2},\n\t{8754510758434088893, 3},\n\t{8756440292573682494, 1},\n\t{8759935557672630601, 0},\n\t{8760642811714871451, 1},\n\t{8761229024940490940, 1},\n\t{8764890330690343460, 0},\n\t{8766031941567711277, 1},\n\t{8766958219444565781, 2},\n\t{8768838417072501110, 0},\n\t{8777931167855672272, 0},\n\t{8780262070352282248, 2},\n\t{8782431702771154648, 0},\n\t{8785078652726854689, 2},\n\t{8785860903439902006, 3},\n\t{8788900704897876828, 2},\n\t{8789185302371359190, 2},\n\t{8792062934274831448, 0},\n\t{8797497947866103029, 1},\n\t{8799108171344063584, 3},\n\t{8801117578161331203, 0},\n\t{8801193891696580701, 1},\n\t{8801400230069568872, 1},\n\t{8801814367990732642, 1},\n\t{8801969312011273550, 1},\n\t{8801969573182581211, 1},\n\t{8802151240149383774, 1},\n\t{8802424396209908148, 2},\n\t{8802561517891595564, 2},\n\t{8803671413929043565, 3},\n\t{8804673402816362738, 0},\n\t{8806226928389521982, 1},\n\t{8806851861152731451, 2},\n\t{8808128126120113304, 3},\n\t{8808336749445622050, 3},\n\t{8809266294232745918, 0},\n\t{8810202678525499747, 1},\n\t{8810436672736251324, 1},\n\t{8813990664419695628, 0},\n\t{8815968119488177655, 2},\n\t{8816338219606655388, 2},\n\t{8816836837652793404, 2},\n\t{8825221579567645309, 2},\n\t{8825228288601331294, 2},\n\t{8826482559944492341, 3},\n\t{8828660755664910426, 1},\n\t{8834663691614705767, 2},\n\t{8837323267818495495, 1},\n\t{8848273428886996454, 2},\n\t{8849630815178195441, 0},\n\t{8852769371258146283, 2},\n\t{8857775086364991486, 3},\n\t{8859294400788032917, 0},\n\t{8861403251534004560, 2},\n\t{8862507075241237299, 3},\n\t{8862586135009330401, 3},\n\t{8863967661557995554, 0},\n\t{8865359161832520249, 2},\n\t{8867174585918043423, 3},\n\t{8867981532231442801, 0},\n\t{8870645679389749242, 
2},\n\t{8873235671076387331, 1},\n\t{8874525870670529516, 2},\n\t{8876696455496022807, 0},\n\t{8878325348555107882, 1},\n\t{8880022518673358785, 3},\n\t{8881617203953076633, 0},\n\t{8886215292972159137, 0},\n\t{8886677649613413294, 0},\n\t{8890845524749336300, 0},\n\t{8891534018190566141, 1},\n\t{8892489602187080229, 2},\n\t{8894496721366302695, 3},\n\t{8896513886192874382, 1},\n\t{8898944760335435201, 3},\n\t{8899518244339025420, 0},\n\t{8902904664419776119, 3},\n\t{8903790054884701071, 0},\n\t{8910598092665810040, 2},\n\t{8911141245933353477, 2},\n\t{8912263771313892241, 3},\n\t{8915196926568706531, 2},\n\t{8921198285112571587, 3},\n\t{8926696357622658878, 0},\n\t{8927909811206404135, 1},\n\t{8928887319351489167, 2},\n\t{8929202961991520410, 2},\n\t{8930842243064825644, 0},\n\t{8932372966673693913, 1},\n\t{8932446420167856214, 1},\n\t{8932665482582846150, 1},\n\t{8933991808737496788, 2},\n\t{8935863617989923389, 0},\n\t{8936746675655021024, 1},\n\t{8936934712517527121, 1},\n\t{8937916709705154918, 2},\n\t{8938490372212431025, 2},\n\t{8942341754081012490, 2},\n\t{8943202980815635900, 3},\n\t{8948437094103829573, 3},\n\t{8950376186315720732, 1},\n\t{8950918074093999981, 2},\n\t{8951771999109633241, 2},\n\t{8952827279536411079, 3},\n\t{8953262429739367710, 0},\n\t{8954387075054690648, 1},\n\t{8954777655204591729, 1},\n\t{8956479164926963601, 2},\n\t{8959778815082916266, 1},\n\t{8963978940723666608, 1},\n\t{8967106235025308817, 0},\n\t{8968954275649960231, 2},\n\t{8969510896854235994, 2},\n\t{8971527407870203070, 0},\n\t{8971713444374712178, 0},\n\t{8972759444865536314, 1},\n\t{8975326172970725217, 3},\n\t{8975552259484029713, 3},\n\t{8983209907300300359, 2},\n\t{8983586831598409668, 3},\n\t{8985279399773627595, 0},\n\t{8986055034478589598, 1},\n\t{8986265358844895175, 1},\n\t{8989340846053888187, 0},\n\t{8990792628231778499, 1},\n\t{8991454223333596035, 2},\n\t{8991525099440074826, 2},\n\t{8993049957638156056, 3},\n\t{8994398178395633426, 
0},\n\t{8995234369596067534, 1},\n\t{8995270380879222110, 1},\n\t{8996127038342030962, 2},\n\t{8998768903758844477, 0},\n\t{8999035118549396101, 0},\n\t{8999467668100320477, 1},\n\t{9001402465467466894, 2},\n\t{9002631123861881623, 3},\n\t{9004630863402561846, 1},\n\t{9007014066314201619, 3},\n\t{9008591676274473002, 1},\n\t{9009865001362811260, 2},\n\t{9011504264626994843, 3},\n\t{9013142713823148190, 1},\n\t{9014711772233205553, 2},\n\t{9017328176781944395, 0},\n\t{9017867778444839106, 1},\n\t{9017984958609081444, 1},\n\t{9019336897688741500, 2},\n\t{9019395857951543452, 2},\n\t{9020254073416955736, 3},\n\t{9020257452404220067, 3},\n\t{9020481443925005558, 3},\n\t{9021434886084218241, 0},\n\t{9023077193616750903, 2},\n\t{9024814676469869818, 3},\n\t{9026189346563417194, 0},\n\t{9026308588793324014, 0},\n\t{9026384078998165586, 1},\n\t{9027712148946986885, 2},\n\t{9029096887121237369, 3},\n\t{9029384168847931836, 3},\n\t{9033049595634697012, 2},\n\t{9034147385043770367, 3},\n\t{9036013522052612905, 1},\n\t{9037204342890356043, 2},\n\t{9037531161043446013, 2},\n\t{9038853579939715426, 0},\n\t{9040639381169079355, 1},\n\t{9041058438875313629, 2},\n\t{9041098587048874675, 2},\n\t{9045207383281141015, 1},\n\t{9053030264973621164, 0},\n\t{9054248936407115313, 1},\n\t{9054326231634226583, 1},\n\t{9055911220672312967, 3},\n\t{9056081229145736417, 3},\n\t{9056093895932861461, 3},\n\t{9061225196233628321, 3},\n\t{9068211875715483742, 2},\n\t{9070757410129033928, 0},\n\t{9072540519157212402, 2},\n\t{9073760565693700666, 3},\n\t{9075736794111282292, 0},\n\t{9075978049186446977, 1},\n\t{9083745296129982860, 3},\n\t{9087408943067785160, 3},\n\t{9087731044895664529, 3},\n\t{9094724960177121711, 1},\n\t{9095189239094154798, 2},\n\t{9095508647436673929, 2},\n\t{9097475647951840428, 0},\n\t{9098633317613921552, 1},\n\t{9103362147068614817, 1},\n\t{9105272495095028544, 3},\n\t{9106461775371684260, 0},\n\t{9108041077909203955, 1},\n\t{9109156817871264837, 
2},\n\t{9111981028674696204, 1},\n\t{9112036793732803789, 1},\n\t{9119442121669333238, 3},\n\t{9123558123814184573, 3},\n\t{9124434217269606475, 0},\n\t{9124586802063395064, 0},\n\t{9129282860586328335, 0},\n\t{9137210243440259473, 3},\n\t{9138132364417684971, 0},\n\t{9138671230689286115, 0},\n\t{9146401162383718377, 3},\n\t{9147047684882162431, 0},\n\t{9151264726783959525, 3},\n\t{9154610909476106534, 2},\n\t{9155659481516160477, 3},\n\t{9157825333315209398, 1},\n\t{9161748311905994898, 1},\n\t{9163541838275329407, 2},\n\t{9165181996660680576, 0},\n\t{9167018380406759413, 1},\n\t{9169815834892129037, 0},\n\t{9174955522866467270, 0},\n\t{9183339094624243743, 0},\n\t{9184303865258274310, 1},\n\t{9185175068059017337, 2},\n\t{9187829891919878632, 0},\n\t{9191610275894508036, 3},\n\t{9192147570164982437, 0},\n\t{9194197254270776678, 2},\n\t{9194306748219568751, 2},\n\t{9198427616043000261, 1},\n\t{9199485016162953999, 2},\n\t{9200054798065318708, 3},\n\t{9200563736728780744, 3},\n\t{9200749901620093660, 3},\n\t{9201200566037300421, 0},\n\t{9203923190973454807, 2},\n\t{9205657223992507821, 0},\n\t{9205919749081689000, 0},\n\t{9207233672484108073, 1},\n\t{9208939405210219392, 3},\n\t{9209692744597551944, 3},\n\t{9222568625800748744, 3},\n\t{9222582454147032830, 3},\n}\n"
  },
  {
    "path": "scyllacloud/cluster.go",
    "content": "// Copyright (C) 2021 ScyllaDB\n\npackage scyllacloud\n\nimport (\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\n\t\"sigs.k8s.io/yaml\"\n\n\t\"github.com/gocql/gocql\"\n)\n\nfunc NewCloudCluster(bundlePath string) (*gocql.ClusterConfig, error) {\n\tconnConf := &ConnectionConfig{}\n\n\tbundleFile, err := os.ReadFile(bundlePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't open bundle path: %w\", err)\n\t}\n\n\tif err := yaml.Unmarshal(bundleFile, connConf); err != nil {\n\t\treturn nil, fmt.Errorf(\"can't decode bundle file at %q: %w\", bundlePath, err)\n\t}\n\n\tif _, ok := connConf.Contexts[connConf.CurrentContext]; !ok {\n\t\treturn nil, fmt.Errorf(\"current context points to unknown context\")\n\t}\n\n\tconfContext := connConf.Contexts[connConf.CurrentContext]\n\n\tif _, ok := connConf.AuthInfos[confContext.AuthInfoName]; !ok {\n\t\treturn nil, fmt.Errorf(\"context %q auth info points to unknown authinfo\", connConf.CurrentContext)\n\t}\n\n\tif _, ok := connConf.Datacenters[confContext.DatacenterName]; !ok {\n\t\treturn nil, fmt.Errorf(\"context %q datacenter points to unknown datacenter\", connConf.CurrentContext)\n\t}\n\n\tauthInfo := connConf.AuthInfos[confContext.AuthInfoName]\n\n\tcaPool, err := connConf.GetRootCAPool()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't create root CA pool: %w\", err)\n\t}\n\n\tcc := gocql.NewCluster(connConf.GetInitialContactPoints()...)\n\tcc.Port = 443\n\n\t// SslOpts are used only by establishing connection to initial contact points.\n\t// Skip verifying TLS if any of DC requires it.\n\tinsecureSkipVerify := false\n\tfor _, dc := range connConf.Datacenters {\n\t\tif dc.InsecureSkipTLSVerify {\n\t\t\tinsecureSkipVerify = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tcc.SslOpts = &gocql.SslOptions{\n\t\t// Set to false, always use value from tls.Config.\n\t\tEnableHostVerification: false,\n\t\tConfig: &tls.Config{\n\t\t\tRootCAs: caPool,\n\t\t\tGetClientCertificate: func(info 
*tls.CertificateRequestInfo) (*tls.Certificate, error) {\n\t\t\t\treturn connConf.GetClientCertificate()\n\t\t\t},\n\t\t\tInsecureSkipVerify: insecureSkipVerify,\n\t\t},\n\t}\n\n\tdialer := cc.Dialer\n\tif dialer == nil {\n\t\tdialer = &net.Dialer{}\n\t}\n\n\tcc.HostDialer = NewSniHostDialer(connConf, dialer)\n\tcc.Authenticator = gocql.PasswordAuthenticator{Password: authInfo.Password, Username: authInfo.Username}\n\n\tif connConf.Parameters != nil {\n\t\tif connConf.Parameters.DefaultConsistency != \"\" {\n\t\t\tif !validateConsistency(connConf.Parameters.DefaultConsistency, allowedConsistencies) {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid value of default consistency %q, values can be one of: %v\", connConf.Parameters.DefaultConsistency, allowedConsistencies)\n\t\t\t}\n\t\t\tif err := cc.Consistency.UnmarshalText([]byte(connConf.Parameters.DefaultConsistency)); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"unmarshal default consistency: %w\", err)\n\t\t\t}\n\t\t}\n\t\tif connConf.Parameters.DefaultSerialConsistency != \"\" {\n\t\t\tif !validateConsistency(connConf.Parameters.DefaultSerialConsistency, allowedSerialConsistencies) {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid value of default serial consistency %q, values can be one of: %v\", connConf.Parameters.DefaultSerialConsistency, allowedSerialConsistencies)\n\t\t\t}\n\t\t\tif err := cc.SerialConsistency.UnmarshalText([]byte(connConf.Parameters.DefaultSerialConsistency)); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"unmarshal default serial consistency: %w\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn cc, nil\n}\n\nfunc validateConsistency(c ConsistencyString, allowed []ConsistencyString) bool {\n\tfor _, ac := range allowed {\n\t\tif ac == c {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nvar allowedSerialConsistencies = []ConsistencyString{\n\tDefaultSerialConsistency,\n\tDefaultLocalSerialConsistency,\n}\nvar allowedConsistencies = 
[]ConsistencyString{\n\tDefaultThreeConsistency,\n\tDefaultOneConsistency,\n\tDefaultTwoConsistency,\n\tDefaultAnyConsistency,\n\tDefaultQuorumConsistency,\n\tDefaultAllConsistency,\n\tDefaultLocalQuorumConsistency,\n\tDefaultEachQuorumConsistency,\n\tDefaultLocalOneConsistency,\n}\n"
  },
  {
    "path": "scyllacloud/config.go",
    "content": "package scyllacloud\n\nimport (\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"fmt\"\n\t\"os\"\n)\n\ntype ConnectionConfig struct {\n\t// Datacenters is a map of referencable names to datacenter configs.\n\tDatacenters map[string]*Datacenter `json:\"datacenters\"`\n\t// AuthInfos is a map of referencable names to authentication configs.\n\tAuthInfos map[string]*AuthInfo `json:\"authInfos\"`\n\t// Contexts is a map of referencable names to context configs.\n\tContexts   map[string]*Context `json:\"contexts\"`\n\tParameters *Parameters         `json:\"parameters,omitempty\"`\n\t// Kind is a string value representing the REST resource this object represents.\n\t// Servers may infer this from the endpoint the client submits requests to.\n\t// In CamelCase.\n\t// +optional\n\tKind string `json:\"kind,omitempty\"`\n\t// APIVersion defines the versioned schema of this representation of an object.\n\t// Servers should convert recognized schemas to the latest internal value, and\n\t// may reject unrecognized values\n\t// +optional\n\tAPIVersion     string `json:\"apiVersion,omitempty\"`\n\tCurrentContext string `json:\"currentContext\"`\n}\n\ntype AuthInfo struct {\n\t// ClientCertificatePath is the path to a client cert file for TLS.\n\tClientCertificatePath string `json:\"clientCertificatePath,omitempty\"`\n\t// ClientKeyPath is the path to a client key file for TLS.\n\tClientKeyPath string `json:\"clientKeyPath,omitempty\"`\n\t// Username is the username for basic authentication to the Scylla cluster.\n\tUsername string `json:\"username,omitempty\"`\n\t// Password is the password for basic authentication to the Scylla cluster\n\tPassword string `json:\"password,omitempty\"`\n\t// ClientCertificateData contains PEM-encoded data from a client cert file for TLS.\n\t// Overrides ClientCertificatePath.\n\tClientCertificateData []byte `json:\"clientCertificateData,omitempty\"`\n\t// ClientKeyData contains PEM-encoded data from a client key file for TLS.\n\t// 
Overrides ClientKeyPath.\n\tClientKeyData []byte `json:\"clientKeyData,omitempty\"`\n}\n\ntype Datacenter struct {\n\t// CertificateAuthorityPath is the path to a cert file for the certificate authority.\n\tCertificateAuthorityPath string `json:\"certificateAuthorityPath,omitempty\"`\n\t// Server is the initial contact point of the Scylla cluster.\n\t// Example: https://hostname:port\n\tServer string `json:\"server\"`\n\t// TLSServerName is used to check server certificates.\n\t// If TLSServerName is empty, the hostname used to contact the server is used.\n\tTLSServerName string `json:\"tlsServerName,omitempty\"`\n\t// NodeDomain the domain suffix that is concatenated with host_id of the node driver wants to connect to.\n\t// Example: host_id.<nodeDomain>\n\tNodeDomain string `json:\"nodeDomain\"`\n\t// ProxyURL is the URL to the proxy to be used for all requests made by this\n\t// client. URLs with \"http\", \"https\", and \"socks5\" schemes are supported. If\n\t// this configuration is not provided or the empty string, the client\n\t// attempts to construct a proxy configuration from http_proxy and\n\t// https_proxy environment variables. If these environment variables are not\n\t// set, the client does not attempt to proxy requests.\n\t// It is optional\n\tProxyURL string `json:\"proxyUrl,omitempty\"`\n\t// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority.\n\tCertificateAuthorityData []byte `json:\"certificateAuthorityData,omitempty\"`\n\t// InsecureSkipTLSVerify skips the validity check for the server's certificate. 
This will make your HTTPS connections insecure.\n\tInsecureSkipTLSVerify bool `json:\"insecureSkipTlsVerify,omitempty\"`\n}\n\ntype Context struct {\n\t// DatacenterName is the name of the datacenter for this context.\n\tDatacenterName string `json:\"datacenterName\"`\n\t// AuthInfoName is the name of the authInfo for this context.\n\tAuthInfoName string `json:\"authInfoName\"`\n}\n\ntype Parameters struct {\n\t// DefaultConsistency is the default consistency level used for user queries.\n\t// +optional\n\tDefaultConsistency ConsistencyString `json:\"defaultConsistency,omitempty\"`\n\t// DefaultSerialConsistency is the default consistency level for the serial part of user queries.\n\t// +optional\n\tDefaultSerialConsistency ConsistencyString `json:\"defaultSerialConsistency,omitempty\"`\n}\n\ntype ConsistencyString string\n\n// just AnyConsistency etc is better, but there's already SerialConsistency defined elsewhere.\nconst (\n\tDefaultAnyConsistency         ConsistencyString = \"ANY\"\n\tDefaultOneConsistency         ConsistencyString = \"ONE\"\n\tDefaultTwoConsistency         ConsistencyString = \"TWO\"\n\tDefaultThreeConsistency       ConsistencyString = \"THREE\"\n\tDefaultQuorumConsistency      ConsistencyString = \"QUORUM\"\n\tDefaultAllConsistency         ConsistencyString = \"ALL\"\n\tDefaultLocalQuorumConsistency ConsistencyString = \"LOCAL_QUORUM\"\n\tDefaultEachQuorumConsistency  ConsistencyString = \"EACH_QUORUM\"\n\tDefaultSerialConsistency      ConsistencyString = \"SERIAL\"\n\tDefaultLocalSerialConsistency ConsistencyString = \"LOCAL_SERIAL\"\n\tDefaultLocalOneConsistency    ConsistencyString = \"LOCAL_ONE\"\n)\n\nfunc (cc *ConnectionConfig) GetRootCAPool() (*x509.CertPool, error) {\n\tcaPool := x509.NewCertPool()\n\n\tfor dcName, dc := range cc.Datacenters {\n\t\tif len(dc.CertificateAuthorityData) == 0 && len(dc.CertificateAuthorityPath) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"datacenter %q does not include certificate authority\", 
dcName)\n\t\t}\n\n\t\tcaData, err := cc.getDataOrReadFile(dc.CertificateAuthorityData, dc.CertificateAuthorityPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"can't read datacenter %q certificate authority file from %q: %w\", dcName, dc.CertificateAuthorityPath, err)\n\t\t}\n\n\t\tcaPool.AppendCertsFromPEM(caData)\n\t}\n\n\treturn caPool, nil\n}\n\nfunc (cc *ConnectionConfig) GetDatacenterCAPool(datacenterName string) (*x509.CertPool, error) {\n\tdc, ok := cc.Datacenters[datacenterName]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"datacenter %q not found in cloud connection config\", datacenterName)\n\t}\n\n\tcaPool := x509.NewCertPool()\n\n\tif len(dc.CertificateAuthorityData) == 0 && len(dc.CertificateAuthorityPath) == 0 {\n\t\treturn nil, fmt.Errorf(\"datacenter %q does not include certificate authority\", datacenterName)\n\t}\n\n\tcaData, err := cc.getDataOrReadFile(dc.CertificateAuthorityData, dc.CertificateAuthorityPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't read datacenter %q certificate authority file from %q: %w\", datacenterName, dc.CertificateAuthorityPath, err)\n\t}\n\tcaPool.AppendCertsFromPEM(caData)\n\n\treturn caPool, nil\n}\n\nfunc (cc *ConnectionConfig) GetInitialContactPoints() []string {\n\thosts := make([]string, 0, len(cc.Datacenters))\n\tfor _, dc := range cc.Datacenters {\n\t\thosts = append(hosts, dc.Server)\n\t}\n\treturn hosts\n}\n\nfunc (cc *ConnectionConfig) getDataOrReadFile(data []byte, path string) ([]byte, error) {\n\tif len(data) == 0 {\n\t\treturn os.ReadFile(path)\n\t}\n\n\treturn data, nil\n}\n\nfunc (cc *ConnectionConfig) GetCurrentDatacenterConfig() (*Datacenter, error) {\n\tcontextConf, err := cc.GetCurrentContextConfig()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't get current context config: %w\", err)\n\t}\n\n\tif len(contextConf.DatacenterName) == 0 {\n\t\treturn nil, fmt.Errorf(\"datacenterName in current context can't be empty\")\n\t}\n\n\tdcConf, ok := 
cc.Datacenters[contextConf.DatacenterName]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"datacenter %q does not exists\", contextConf.DatacenterName)\n\t}\n\n\treturn dcConf, nil\n}\n\nfunc (cc *ConnectionConfig) GetCurrentContextConfig() (*Context, error) {\n\tif len(cc.CurrentContext) == 0 {\n\t\treturn nil, fmt.Errorf(\"current context can't be empty\")\n\t}\n\n\tcontextConf, ok := cc.Contexts[cc.CurrentContext]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"context %q does not exists\", cc.CurrentContext)\n\t}\n\n\treturn contextConf, nil\n}\n\nfunc (cc *ConnectionConfig) GetCurrentAuthInfo() (*AuthInfo, error) {\n\tcontextConf, err := cc.GetCurrentContextConfig()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't get current context config: %w\", err)\n\t}\n\n\tif len(contextConf.AuthInfoName) == 0 {\n\t\treturn nil, fmt.Errorf(\"authInfo in current context can't be empty\")\n\t}\n\n\tauthInfo, ok := cc.AuthInfos[contextConf.AuthInfoName]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"authInfo %q does not exists\", contextConf.AuthInfoName)\n\t}\n\n\treturn authInfo, nil\n}\n\nfunc (cc *ConnectionConfig) GetClientCertificate() (*tls.Certificate, error) {\n\tauthInfo, err := cc.GetCurrentAuthInfo()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't get current auth info: %w\", err)\n\t}\n\n\tclientCert, err := cc.getDataOrReadFile(authInfo.ClientCertificateData, authInfo.ClientCertificatePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't read client certificate: %w\", err)\n\t}\n\n\tclientKey, err := cc.getDataOrReadFile(authInfo.ClientKeyData, authInfo.ClientKeyPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't read client key: %w\", err)\n\t}\n\n\tcert, err := tls.X509KeyPair(clientCert, clientKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't create x509 pair: %w\", err)\n\t}\n\n\treturn &cert, nil\n}\n"
  },
  {
    "path": "scyllacloud/config_test.go",
    "content": "// Copyright (C) 2021 ScyllaDB\n\n//go:build unit\n// +build unit\n\npackage scyllacloud\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"sigs.k8s.io/yaml\"\n\n\t\"github.com/gocql/gocql/internal/tests\"\n\n\t\"github.com/gocql/gocql\"\n)\n\nfunc TestCloudCluster(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tsingleDCConfig = func() *ConnectionConfig {\n\t\t\treturn &ConnectionConfig{\n\t\t\t\tDatacenters: map[string]*Datacenter{\n\t\t\t\t\t\"dc-1\": {\n\t\t\t\t\t\tCertificateAuthorityPath: \"../testdata/pki/ca.crt\",\n\t\t\t\t\t\tServer:                   \"eu.cloud.scylladb.com\",\n\t\t\t\t\t\tTLSServerName:            \"some-host\",\n\t\t\t\t\t\tNodeDomain:               \"eu.cloud.scylladb.com\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAuthInfos: map[string]*AuthInfo{\n\t\t\t\t\t\"ai-1\": {\n\t\t\t\t\t\tUsername:              \"username\",\n\t\t\t\t\t\tPassword:              \"password\",\n\t\t\t\t\t\tClientKeyPath:         \"../testdata/pki/gocql.key\",\n\t\t\t\t\t\tClientCertificatePath: \"../testdata/pki/gocql.crt\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tContexts: map[string]*Context{\n\t\t\t\t\t\"default-context\": {\n\t\t\t\t\t\tAuthInfoName:   \"ai-1\",\n\t\t\t\t\t\tDatacenterName: \"dc-1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCurrentContext: \"default-context\",\n\t\t\t}\n\t\t}\n\n\t\tmultiDCConfig = func() *ConnectionConfig {\n\t\t\tcc := singleDCConfig()\n\t\t\tcc.Datacenters[\"dc-2\"] = &Datacenter{\n\t\t\t\tCertificateAuthorityPath: \"../testdata/pki/ca.crt\",\n\t\t\t\tServer:                   \"cloud.scylladb.com\",\n\t\t\t\tTLSServerName:            \"some-host\",\n\t\t\t\tNodeDomain:               \"cloud.scylladb.com\",\n\t\t\t\tProxyURL:                 \"socks5://127.0.0.1:5215\",\n\t\t\t}\n\t\t\treturn cc\n\t\t}\n\t)\n\n\tts := []struct {\n\t\tname                string\n\t\tcreateConfig        func() (*ConnectionConfig, string)\n\t\texpectedError       error\n\t\tverifyClusterConfig func(*testing.T, 
*ConnectionConfig, *gocql.ClusterConfig)\n\t}{\n\t\t{\n\t\t\tname: \"current context points to unknown context\",\n\t\t\tcreateConfig: func() (*ConnectionConfig, string) {\n\t\t\t\treturn writeCloudConnectionConfigToTemp(t, &ConnectionConfig{\n\t\t\t\t\tCurrentContext: \"unknown\",\n\t\t\t\t})\n\t\t\t},\n\t\t\texpectedError: fmt.Errorf(\"current context points to unknown context\"),\n\t\t},\n\t\t{\n\t\t\tname: \"context auth info points to unknown auth info\",\n\t\t\tcreateConfig: func() (*ConnectionConfig, string) {\n\t\t\t\treturn writeCloudConnectionConfigToTemp(t, &ConnectionConfig{\n\t\t\t\t\tContexts: map[string]*Context{\n\t\t\t\t\t\t\"default\": {\n\t\t\t\t\t\t\tAuthInfoName: \"unknown\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tCurrentContext: \"default\",\n\t\t\t\t})\n\t\t\t},\n\t\t\texpectedError: fmt.Errorf(\"context %q auth info points to unknown authinfo\", \"default\"),\n\t\t},\n\t\t{\n\t\t\tname: \"context datacenter points to unknown datacenter\",\n\t\t\tcreateConfig: func() (*ConnectionConfig, string) {\n\t\t\t\treturn writeCloudConnectionConfigToTemp(t, &ConnectionConfig{\n\t\t\t\t\tAuthInfos: map[string]*AuthInfo{\n\t\t\t\t\t\t\"default\": {},\n\t\t\t\t\t},\n\t\t\t\t\tContexts: map[string]*Context{\n\t\t\t\t\t\t\"default\": {\n\t\t\t\t\t\t\tAuthInfoName:   \"default\",\n\t\t\t\t\t\t\tDatacenterName: \"unknown\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tCurrentContext: \"default\",\n\t\t\t\t})\n\t\t\t},\n\t\t\texpectedError: fmt.Errorf(\"context %q datacenter points to unknown datacenter\", \"default\"),\n\t\t},\n\t\t{\n\t\t\tname: \"invalid default consistency\",\n\t\t\tcreateConfig: func() (*ConnectionConfig, string) {\n\t\t\t\tcc := singleDCConfig()\n\t\t\t\tcc.Parameters = &Parameters{\n\t\t\t\t\tDefaultConsistency: DefaultSerialConsistency,\n\t\t\t\t}\n\t\t\t\treturn writeCloudConnectionConfigToTemp(t, cc)\n\t\t\t},\n\t\t\texpectedError: fmt.Errorf(\"invalid value of default consistency %q, values can be one of: [THREE ONE TWO ANY QUORUM ALL 
LOCAL_QUORUM EACH_QUORUM LOCAL_ONE]\", DefaultSerialConsistency),\n\t\t},\n\t\t{\n\t\t\tname: \"invalid default serial consistency\",\n\t\t\tcreateConfig: func() (*ConnectionConfig, string) {\n\t\t\t\tcc := singleDCConfig()\n\t\t\t\tcc.Parameters = &Parameters{\n\t\t\t\t\tDefaultSerialConsistency: DefaultQuorumConsistency,\n\t\t\t\t}\n\t\t\t\treturn writeCloudConnectionConfigToTemp(t, cc)\n\t\t\t},\n\t\t\texpectedError: fmt.Errorf(\"invalid value of default serial consistency %q, values can be one of: [SERIAL LOCAL_SERIAL]\", DefaultQuorumConsistency),\n\t\t},\n\t\t{\n\t\t\tname: \"initial contact points are taken from all available datacenters\",\n\t\t\tcreateConfig: func() (*ConnectionConfig, string) {\n\t\t\t\treturn writeCloudConnectionConfigToTemp(t, multiDCConfig())\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t\tverifyClusterConfig: func(t *testing.T, connConfig *ConnectionConfig, config *gocql.ClusterConfig) {\n\t\t\t\tif len(connConfig.Datacenters) != len(config.Hosts) {\n\t\t\t\t\tt.Errorf(\"initial contact points does not use all datacenters\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"certificate validation is off if any dc requires it\",\n\t\t\tcreateConfig: func() (*ConnectionConfig, string) {\n\t\t\t\tcc := multiDCConfig()\n\t\t\t\tcc.Datacenters[\"dc-1\"].InsecureSkipTLSVerify = true\n\t\t\t\treturn writeCloudConnectionConfigToTemp(t, cc)\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t\tverifyClusterConfig: func(t *testing.T, connConfig *ConnectionConfig, config *gocql.ClusterConfig) {\n\t\t\t\tif !config.SslOpts.Config.InsecureSkipVerify {\n\t\t\t\t\tt.Errorf(\"expected disabled certificate verification\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"authentication credentials are taken from current auth info\",\n\t\t\tcreateConfig: func() (*ConnectionConfig, string) {\n\t\t\t\treturn writeCloudConnectionConfigToTemp(t, singleDCConfig())\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t\tverifyClusterConfig: func(t *testing.T, connConfig 
*ConnectionConfig, config *gocql.ClusterConfig) {\n\t\t\t\tauthenticator, ok := config.Authenticator.(gocql.PasswordAuthenticator)\n\t\t\t\tif !ok {\n\t\t\t\t\tt.Errorf(\"expected PasswordAuthenticator, got %T\", config.Authenticator)\n\t\t\t\t}\n\t\t\t\tcurrentContext := connConfig.Contexts[connConfig.CurrentContext]\n\t\t\t\tauthInfo := connConfig.AuthInfos[currentContext.AuthInfoName]\n\t\t\t\tif authInfo.Username != authenticator.Username {\n\t\t\t\t\tt.Errorf(\"expected %q username, got %q\", authInfo.Username, authenticator.Username)\n\t\t\t\t}\n\t\t\t\tif authInfo.Password != authenticator.Password {\n\t\t\t\t\tt.Errorf(\"expected %q password, got %q\", authInfo.Password, authenticator.Password)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"certificate and key data has priority over path to file containing it\",\n\t\t\tcreateConfig: func() (*ConnectionConfig, string) {\n\t\t\t\tcc := singleDCConfig()\n\n\t\t\t\tcaCert, err := os.ReadFile(cc.Datacenters[\"dc-1\"].CertificateAuthorityPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tcc.Datacenters[\"dc-1\"].CertificateAuthorityData = caCert\n\t\t\t\tcc.Datacenters[\"dc-1\"].CertificateAuthorityPath = \"/not-existing-path\"\n\n\t\t\t\tclientKey, err := os.ReadFile(cc.AuthInfos[\"ai-1\"].ClientKeyPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tcc.AuthInfos[\"ai-1\"].ClientKeyData = clientKey\n\t\t\t\tcc.AuthInfos[\"ai-1\"].ClientKeyPath = \"/not-existing-path\"\n\n\t\t\t\tclientCert, err := os.ReadFile(cc.AuthInfos[\"ai-1\"].ClientCertificatePath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tcc.AuthInfos[\"ai-1\"].ClientCertificateData = clientCert\n\t\t\t\tcc.AuthInfos[\"ai-1\"].ClientCertificatePath = \"/not-existing-path\"\n\n\t\t\t\treturn writeCloudConnectionConfigToTemp(t, cc)\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t}\n\n\tfor i := range ts {\n\t\ttest := ts[i]\n\t\tt.Run(test.name, func(t *testing.T) 
{\n\t\t\tt.Parallel()\n\n\t\t\tcc, path := test.createConfig()\n\t\t\tdefer os.RemoveAll(path)\n\n\t\t\tcloudConfig, err := NewCloudCluster(path)\n\t\t\tif !tests.ErrEqual(err, test.expectedError) {\n\t\t\t\tt.Errorf(\"expected error %#v, got %#v\", test.expectedError, err)\n\t\t\t}\n\t\t\tif test.verifyClusterConfig != nil {\n\t\t\t\ttest.verifyClusterConfig(t, cc, cloudConfig)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestConnectionConfig_GetCurrentContextConfig(t *testing.T) {\n\tt.Parallel()\n\n\ttt := []struct {\n\t\tname            string\n\t\tconnConfig      *ConnectionConfig\n\t\texpectedContext *Context\n\t\texpectedError   error\n\t}{\n\t\t{\n\t\t\tname: \"empty current context\",\n\t\t\tconnConfig: &ConnectionConfig{\n\t\t\t\tCurrentContext: \"\",\n\t\t\t},\n\t\t\texpectedContext: nil,\n\t\t\texpectedError:   fmt.Errorf(\"current context can't be empty\"),\n\t\t},\n\t\t{\n\t\t\tname: \"not existing current context\",\n\t\t\tconnConfig: &ConnectionConfig{\n\t\t\t\tCurrentContext: \"not-existing-context\",\n\t\t\t},\n\t\t\texpectedContext: nil,\n\t\t\texpectedError:   fmt.Errorf(`context \"not-existing-context\" does not exists`),\n\t\t},\n\t\t{\n\t\t\tname: \"context from current context is returned\",\n\t\t\tconnConfig: &ConnectionConfig{\n\t\t\t\tContexts: map[string]*Context{\n\t\t\t\t\t\"default\": {\n\t\t\t\t\t\tAuthInfoName:   \"admin\",\n\t\t\t\t\t\tDatacenterName: \"us-east-1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCurrentContext: \"default\",\n\t\t\t},\n\t\t\texpectedContext: &Context{\n\t\t\t\tAuthInfoName:   \"admin\",\n\t\t\t\tDatacenterName: \"us-east-1\",\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t}\n\n\tfor i := range tt {\n\t\ttc := tt[i]\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tcontextConf, err := tc.connConfig.GetCurrentContextConfig()\n\t\t\tif !tests.ErrEqual(err, tc.expectedError) {\n\t\t\t\tt.Errorf(\"expected error %#v, got %#v\", tc.expectedError, err)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(tc.expectedContext, contextConf) 
{\n\t\t\t\tt.Errorf(\"expected context %#v, got %#v\", tc.expectedContext, contextConf)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestConnectionConfig_GetCurrentAuthInfo(t *testing.T) {\n\tt.Parallel()\n\n\ttt := []struct {\n\t\tname             string\n\t\tconnConfig       *ConnectionConfig\n\t\texpectedAuthInfo *AuthInfo\n\t\texpectedError    error\n\t}{\n\t\t{\n\t\t\tname: \"empty current context\",\n\t\t\tconnConfig: &ConnectionConfig{\n\t\t\t\tCurrentContext: \"\",\n\t\t\t},\n\t\t\texpectedAuthInfo: nil,\n\t\t\texpectedError:    fmt.Errorf(\"can't get current context config: %w\", fmt.Errorf(\"current context can't be empty\")),\n\t\t},\n\t\t{\n\t\t\tname: \"not existing current context\",\n\t\t\tconnConfig: &ConnectionConfig{\n\t\t\t\tCurrentContext: \"not-existing-context\",\n\t\t\t},\n\t\t\texpectedAuthInfo: nil,\n\t\t\texpectedError:    fmt.Errorf(\"can't get current context config: %w\", fmt.Errorf(`context \"not-existing-context\" does not exists`)),\n\t\t},\n\t\t{\n\t\t\tname: \"empty auth info name in current context\",\n\t\t\tconnConfig: &ConnectionConfig{\n\t\t\t\tContexts: map[string]*Context{\n\t\t\t\t\t\"default\": {\n\t\t\t\t\t\tAuthInfoName: \"\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCurrentContext: \"default\",\n\t\t\t},\n\t\t\texpectedAuthInfo: nil,\n\t\t\texpectedError:    fmt.Errorf(\"authInfo in current context can't be empty\"),\n\t\t},\n\t\t{\n\t\t\tname: \"not existing auth info name in current context\",\n\t\t\tconnConfig: &ConnectionConfig{\n\t\t\t\tContexts: map[string]*Context{\n\t\t\t\t\t\"default\": {\n\t\t\t\t\t\tAuthInfoName: \"not-existing-auth-info\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCurrentContext: \"default\",\n\t\t\t},\n\t\t\texpectedAuthInfo: nil,\n\t\t\texpectedError:    fmt.Errorf(`authInfo \"not-existing-auth-info\" does not exists`),\n\t\t},\n\t\t{\n\t\t\tname: \"auth info from current context is returned\",\n\t\t\tconnConfig: &ConnectionConfig{\n\t\t\t\tAuthInfos: map[string]*AuthInfo{\n\t\t\t\t\t\"admin\": 
{\n\t\t\t\t\t\tClientCertificatePath: \"client-cert-path\",\n\t\t\t\t\t\tClientKeyPath:         \"client-key-path\",\n\t\t\t\t\t\tUsername:              \"username\",\n\t\t\t\t\t\tPassword:              \"password\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tContexts: map[string]*Context{\n\t\t\t\t\t\"default\": {\n\t\t\t\t\t\tAuthInfoName: \"admin\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCurrentContext: \"default\",\n\t\t\t},\n\t\t\texpectedAuthInfo: &AuthInfo{\n\t\t\t\tClientCertificatePath: \"client-cert-path\",\n\t\t\t\tClientKeyPath:         \"client-key-path\",\n\t\t\t\tUsername:              \"username\",\n\t\t\t\tPassword:              \"password\",\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t}\n\n\tfor i := range tt {\n\t\ttc := tt[i]\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tai, err := tc.connConfig.GetCurrentAuthInfo()\n\t\t\tif !tests.ErrEqual(err, tc.expectedError) {\n\t\t\t\tt.Errorf(\"expected error %#v, got %#v\", tc.expectedError, err)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(tc.expectedAuthInfo, ai) {\n\t\t\t\tt.Errorf(\"expected authInfo %#v, got %#v\", tc.expectedAuthInfo, ai)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestConnectionConfig_GetCurrentDatacenterConfig(t *testing.T) {\n\tt.Parallel()\n\n\ttt := []struct {\n\t\tname               string\n\t\tconnConfig         *ConnectionConfig\n\t\texpectedDatacenter *Datacenter\n\t\texpectedError      error\n\t}{\n\t\t{\n\t\t\tname: \"empty current context\",\n\t\t\tconnConfig: &ConnectionConfig{\n\t\t\t\tCurrentContext: \"\",\n\t\t\t},\n\t\t\texpectedDatacenter: nil,\n\t\t\texpectedError:      fmt.Errorf(\"can't get current context config: %w\", fmt.Errorf(\"current context can't be empty\")),\n\t\t},\n\t\t{\n\t\t\tname: \"not existing current context\",\n\t\t\tconnConfig: &ConnectionConfig{\n\t\t\t\tCurrentContext: \"not-existing-context\",\n\t\t\t},\n\t\t\texpectedDatacenter: nil,\n\t\t\texpectedError:      fmt.Errorf(\"can't get current context config: %w\", fmt.Errorf(`context \"not-existing-context\" 
does not exists`)),\n\t\t},\n\t\t{\n\t\t\tname: \"empty datacenter name in current context\",\n\t\t\tconnConfig: &ConnectionConfig{\n\t\t\t\tContexts: map[string]*Context{\n\t\t\t\t\t\"default\": {\n\t\t\t\t\t\tDatacenterName: \"\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCurrentContext: \"default\",\n\t\t\t},\n\t\t\texpectedDatacenter: nil,\n\t\t\texpectedError:      fmt.Errorf(\"datacenterName in current context can't be empty\"),\n\t\t},\n\t\t{\n\t\t\tname: \"not existing datacenter name in current context\",\n\t\t\tconnConfig: &ConnectionConfig{\n\t\t\t\tContexts: map[string]*Context{\n\t\t\t\t\t\"default\": {\n\t\t\t\t\t\tDatacenterName: \"not-existing-dc\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCurrentContext: \"default\",\n\t\t\t},\n\t\t\texpectedDatacenter: nil,\n\t\t\texpectedError:      fmt.Errorf(`datacenter \"not-existing-dc\" does not exists`),\n\t\t},\n\t\t{\n\t\t\tname: \"datacenter from current context is returned\",\n\t\t\tconnConfig: &ConnectionConfig{\n\t\t\t\tDatacenters: map[string]*Datacenter{\n\t\t\t\t\t\"us-east-1\": {\n\t\t\t\t\t\tCertificateAuthorityPath: \"path-to-ca-cert\",\n\t\t\t\t\t\tServer:                   \"server\",\n\t\t\t\t\t\tTLSServerName:            \"tls-server-name\",\n\t\t\t\t\t\tNodeDomain:               \"node-domain\",\n\t\t\t\t\t\tInsecureSkipTLSVerify:    true,\n\t\t\t\t\t\tProxyURL:                 \"proxy-url\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tContexts: map[string]*Context{\n\t\t\t\t\t\"default\": {\n\t\t\t\t\t\tDatacenterName: \"us-east-1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCurrentContext: \"default\",\n\t\t\t},\n\t\t\texpectedDatacenter: &Datacenter{\n\t\t\t\tCertificateAuthorityPath: \"path-to-ca-cert\",\n\t\t\t\tServer:                   \"server\",\n\t\t\t\tTLSServerName:            \"tls-server-name\",\n\t\t\t\tNodeDomain:               \"node-domain\",\n\t\t\t\tInsecureSkipTLSVerify:    true,\n\t\t\t\tProxyURL:                 \"proxy-url\",\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t}\n\n\tfor i := range 
tt {\n\t\ttc := tt[i]\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tdc, err := tc.connConfig.GetCurrentDatacenterConfig()\n\t\t\tif !tests.ErrEqual(err, tc.expectedError) {\n\t\t\t\tt.Errorf(\"expected error %#v, got %#v\", tc.expectedError, err)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(tc.expectedDatacenter, dc) {\n\t\t\t\tt.Errorf(\"expected datacenter %v, got %v\", tc.expectedDatacenter, dc)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc writeCloudConnectionConfigToTemp(t *testing.T, cc *ConnectionConfig) (*ConnectionConfig, string) {\n\tf, err := os.CreateTemp(os.TempDir(), \"gocql-cloud-conn-config-\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := f.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tbuf, err := yaml.Marshal(cc)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := os.WriteFile(f.Name(), buf, 0600); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn cc, f.Name()\n}\n"
  },
  {
    "path": "scyllacloud/hostdialer.go",
    "content": "// Copyright (C) 2021 ScyllaDB\n\npackage scyllacloud\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/url\"\n\n\t\"golang.org/x/net/proxy\"\n\n\t\"github.com/gocql/gocql\"\n)\n\n// SniHostDialer is able to dial particular host through SNI proxy.\n// TLS Config is build from ConnectionConfig based on datacenter where given node belongs.\n// SNI is constructed from host_id of a node, and NodeDomain taken from cloud config.\ntype SniHostDialer struct {\n\tconnConfig *ConnectionConfig\n\tdialer     gocql.Dialer\n}\n\nfunc NewSniHostDialer(connConfig *ConnectionConfig, dialer gocql.Dialer) *SniHostDialer {\n\treturn &SniHostDialer{\n\t\tconnConfig: connConfig,\n\t\tdialer:     dialer,\n\t}\n}\n\nfunc (s *SniHostDialer) DialHost(ctx context.Context, host *gocql.HostInfo) (*gocql.DialedHost, error) {\n\thostID := host.HostID()\n\tif len(hostID) == 0 {\n\t\treturn s.dialInitialContactPoint(ctx)\n\t}\n\n\tdcName := host.DataCenter()\n\tdcConf, ok := s.connConfig.Datacenters[dcName]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"datacenter %q configuration not found in connection bundle\", dcName)\n\t}\n\n\tdialer := s.dialer\n\n\tif len(dcConf.ProxyURL) != 0 {\n\t\tu, err := url.Parse(dcConf.ProxyURL)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"can't parse proxy URL %q: %w\", dcConf.ProxyURL, err)\n\t\t}\n\n\t\td, err := proxy.FromURL(u, proxyDialerFunc(func(network, addr string) (net.Conn, error) {\n\t\t\treturn dialer.DialContext(ctx, network, addr)\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"can't create proxy dialer: %w\", err)\n\t\t}\n\n\t\tdialer = d.(proxy.ContextDialer)\n\t}\n\n\tsni := fmt.Sprintf(\"%s.%s\", host.HostID(), dcConf.NodeDomain)\n\tclientCertificate, err := s.connConfig.GetClientCertificate()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't get client certificate from configuration: %w\", err)\n\t}\n\n\tca, err := s.connConfig.GetDatacenterCAPool(dcName)\n\tif err != nil 
{\n\t\treturn nil, fmt.Errorf(\"can't get root CA from configuration: %w\", err)\n\t}\n\n\treturn s.connect(ctx, dialer, dcConf.Server, &tls.Config{\n\t\tServerName:         sni,\n\t\tRootCAs:            ca,\n\t\tInsecureSkipVerify: dcConf.InsecureSkipTLSVerify,\n\t\tCertificates:       []tls.Certificate{*clientCertificate},\n\t})\n}\n\nfunc (s *SniHostDialer) dialInitialContactPoint(ctx context.Context) (*gocql.DialedHost, error) {\n\tinsecureSkipVerify := false\n\tfor _, dc := range s.connConfig.Datacenters {\n\t\tif dc.InsecureSkipTLSVerify {\n\t\t\tinsecureSkipVerify = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tclientCertificate, err := s.connConfig.GetClientCertificate()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't get client certificate from configuration: %w\", err)\n\t}\n\n\tca, err := s.connConfig.GetRootCAPool()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't get root CA from configuration: %w\", err)\n\t}\n\n\tdcConf, err := s.connConfig.GetCurrentDatacenterConfig()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't get current datacenter config: %w\", err)\n\t}\n\n\tserverName := dcConf.NodeDomain\n\tif len(serverName) == 0 {\n\t\tserverName = dcConf.Server\n\t}\n\n\treturn s.connect(ctx, s.dialer, dcConf.Server, &tls.Config{\n\t\tServerName:         serverName,\n\t\tRootCAs:            ca,\n\t\tInsecureSkipVerify: insecureSkipVerify,\n\t\tCertificates:       []tls.Certificate{*clientCertificate},\n\t})\n}\n\nfunc (s *SniHostDialer) connect(ctx context.Context, dialer gocql.Dialer, server string, tlsConfig *tls.Config) (*gocql.DialedHost, error) {\n\tconn, err := dialer.DialContext(ctx, \"tcp\", server)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't connect to %q: %w\", server, err)\n\t}\n\n\ttconn := tls.Client(conn, tlsConfig)\n\tif err := tconn.HandshakeContext(ctx); err != nil {\n\t\t_ = conn.Close()\n\t\treturn nil, fmt.Errorf(\"can't finish TLS handshake with server %q SNI %q: %w\", server, tlsConfig.ServerName, 
err)\n\t}\n\n\treturn &gocql.DialedHost{\n\t\tConn:            tconn,\n\t\tDisableCoalesce: true,\n\t}, nil\n}\n\ntype proxyDialerFunc func(network, addr string) (net.Conn, error)\n\nfunc (d proxyDialerFunc) Dial(network, addr string) (net.Conn, error) {\n\treturn d(network, addr)\n}\n"
  },
  {
    "path": "scyllacloud/hostdialer_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage scyllacloud\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"crypto/x509/pkix\"\n\t\"encoding/pem\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"net\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"syscall\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests\"\n)\n\nconst (\n\ttestTimeout = time.Second\n)\n\nfunc TestHostSNIDialer_InvalidConnectionConfig(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\t_, serverCertPem, clientCertPem, clientKeyPem, err := setupTLSServer(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdialer := &gocql.ScyllaShardAwareDialer{Dialer: net.Dialer{}}\n\n\ttt := []struct {\n\t\tname          string\n\t\tconnConfig    *ConnectionConfig\n\t\thostInfo      *gocql.HostInfo\n\t\texpectedError error\n\t}{\n\t\t{\n\t\t\tname: \"empty current context\",\n\t\t\tconnConfig: func() *ConnectionConfig {\n\t\t\t\tcc := newBasicConnectionConf(\"127.0.0.1:9142\", serverCertPem, clientCertPem, clientKeyPem)\n\t\t\t\tcc.CurrentContext = \"\"\n\t\t\t\treturn cc\n\t\t\t}(),\n\t\t\thostInfo:      &gocql.HostInfo{},\n\t\t\texpectedError: fmt.Errorf(\"can't get client certificate from configuration: %w\", fmt.Errorf(\"can't get current auth info: %w\", fmt.Errorf(\"can't get current context config: %w\", fmt.Errorf(\"current context can't be empty\")))),\n\t\t},\n\t\t{\n\t\t\tname: \"current context is unknown\",\n\t\t\tconnConfig: func() *ConnectionConfig {\n\t\t\t\tcc := newBasicConnectionConf(\"127.0.0.1:9142\", serverCertPem, clientCertPem, clientKeyPem)\n\t\t\t\tcc.CurrentContext = \"unknown-context\"\n\t\t\t\treturn cc\n\t\t\t}(),\n\t\t\thostInfo:      &gocql.HostInfo{},\n\t\t\texpectedError: fmt.Errorf(\"can't get client certificate from configuration: %w\", fmt.Errorf(\"can't get current auth info: %w\", fmt.Errorf(\"can't get 
current context config: %w\", fmt.Errorf(`context \"unknown-context\" does not exists`)))),\n\t\t},\n\t\t{\n\t\t\tname: \"unknown default authInfo\",\n\t\t\tconnConfig: func() *ConnectionConfig {\n\t\t\t\tcc := newBasicConnectionConf(\"127.0.0.1:9142\", serverCertPem, clientCertPem, clientKeyPem)\n\t\t\t\tcc.Contexts[cc.CurrentContext].AuthInfoName = \"unknown-authinfo\"\n\t\t\t\treturn cc\n\t\t\t}(),\n\t\t\thostInfo:      &gocql.HostInfo{},\n\t\t\texpectedError: fmt.Errorf(\"can't get client certificate from configuration: %w\", fmt.Errorf(\"can't get current auth info: %w\", fmt.Errorf(`authInfo \"unknown-authinfo\" does not exists`))),\n\t\t},\n\t\t{\n\t\t\tname:          \"empty client certificate\",\n\t\t\tconnConfig:    newBasicConnectionConf(\"127.0.0.1:9142\", serverCertPem, nil, clientKeyPem),\n\t\t\thostInfo:      &gocql.HostInfo{},\n\t\t\texpectedError: fmt.Errorf(\"can't get client certificate from configuration: %w\", fmt.Errorf(\"can't read client certificate: %w\", &os.PathError{Op: \"open\", Path: \"\", Err: syscall.ENOENT})),\n\t\t},\n\t\t{\n\t\t\tname:          \"empty client key\",\n\t\t\tconnConfig:    newBasicConnectionConf(\"127.0.0.1:9142\", serverCertPem, clientCertPem, nil),\n\t\t\thostInfo:      &gocql.HostInfo{},\n\t\t\texpectedError: fmt.Errorf(\"can't get client certificate from configuration: %w\", fmt.Errorf(\"can't read client key: %w\", &os.PathError{Op: \"open\", Path: \"\", Err: syscall.ENOENT})),\n\t\t},\n\t\t{\n\t\t\tname:          \"empty certificate authority\",\n\t\t\tconnConfig:    newBasicConnectionConf(\"127.0.0.1:9142\", nil, clientCertPem, clientKeyPem),\n\t\t\thostInfo:      &gocql.HostInfo{},\n\t\t\texpectedError: fmt.Errorf(\"can't get root CA from configuration: %w\", fmt.Errorf(`datacenter \"us-east-1\" does not include certificate authority`)),\n\t\t},\n\t\t{\n\t\t\tname: \"unknown default datacenter\",\n\t\t\tconnConfig: func() *ConnectionConfig {\n\t\t\t\tcc := newBasicConnectionConf(\"127.0.0.1:9142\", 
serverCertPem, clientCertPem, clientKeyPem)\n\t\t\t\tcc.Contexts[cc.CurrentContext].DatacenterName = \"unknown-datacenter\"\n\t\t\t\treturn cc\n\t\t\t}(),\n\t\t\thostInfo:      &gocql.HostInfo{},\n\t\t\texpectedError: fmt.Errorf(\"can't get current datacenter config: %w\", fmt.Errorf(`datacenter \"unknown-datacenter\" does not exists`)),\n\t\t},\n\t\t{\n\t\t\tname:       \"unknown host datacenter\",\n\t\t\tconnConfig: newBasicConnectionConf(\"127.0.0.1:9142\", serverCertPem, clientCertPem, clientKeyPem),\n\t\t\thostInfo: func() *gocql.HostInfo {\n\t\t\t\thi := gocql.HostInfoBuilder{\n\t\t\t\t\tDataCenter: \"unknown-datacenter\",\n\t\t\t\t\tHostId:     \"a0000000-0000-0000-0000-000000000099\",\n\t\t\t\t}.Build()\n\t\t\t\treturn &hi\n\t\t\t}(),\n\t\t\texpectedError: fmt.Errorf(`datacenter \"unknown-datacenter\" configuration not found in connection bundle`),\n\t\t},\n\t}\n\tfor i := range tt {\n\t\ttc := tt[i]\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\thostDialer := NewSniHostDialer(tc.connConfig, dialer)\n\t\t\t_, err := hostDialer.DialHost(ctx, tc.hostInfo)\n\t\t\tif !tests.ErrEqual(err, tc.expectedError) {\n\t\t\t\tt.Errorf(\"expected error to be %#v, got %#v\", tc.expectedError, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestHostSNIDialer_ServerNameIdentifiers(t *testing.T) {\n\tt.Parallel()\n\n\ttt := []struct {\n\t\tname        string\n\t\tconnConfig  func(server string, serverCertPem, clientCertPem, clientKeyPem []byte) *ConnectionConfig\n\t\thostInfo    *gocql.HostInfo\n\t\texpectedSNI func(config *ConnectionConfig) string\n\t}{\n\t\t{\n\t\t\tname:       \"node domain as SNI when host info is unknown\",\n\t\t\thostInfo:   &gocql.HostInfo{},\n\t\t\tconnConfig: newBasicConnectionConf,\n\t\t\texpectedSNI: func(_ *ConnectionConfig) string {\n\t\t\t\treturn \"node.scylladb.com\"\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:     \"server as SNI when host info is unknown and node domain is empty\",\n\t\t\thostInfo: &gocql.HostInfo{},\n\t\t\tconnConfig: 
func(server string, serverCertPem, clientCertPem, clientKeyPem []byte) *ConnectionConfig {\n\t\t\t\tcc := newBasicConnectionConf(server, serverCertPem, clientCertPem, clientKeyPem)\n\t\t\t\tdcConf := cc.Datacenters[cc.Contexts[cc.CurrentContext].DatacenterName]\n\t\t\t\tdcConf.NodeDomain = \"\"\n\t\t\t\t// Disable verification because serving cert isn't signed for IP address.\n\t\t\t\tdcConf.InsecureSkipTLSVerify = true\n\t\t\t\treturn cc\n\t\t\t},\n\t\t\texpectedSNI: func(cc *ConnectionConfig) string {\n\t\t\t\treturn cc.Datacenters[cc.Contexts[cc.CurrentContext].DatacenterName].Server\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"host SNI when host is known\",\n\t\t\thostInfo: func() *gocql.HostInfo {\n\t\t\t\thi := gocql.HostInfoBuilder{\n\t\t\t\t\tDataCenter: \"us-east-1\",\n\t\t\t\t\tHostId:     \"a0000000-0000-0000-0000-000000000001\",\n\t\t\t\t}.Build()\n\t\t\t\treturn &hi\n\t\t\t}(),\n\t\t\tconnConfig: newBasicConnectionConf,\n\t\t\texpectedSNI: func(_ *ConnectionConfig) string {\n\t\t\t\treturn \"a0000000-0000-0000-0000-000000000001.node.scylladb.com\"\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i := range tt {\n\t\ttc := tt[i]\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), testTimeout)\n\t\t\tdefer cancel()\n\n\t\t\tserver, serverCertPem, clientCertPem, clientKeyPem, err := setupTLSServer([]string{\"a0000000-0000-0000-0000-000000000001.node.scylladb.com\", \"node.scylladb.com\"})\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tconnectionStateCh := make(chan tls.ConnectionState, 1)\n\t\t\tserver.TLS.VerifyConnection = func(state tls.ConnectionState) error {\n\t\t\t\tconnectionStateCh <- state\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tserver.StartTLS()\n\t\t\tdefer server.Close()\n\n\t\t\tdialer := &gocql.ScyllaShardAwareDialer{Dialer: net.Dialer{}}\n\t\t\tconnConfig := tc.connConfig(server.Listener.Addr().String(), serverCertPem, clientCertPem, clientKeyPem)\n\t\t\thostDialer := 
NewSniHostDialer(connConfig, dialer)\n\n\t\t\t_, err = hostDialer.DialHost(ctx, tc.hostInfo)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase receivedState := <-connectionStateCh:\n\t\t\t\texpectedSNI := tc.expectedSNI(connConfig)\n\t\t\t\tif receivedState.ServerName != expectedSNI {\n\t\t\t\t\tt.Errorf(\"expected %q SNI, got %q\", expectedSNI, receivedState.ServerName)\n\t\t\t\t}\n\t\t\tcase <-ctx.Done():\n\t\t\t\tt.Fatal(\"expected to receive connection, but timed out\")\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc setupTLSServer(dnsDomains []string) (*httptest.Server, []byte, []byte, []byte, error) {\n\tclientCert, clientKey, err := generateClientCert()\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\n\tclientCertPem, err := encodeCertificates(clientCert)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\n\tclientKeyPem, err := encodePrivateKey(clientKey)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\n\tclientCAPool := x509.NewCertPool()\n\tclientCAPool.AppendCertsFromPEM(clientCertPem)\n\n\tserverCert, serverKey, err := generateServingCert(dnsDomains)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\n\tserverCertPem, err := encodeCertificates(serverCert)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\n\tserverKeyPem, err := encodePrivateKey(serverKey)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\n\tservingCert, err := tls.X509KeyPair(serverCertPem, serverKeyPem)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\n\tserver := httptest.NewUnstartedServer(nil)\n\n\tserver.TLS = &tls.Config{\n\t\tCertificates: []tls.Certificate{servingCert},\n\t\tClientCAs:    clientCAPool,\n\t\tClientAuth:   tls.RequestClientCert,\n\t}\n\n\treturn server, serverCertPem, clientCertPem, clientKeyPem, nil\n}\n\nfunc newBasicConnectionConf(server string, serverCertPem, clientCertPem, clientKeyPem []byte) *ConnectionConfig {\n\treturn 
&ConnectionConfig{\n\t\tDatacenters: map[string]*Datacenter{\n\t\t\t\"us-east-1\": {\n\t\t\t\tCertificateAuthorityData: serverCertPem,\n\t\t\t\tServer:                   server,\n\t\t\t\tNodeDomain:               \"node.scylladb.com\",\n\t\t\t},\n\t\t},\n\t\tAuthInfos: map[string]*AuthInfo{\n\t\t\t\"admin\": {\n\t\t\t\tClientCertificateData: clientCertPem,\n\t\t\t\tClientKeyData:         clientKeyPem,\n\t\t\t},\n\t\t},\n\t\tContexts: map[string]*Context{\n\t\t\t\"default\": {\n\t\t\t\tDatacenterName: \"us-east-1\",\n\t\t\t\tAuthInfoName:   \"admin\",\n\t\t\t},\n\t\t},\n\t\tCurrentContext: \"default\",\n\t}\n}\n\nfunc generateServingCert(dnsNames []string) (*x509.Certificate, *rsa.PrivateKey, error) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 1028)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"can't generate private key: %w\", err)\n\t}\n\n\tcommonName := \"serving-cert\"\n\tcert, err := generateSelfSignedX509Certificate(commonName, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, dnsNames, &privateKey.PublicKey, privateKey)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn cert, privateKey, nil\n}\n\nfunc generateClientCert() (*x509.Certificate, *rsa.PrivateKey, error) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 1028)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"can't generate private key: %w\", err)\n\t}\n\n\tcommonName := \"client\"\n\tcert, err := generateSelfSignedX509Certificate(commonName, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, nil, &privateKey.PublicKey, privateKey)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn cert, privateKey, nil\n}\n\nfunc generateSerialNumber() (*big.Int, error) {\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn serialNumber, nil\n}\n\nfunc generateSelfSignedX509Certificate(cn string, extKeyUsage []x509.ExtKeyUsage, dnsNames []string, 
pub, priv any) (*x509.Certificate, error) {\n\tnow := time.Now()\n\n\tserialNumber, err := generateSerialNumber()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttemplate := &x509.Certificate{\n\t\tSubject: pkix.Name{\n\t\t\tCommonName: cn,\n\t\t},\n\t\tIsCA:                  false,\n\t\tKeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage:           extKeyUsage,\n\t\tNotBefore:             now.Add(-1 * time.Second),\n\t\tNotAfter:              now.Add(time.Hour),\n\t\tSignatureAlgorithm:    x509.SHA512WithRSA,\n\t\tBasicConstraintsValid: true,\n\t\tSerialNumber:          serialNumber,\n\t\tDNSNames:              dnsNames,\n\t}\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, template, template, pub, priv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't create certificate: %w\", err)\n\t}\n\n\tcerts, err := x509.ParseCertificates(derBytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't parse der encoded certificate: %w\", err)\n\t}\n\tif len(certs) != 1 {\n\t\treturn nil, fmt.Errorf(\"expected to parse 1 certificate from der bytes but %d were present\", len(certs))\n\t}\n\n\treturn certs[0], nil\n}\n\nfunc encodeCertificates(certificates ...*x509.Certificate) ([]byte, error) {\n\tbuffer := bytes.Buffer{}\n\tfor _, certificate := range certificates {\n\t\terr := pem.Encode(&buffer, &pem.Block{\n\t\t\tType:  \"CERTIFICATE\",\n\t\t\tBytes: certificate.Raw,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"can't pem encode certificate: %w\", err)\n\t\t}\n\t}\n\treturn buffer.Bytes(), nil\n}\n\nfunc encodePrivateKey(key *rsa.PrivateKey) ([]byte, error) {\n\tbuffer := bytes.Buffer{}\n\terr := pem.Encode(&buffer, &pem.Block{\n\t\tType:  \"RSA PRIVATE KEY\",\n\t\tBytes: x509.MarshalPKCS1PrivateKey(key),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't pem encode rsa private key: %w\", err)\n\t}\n\n\treturn buffer.Bytes(), nil\n}\n"
  },
  {
    "path": "serialization/ascii/marshal.go",
    "content": "package ascii\n\nimport (\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase string:\n\t\treturn EncString(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tcase []byte:\n\t\treturn EncBytes(v)\n\tcase *[]byte:\n\t\treturn EncBytesR(v)\n\tdefault:\n\t\t// Custom types (type MyString string) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(rv)\n\t\t}\n\t\treturn EncReflectR(rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/ascii/marshal_utils.go",
    "content": "package ascii\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc EncString(v string) ([]byte, error) {\n\treturn encString(v), nil\n}\n\nfunc EncStringR(v *string) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encString(*v), nil\n}\n\nfunc EncBytes(v []byte) ([]byte, error) {\n\treturn v, nil\n}\n\nfunc EncBytesR(v *[]byte) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn *v, nil\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.String:\n\t\treturn encString(v.String()), nil\n\tcase reflect.Slice:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal ascii: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte, unsetColumn\", v.Interface())\n\t\t}\n\t\treturn EncBytes(v.Bytes())\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal ascii: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal ascii: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encString(v string) []byte {\n\tif v == \"\" {\n\t\treturn make([]byte, 0)\n\t}\n\treturn []byte(v)\n}\n"
  },
  {
    "path": "serialization/ascii/unmarshal.go",
    "content": "package ascii\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\tcase *string:\n\t\treturn DecString(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tcase *[]byte:\n\t\treturn DecBytes(data, v)\n\tcase **[]byte:\n\t\treturn DecBytesR(data, v)\n\tdefault:\n\t\t// Custom types (type MyString string) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal ascii: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/ascii/unmarshal_utils.go",
    "content": "package ascii\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc errInvalidData(p []byte) error {\n\tfor i := range p {\n\t\tif p[i] > 127 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal ascii: invalid charester %s\", string(p[i]))\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal ascii: can not unmarshal into nil reference(%T)(%[1]v)\", v)\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decString(p)\n\treturn errInvalidData(p)\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decStringR(p)\n\treturn errInvalidData(p)\n}\n\nfunc DecBytes(p []byte, v *[]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tif p == nil {\n\t\t*v = nil\n\t\treturn nil\n\t}\n\tif len(p) == 0 {\n\t\t*v = make([]byte, 0)\n\t\treturn nil\n\t}\n\t*v = append((*v)[:0], p...)\n\treturn errInvalidData(p)\n}\n\nfunc DecBytesR(p []byte, v **[]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decBytesR(p)\n\treturn errInvalidData(p)\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.String:\n\t\tv.SetString(decString(p))\n\tcase reflect.Slice:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn fmt.Errorf(\"failed to marshal ascii: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t\t}\n\t\tv.SetBytes(decBytes(p))\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal ascii: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t}\n\treturn errInvalidData(p)\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch ev := v.Type().Elem().Elem(); ev.Kind() {\n\tcase reflect.String:\n\t\treturn 
decReflectStringR(p, v)\n\tcase reflect.Slice:\n\t\tif ev.Elem().Kind() != reflect.Uint8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal ascii: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t\t}\n\t\treturn decReflectBytesR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal ascii: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t}\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\tv.Elem().Set(reflect.Zero(v.Elem().Type()))\n\t\t} else {\n\t\t\tv.Elem().Set(reflect.New(v.Type().Elem().Elem()))\n\t\t}\n\t\treturn nil\n\t}\n\tval := reflect.New(v.Type().Elem().Elem())\n\tval.Elem().SetString(string(p))\n\tv.Elem().Set(val)\n\treturn errInvalidData(p)\n}\n\nfunc decReflectBytesR(p []byte, v reflect.Value) error {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\tv.Elem().Set(reflect.Zero(v.Elem().Type()))\n\t\t} else {\n\t\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\t\tval.Elem().SetBytes(make([]byte, 0))\n\t\t\tv.Elem().Set(val)\n\t\t}\n\t\treturn nil\n\t}\n\ttmp := make([]byte, len(p))\n\tcopy(tmp, p)\n\n\tval := reflect.New(v.Type().Elem().Elem())\n\tval.Elem().SetBytes(tmp)\n\tv.Elem().Set(val)\n\treturn errInvalidData(p)\n}\n\nfunc decString(p []byte) string {\n\tif len(p) == 0 {\n\t\treturn \"\"\n\t}\n\treturn string(p)\n}\n\nfunc decStringR(p []byte) *string {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn new(string)\n\t}\n\ttmp := string(p)\n\treturn &tmp\n}\n\nfunc decBytes(p []byte) []byte {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn make([]byte, 0)\n\t}\n\ttmp := make([]byte, len(p))\n\tcopy(tmp, p)\n\treturn tmp\n}\n\nfunc decBytesR(p []byte) *[]byte {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\treturn nil\n\t\t}\n\t\ttmp := make([]byte, 0)\n\t\treturn &tmp\n\t}\n\ttmp := make([]byte, len(p))\n\tcopy(tmp, p)\n\treturn &tmp\n}\n"
  },
  {
    "path": "serialization/bigint/marshal.go",
    "content": "package bigint\n\nimport (\n\t\"math/big\"\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase int8:\n\t\treturn EncInt8(v)\n\tcase int16:\n\t\treturn EncInt16(v)\n\tcase int32:\n\t\treturn EncInt32(v)\n\tcase int64:\n\t\treturn EncInt64(v)\n\tcase int:\n\t\treturn EncInt(v)\n\n\tcase uint8:\n\t\treturn EncUint8(v)\n\tcase uint16:\n\t\treturn EncUint16(v)\n\tcase uint32:\n\t\treturn EncUint32(v)\n\tcase uint64:\n\t\treturn EncUint64(v)\n\tcase uint:\n\t\treturn EncUint(v)\n\n\tcase big.Int:\n\t\treturn EncBigInt(v)\n\tcase string:\n\t\treturn EncString(v)\n\n\tcase *int8:\n\t\treturn EncInt8R(v)\n\tcase *int16:\n\t\treturn EncInt16R(v)\n\tcase *int32:\n\t\treturn EncInt32R(v)\n\tcase *int64:\n\t\treturn EncInt64R(v)\n\tcase *int:\n\t\treturn EncIntR(v)\n\n\tcase *uint8:\n\t\treturn EncUint8R(v)\n\tcase *uint16:\n\t\treturn EncUint16R(v)\n\tcase *uint32:\n\t\treturn EncUint32R(v)\n\tcase *uint64:\n\t\treturn EncUint64R(v)\n\tcase *uint:\n\t\treturn EncUintR(v)\n\n\tcase *big.Int:\n\t\treturn EncBigIntR(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tdefault:\n\t\t// Custom types (type MyInt int) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/bigint/marshal_utils.go",
    "content": "package bigint\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\nconst supportedTypes = \"~int8, ~int16, ~int32, ~int64, ~int, ~uint8, ~uint16, ~uint32, ~uint64, ~uint, ~string, big.Int\"\n\nfunc EncInt8(v int8) ([]byte, error) {\n\tif v < 0 {\n\t\treturn []byte{255, 255, 255, 255, 255, 255, 255, byte(v)}, nil\n\t}\n\treturn []byte{0, 0, 0, 0, 0, 0, 0, byte(v)}, nil\n}\n\nfunc EncInt8R(v *int8) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt8(*v)\n}\n\nfunc EncInt16(v int16) ([]byte, error) {\n\tif v < 0 {\n\t\treturn []byte{255, 255, 255, 255, 255, 255, byte(v >> 8), byte(v)}, nil\n\t}\n\treturn []byte{0, 0, 0, 0, 0, 0, byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncInt16R(v *int16) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt16(*v)\n}\n\nfunc EncInt32(v int32) ([]byte, error) {\n\tif v < 0 {\n\t\treturn []byte{255, 255, 255, 255, byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n\t}\n\treturn []byte{0, 0, 0, 0, byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncInt32R(v *int32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt32(*v)\n}\n\nfunc EncInt64(v int64) ([]byte, error) {\n\treturn encInt64(v), nil\n}\n\nfunc EncInt64R(v *int64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt64(*v)\n}\n\nfunc EncInt(v int) ([]byte, error) {\n\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncIntR(v *int) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt(*v)\n}\n\nfunc EncUint8(v uint8) ([]byte, error) {\n\treturn []byte{0, 0, 0, 0, 0, 0, 0, v}, nil\n}\n\nfunc EncUint8R(v *uint8) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint8(*v)\n}\n\nfunc EncUint16(v uint16) ([]byte, error) {\n\treturn []byte{0, 0, 0, 0, 0, 0, byte(v >> 8), byte(v)}, 
nil\n}\n\nfunc EncUint16R(v *uint16) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint16(*v)\n}\n\nfunc EncUint32(v uint32) ([]byte, error) {\n\treturn []byte{0, 0, 0, 0, byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncUint32R(v *uint32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint32(*v)\n}\n\nfunc EncUint64(v uint64) ([]byte, error) {\n\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncUint64R(v *uint64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint64(*v)\n}\n\nfunc EncUint(v uint) ([]byte, error) {\n\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncUintR(v *uint) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint(*v)\n}\n\nfunc EncBigInt(v big.Int) ([]byte, error) {\n\tif !v.IsInt64() {\n\t\treturn nil, fmt.Errorf(\"failed to marshal bigint: value (%T)(%s) out of range\", v, v.String())\n\t}\n\treturn encInt64(v.Int64()), nil\n}\n\nfunc EncBigIntR(v *big.Int) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\tif !v.IsInt64() {\n\t\treturn nil, fmt.Errorf(\"failed to marshal bigint: value (%T)(%s) out of range\", v, v.String())\n\t}\n\treturn encInt64(v.Int64()), nil\n}\n\nfunc EncString(v string) ([]byte, error) {\n\tif v == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tn, err := strconv.ParseInt(v, 10, 64)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to marshal bigint: can not marshal (%T)(%[1]v) %s\", v, err)\n\t}\n\treturn encInt64(n), nil\n}\n\nfunc EncStringR(v *string) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncString(*v)\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, 
reflect.Int8:\n\t\treturn EncInt64(v.Int())\n\tcase reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8:\n\t\treturn EncUint64(v.Uint())\n\tcase reflect.String:\n\t\tval := v.String()\n\t\tif val == \"\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\tn, err := strconv.ParseInt(val, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal bigint: can not marshal (%T)(%[1]v) %s\", v.Interface(), err)\n\t\t}\n\t\treturn encInt64(n), nil\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal bigint: unsupported value type (%T)(%[1]v), supported types: %s, unsetColumn\", v.Interface(), supportedTypes)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal bigint: unsupported value type (%T)(%[1]v), supported types: %s, unsetColumn\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encInt64(v int64) []byte {\n\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n"
  },
  {
    "path": "serialization/bigint/unmarshal.go",
    "content": "package bigint\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\n\tcase *int8:\n\t\treturn DecInt8(data, v)\n\tcase *int16:\n\t\treturn DecInt16(data, v)\n\tcase *int32:\n\t\treturn DecInt32(data, v)\n\tcase *int64:\n\t\treturn DecInt64(data, v)\n\tcase *int:\n\t\treturn DecInt(data, v)\n\n\tcase *uint8:\n\t\treturn DecUint8(data, v)\n\tcase *uint16:\n\t\treturn DecUint16(data, v)\n\tcase *uint32:\n\t\treturn DecUint32(data, v)\n\tcase *uint64:\n\t\treturn DecUint64(data, v)\n\tcase *uint:\n\t\treturn DecUint(data, v)\n\n\tcase *big.Int:\n\t\treturn DecBigInt(data, v)\n\tcase *string:\n\t\treturn DecString(data, v)\n\n\tcase **int8:\n\t\treturn DecInt8R(data, v)\n\tcase **int16:\n\t\treturn DecInt16R(data, v)\n\tcase **int32:\n\t\treturn DecInt32R(data, v)\n\tcase **int64:\n\t\treturn DecInt64R(data, v)\n\tcase **int:\n\t\treturn DecIntR(data, v)\n\n\tcase **uint8:\n\t\treturn DecUint8R(data, v)\n\tcase **uint16:\n\t\treturn DecUint16R(data, v)\n\tcase **uint32:\n\t\treturn DecUint32R(data, v)\n\tcase **uint64:\n\t\treturn DecUint64R(data, v)\n\tcase **uint:\n\t\treturn DecUintR(data, v)\n\n\tcase **big.Int:\n\t\treturn DecBigIntR(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tdefault:\n\n\t\t// Custom types (type MyInt int) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: unsupported value type (%T)(%[1]v), supported types: %s\", value, supportedTypes)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/bigint/unmarshal_utils.go",
    "content": "package bigint\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\nvar errWrongDataLen = fmt.Errorf(\"failed to unmarshal bigint: the length of the data should be 0 or 8\")\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal bigint: can not unmarshal into nil reference (%T)(%[1]v))\", v)\n}\n\nfunc DecInt8(p []byte, v *int8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into int8, the data should be in the int8 range\")\n\t\t}\n\t\t*v = int8(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt8R(p []byte, v **int8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int8)\n\t\t}\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into int8, the data should be in the int8 range\")\n\t\t}\n\t\ttmp := int8(val)\n\t\t*v = &tmp\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt16(p []byte, v *int16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt16 || val < math.MinInt16 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into int16, the data should be in the int16 range\")\n\t\t}\n\t\t*v = int16(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt16R(p []byte, v **int16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int16)\n\t\t}\n\tcase 8:\n\t\tval := 
decInt64(p)\n\t\tif val > math.MaxInt16 || val < math.MinInt16 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into int16, the data should be in the int16 range\")\n\t\t}\n\t\ttmp := int16(val)\n\t\t*v = &tmp\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt32(p []byte, v *int32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt32 || val < math.MinInt32 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into int32, the data should be in the int32 range\")\n\t\t}\n\t\t*v = int32(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt32R(p []byte, v **int32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int32)\n\t\t}\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt32 || val < math.MinInt32 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into int32, the data should be in the int32 range\")\n\t\t}\n\t\ttmp := int32(val)\n\t\t*v = &tmp\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt64(p []byte, v *int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\t*v = decInt64(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt64R(p []byte, v **int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int64)\n\t\t}\n\tcase 8:\n\t\tval := decInt64(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt(p []byte, v *int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\t*v = int(p[0])<<56 | int(p[1])<<48 
| int(p[2])<<40 | int(p[3])<<32 | int(p[4])<<24 | int(p[5])<<16 | int(p[6])<<8 | int(p[7])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecIntR(p []byte, v **int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int)\n\t\t}\n\tcase 8:\n\t\tval := int(p[0])<<56 | int(p[1])<<48 | int(p[2])<<40 | int(p[3])<<32 | int(p[4])<<24 | int(p[5])<<16 | int(p[6])<<8 | int(p[7])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint8(p []byte, v *uint8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 || p[6] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into uint8, the data should be in the uint8 range\")\n\t\t}\n\t\t*v = p[7]\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint8R(p []byte, v **uint8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint8)\n\t\t}\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 || p[6] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into uint8, the data should be in the uint8 range\")\n\t\t}\n\t\tval := p[7]\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint16(p []byte, v *uint16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into uint16, the data should be in the uint16 range\")\n\t\t}\n\t\t*v = uint16(p[6])<<8 | 
uint16(p[7])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint16R(p []byte, v **uint16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint16)\n\t\t}\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into uint16, the data should be in the uint16 range\")\n\t\t}\n\t\tval := uint16(p[6])<<8 | uint16(p[7])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint32(p []byte, v *uint32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into uint32, the data should be in the uint32 range\")\n\t\t}\n\t\t*v = uint32(p[4])<<24 | uint32(p[5])<<16 | uint32(p[6])<<8 | uint32(p[7])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint32R(p []byte, v **uint32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint32)\n\t\t}\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into uint32, the data should be in the uint32 range\")\n\t\t}\n\t\tval := uint32(p[4])<<24 | uint32(p[5])<<16 | uint32(p[6])<<8 | uint32(p[7])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint64(p []byte, v *uint64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\t*v = decUint64(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint64R(p []byte, v **uint64) error {\n\tif v == nil 
{\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint64)\n\t\t}\n\tcase 8:\n\t\tval := decUint64(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint(p []byte, v *uint) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\t*v = uint(p[0])<<56 | uint(p[1])<<48 | uint(p[2])<<40 | uint(p[3])<<32 | uint(p[4])<<24 | uint(p[5])<<16 | uint(p[6])<<8 | uint(p[7])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUintR(p []byte, v **uint) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint)\n\t\t}\n\tcase 8:\n\t\tval := uint(p[0])<<56 | uint(p[1])<<48 | uint(p[2])<<40 | uint(p[3])<<32 | uint(p[4])<<24 | uint(p[5])<<16 | uint(p[6])<<8 | uint(p[7])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = \"\"\n\t\t} else {\n\t\t\t*v = \"0\"\n\t\t}\n\tcase 8:\n\t\t*v = strconv.FormatInt(decInt64(p), 10)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\tval := \"0\"\n\t\t\t*v = &val\n\t\t}\n\tcase 8:\n\t\tval := strconv.FormatInt(decInt64(p), 10)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecBigInt(p []byte, v *big.Int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt64(0)\n\tcase 8:\n\t\tv.SetInt64(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc 
DecBigIntR(p []byte, v **big.Int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(big.Int)\n\t\t}\n\tcase 8:\n\t\t*v = big.NewInt(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal bigint: can not unmarshal into nil reference (%T)(%[1]v))\", v.Interface())\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Int8:\n\t\treturn decReflectInt8(p, v)\n\tcase reflect.Int16:\n\t\treturn decReflectInt16(p, v)\n\tcase reflect.Int32:\n\t\treturn decReflectInt32(p, v)\n\tcase reflect.Int64, reflect.Int:\n\t\treturn decReflectInts(p, v)\n\tcase reflect.Uint8:\n\t\treturn decReflectUint8(p, v)\n\tcase reflect.Uint16:\n\t\treturn decReflectUint16(p, v)\n\tcase reflect.Uint32:\n\t\treturn decReflectUint32(p, v)\n\tcase reflect.Uint64, reflect.Uint:\n\t\treturn decReflectUints(p, v)\n\tcase reflect.String:\n\t\treturn decReflectString(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal bigint: unsupported value type (%T)(%[1]v), supported types: %s\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc decReflectInt8(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into %T, the data should be in the int8 range\", v.Interface())\n\t\t}\n\t\tv.SetInt(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectInt16(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt16 || val < math.MinInt16 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into %T, the data should be in the int16 range\", 
v.Interface())\n\t\t}\n\t\tv.SetInt(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectInt32(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt32 || val < math.MinInt32 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into %T, the data should be in the int32 range\", v.Interface())\n\t\t}\n\t\tv.SetInt(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectInts(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 8:\n\t\tv.SetInt(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint8(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 || p[6] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into %T, the data should be in the uint8 range\", v.Interface())\n\t\t}\n\t\tv.SetUint(uint64(p[7]))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint16(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into %T, the data should be in the uint16 range\", v.Interface())\n\t\t}\n\t\tv.SetUint(uint64(p[6])<<8 | uint64(p[7]))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint32(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into %T, the data should be in the uint32 range\", v.Interface())\n\t\t}\n\t\tv.SetUint(uint64(p[4])<<24 | uint64(p[5])<<16 | 
uint64(p[6])<<8 | uint64(p[7]))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUints(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 8:\n\t\tv.SetUint(decUint64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectString(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.SetString(\"\")\n\t\t} else {\n\t\t\tv.SetString(\"0\")\n\t\t}\n\tcase 8:\n\t\tv.SetString(strconv.FormatInt(decInt64(p), 10))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal bigint: can not unmarshal into nil reference (%T)(%[1]v)\", v.Interface())\n\t}\n\n\tswitch v.Type().Elem().Elem().Kind() {\n\tcase reflect.Int8:\n\t\treturn decReflectInt8R(p, v)\n\tcase reflect.Int16:\n\t\treturn decReflectInt16R(p, v)\n\tcase reflect.Int32:\n\t\treturn decReflectInt32R(p, v)\n\tcase reflect.Int64, reflect.Int:\n\t\treturn decReflectIntsR(p, v)\n\tcase reflect.Uint8:\n\t\treturn decReflectUint8R(p, v)\n\tcase reflect.Uint16:\n\t\treturn decReflectUint16R(p, v)\n\tcase reflect.Uint32:\n\t\treturn decReflectUint32R(p, v)\n\tcase reflect.Uint64, reflect.Uint:\n\t\treturn decReflectUintsR(p, v)\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal bigint: unsupported value type (%T)(%[1]v), supported types: %s\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc decReflectInt8R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into %T, the data should be in the int8 range\", v.Interface())\n\t\t}\n\t\tnewVal := 
reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(val)\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectInt16R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt16 || val < math.MinInt16 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into %T, the data should be in the int16 range\", v.Interface())\n\t\t}\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(val)\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectInt32R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt32 || val < math.MinInt32 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into %T, the data should be in the int32 range\", v.Interface())\n\t\t}\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(val)\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectIntsR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetInt(decInt64(p))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint8R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 || p[6] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into %T, the data should be in the uint8 range\", 
v.Interface())\n\t\t}\n\t\tnewVal.Elem().SetUint(uint64(p[7]))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint16R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into %T, the data should be in the uint16 range\", v.Interface())\n\t\t}\n\t\tnewVal.Elem().SetUint(uint64(p[6])<<8 | uint64(p[7]))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint32R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal bigint: to unmarshal into %T, the data should be in the uint32 range\", v.Interface())\n\t\t}\n\t\tnewVal.Elem().SetUint(uint64(p[4])<<24 | uint64(p[5])<<16 | uint64(p[6])<<8 | uint64(p[7]))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUintsR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetUint(decUint64(p))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tvar val reflect.Value\n\t\tif p == nil {\n\t\t\tval = reflect.Zero(v.Type().Elem())\n\t\t} else {\n\t\t\tval = reflect.New(v.Type().Elem().Elem())\n\t\t\tval.Elem().SetString(\"0\")\n\t\t}\n\t\tv.Elem().Set(val)\n\tcase 8:\n\t\tval := 
reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetString(strconv.FormatInt(decInt64(p), 10))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectNullableR(p []byte, v reflect.Value) reflect.Value {\n\tif p == nil {\n\t\treturn reflect.Zero(v.Elem().Type())\n\t}\n\treturn reflect.New(v.Type().Elem().Elem())\n}\n\nfunc decInt64(p []byte) int64 {\n\treturn int64(p[0])<<56 | int64(p[1])<<48 | int64(p[2])<<40 | int64(p[3])<<32 | int64(p[4])<<24 | int64(p[5])<<16 | int64(p[6])<<8 | int64(p[7])\n}\n\nfunc decUint64(p []byte) uint64 {\n\treturn uint64(p[0])<<56 | uint64(p[1])<<48 | uint64(p[2])<<40 | uint64(p[3])<<32 | uint64(p[4])<<24 | uint64(p[5])<<16 | uint64(p[6])<<8 | uint64(p[7])\n}\n"
  },
  {
    "path": "serialization/blob/marshal.go",
    "content": "package blob\n\nimport (\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase string:\n\t\treturn EncString(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tcase []byte:\n\t\treturn EncBytes(v)\n\tcase *[]byte:\n\t\treturn EncBytesR(v)\n\tdefault:\n\t\t// Custom types (type MyString string) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(rv)\n\t\t}\n\t\treturn EncReflectR(rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/blob/marshal_utils.go",
    "content": "package blob\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc EncString(v string) ([]byte, error) {\n\treturn encString(v), nil\n}\n\nfunc EncStringR(v *string) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encString(*v), nil\n}\n\nfunc EncBytes(v []byte) ([]byte, error) {\n\treturn v, nil\n}\n\nfunc EncBytesR(v *[]byte) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn *v, nil\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.String:\n\t\treturn encString(v.String()), nil\n\tcase reflect.Slice:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal blob: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte, unsetColumn\", v.Interface())\n\t\t}\n\t\treturn EncBytes(v.Bytes())\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal blob: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal blob: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encString(v string) []byte {\n\tif v == \"\" {\n\t\treturn make([]byte, 0)\n\t}\n\treturn []byte(v)\n}\n"
  },
  {
    "path": "serialization/blob/unmarshal.go",
    "content": "package blob\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\tcase *string:\n\t\treturn DecString(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tcase *[]byte:\n\t\treturn DecBytes(data, v)\n\tcase **[]byte:\n\t\treturn DecBytesR(data, v)\n\tcase *any:\n\t\treturn DecInterface(data, v)\n\tdefault:\n\t\t// Custom types (type MyString string) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal blob: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/blob/unmarshal_utils.go",
    "content": "package blob\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal blob: can not unmarshal into nil reference(%T)(%[1]v)\", v)\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decString(p)\n\treturn nil\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decStringR(p)\n\treturn nil\n}\n\nfunc DecBytes(p []byte, v *[]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tif p == nil {\n\t\t*v = nil\n\t\treturn nil\n\t}\n\tif len(p) == 0 {\n\t\t*v = make([]byte, 0)\n\t\treturn nil\n\t}\n\t*v = append((*v)[:0], p...)\n\treturn nil\n}\n\nfunc DecBytesR(p []byte, v **[]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decBytesR(p)\n\treturn nil\n}\n\nfunc DecInterface(p []byte, v *any) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decBytes(p)\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.String:\n\t\tv.SetString(decString(p))\n\tcase reflect.Slice:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal blob: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t\t}\n\t\tv.SetBytes(decBytes(p))\n\tcase reflect.Interface:\n\t\tv.Set(reflect.ValueOf(decBytes(p)))\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal blob: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t}\n\treturn nil\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch ev := v.Type().Elem().Elem(); ev.Kind() {\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, v)\n\tcase reflect.Slice:\n\t\tif ev.Elem().Kind() != 
reflect.Uint8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal blob: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t\t}\n\t\treturn decReflectBytesR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal blob: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t}\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\tv.Elem().Set(reflect.Zero(v.Type().Elem()))\n\t\t} else {\n\t\t\tv.Elem().Set(reflect.New(v.Type().Elem().Elem()))\n\t\t}\n\t\treturn nil\n\t}\n\tval := reflect.New(v.Type().Elem().Elem())\n\tval.Elem().SetString(string(p))\n\tv.Elem().Set(val)\n\treturn nil\n}\n\nfunc decReflectBytesR(p []byte, v reflect.Value) error {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\tv.Elem().Set(reflect.Zero(v.Elem().Type()))\n\t\t} else {\n\t\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\t\tval.Elem().SetBytes(make([]byte, 0))\n\t\t\tv.Elem().Set(val)\n\t\t}\n\t\treturn nil\n\t}\n\ttmp := make([]byte, len(p))\n\tcopy(tmp, p)\n\n\tval := reflect.New(v.Type().Elem().Elem())\n\tval.Elem().SetBytes(tmp)\n\tv.Elem().Set(val)\n\treturn nil\n}\n\nfunc decString(p []byte) string {\n\tif len(p) == 0 {\n\t\treturn \"\"\n\t}\n\treturn string(p)\n}\n\nfunc decStringR(p []byte) *string {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn new(string)\n\t}\n\ttmp := string(p)\n\treturn &tmp\n}\n\nfunc decBytes(p []byte) []byte {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn make([]byte, 0)\n\t}\n\ttmp := make([]byte, len(p))\n\tcopy(tmp, p)\n\treturn tmp\n}\n\nfunc decBytesR(p []byte) *[]byte {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\treturn nil\n\t\t}\n\t\ttmp := make([]byte, 0)\n\t\treturn &tmp\n\t}\n\ttmp := make([]byte, len(p))\n\tcopy(tmp, p)\n\treturn &tmp\n}\n"
  },
  {
    "path": "serialization/blob/unmarshal_utils_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage blob\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n)\n\nfunc TestDecBytesArrayBackedSlice(t *testing.T) {\n\tt.Parallel()\n\n\tdata := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C}\n\n\tvar arr [12]byte\n\tslice := arr[:]\n\n\tif err := DecBytes(data, &slice); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !bytes.Equal(arr[:], data) {\n\t\tt.Fatalf(\"expected underlying array to be %v, got %v\", data, arr)\n\t}\n\tif !bytes.Equal(slice, data) {\n\t\tt.Fatalf(\"expected slice to be %v, got %v\", data, slice)\n\t}\n}\n\nfunc TestDecBytesArrayBackedSliceViaUnmarshal(t *testing.T) {\n\tt.Parallel()\n\n\ttype ObjectID [12]byte\n\n\tdata := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C}\n\tvar id ObjectID\n\tpkSlice := id[:]\n\n\tif err := Unmarshal(data, &pkSlice); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !bytes.Equal(id[:], data) {\n\t\tt.Fatalf(\"expected underlying array to be %v, got %v\", data, id)\n\t}\n}\n\nfunc TestDecBytesNil(t *testing.T) {\n\tt.Parallel()\n\n\texisting := []byte{1, 2, 3}\n\tif err := DecBytes(nil, &existing); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif existing != nil {\n\t\tt.Fatalf(\"expected nil, got %v\", existing)\n\t}\n}\n\nfunc TestDecBytesEmpty(t *testing.T) {\n\tt.Parallel()\n\n\tvar dest []byte\n\tif err := DecBytes(make([]byte, 0), &dest); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif dest == nil {\n\t\tt.Fatal(\"expected non-nil empty slice for non-nil empty input\")\n\t}\n\tif len(dest) != 0 {\n\t\tt.Fatalf(\"expected empty slice, got %v\", dest)\n\t}\n}\n\nfunc TestDecBytesPreallocated(t *testing.T) {\n\tt.Parallel()\n\n\tdata := []byte{0xAA, 0xBB, 0xCC}\n\tdest := make([]byte, 5)\n\tif err := DecBytes(data, &dest); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(dest, data) {\n\t\tt.Fatalf(\"expected %v, got %v\", data, dest)\n\t}\n}\n\nfunc TestDecBytesNilReference(t *testing.T) {\n\tt.Parallel()\n\n\tif err 
:= DecBytes([]byte{1}, nil); err == nil {\n\t\tt.Fatal(\"expected error for nil reference\")\n\t}\n}\n"
  },
  {
    "path": "serialization/boolean/marshal.go",
    "content": "package boolean\n\nimport (\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase bool:\n\t\treturn EncBool(v)\n\tcase *bool:\n\t\treturn EncBoolR(v)\n\tdefault:\n\t\t// Custom types (type MyBool bool) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/boolean/marshal_utils.go",
    "content": "package boolean\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc EncBool(v bool) ([]byte, error) {\n\treturn encBool(v), nil\n}\n\nfunc EncBoolR(v *bool) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encBool(*v), nil\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.Bool:\n\t\treturn encBool(v.Bool()), nil\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal boolean: unsupported value type (%T)(%[1]v), supported types: ~bool, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal boolean: unsupported value type (%T)(%[1]v), supported types: ~bool, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encBool(v bool) []byte {\n\tif v {\n\t\treturn []byte{1}\n\t}\n\treturn []byte{0}\n}\n"
  },
  {
    "path": "serialization/boolean/unmarshal.go",
    "content": "package boolean\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\tcase *bool:\n\t\treturn DecBool(data, v)\n\tcase **bool:\n\t\treturn DecBoolR(data, v)\n\tdefault:\n\t\t// Custom types (type MyBool bool) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal boolean: unsupported value type (%T)(%[1]v)\", v)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/boolean/unmarshal_utils.go",
    "content": "package boolean\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nvar errWrongDataLen = fmt.Errorf(\"failed to unmarshal boolean: the length of the data should be 0 or 1\")\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal boolean: can not unmarshal into nil reference(%T)(%[1]v)\", v)\n}\n\nfunc DecBool(p []byte, v *bool) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = false\n\tcase 1:\n\t\t*v = decBool(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecBoolR(p []byte, v **bool) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(bool)\n\t\t}\n\tcase 1:\n\t\tval := decBool(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Bool:\n\t\treturn decReflectBool(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal boolean: unsupported value type (%T)(%[1]v), supported types: ~bool\", v.Interface())\n\t}\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v.Type().Elem().Elem().Kind() {\n\tcase reflect.Bool:\n\t\treturn decReflectBoolR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal boolean: unsupported value type (%T)(%[1]v), supported types: ~bool\", v.Interface())\n\t}\n}\n\nfunc decReflectBool(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetBool(false)\n\tcase 1:\n\t\tv.SetBool(decBool(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectBoolR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.Elem().Set(reflect.Zero(v.Type().Elem()))\n\t\t} else {\n\t\t\tval := 
reflect.New(v.Type().Elem().Elem())\n\t\t\tv.Elem().Set(val)\n\t\t}\n\tcase 1:\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetBool(decBool(p))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decBool(p []byte) bool {\n\treturn p[0] != 0\n}\n"
  },
  {
    "path": "serialization/counter/marshal.go",
    "content": "package counter\n\nimport (\n\t\"math/big\"\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase int8:\n\t\treturn EncInt8(v)\n\tcase int16:\n\t\treturn EncInt16(v)\n\tcase int32:\n\t\treturn EncInt32(v)\n\tcase int64:\n\t\treturn EncInt64(v)\n\tcase int:\n\t\treturn EncInt(v)\n\n\tcase uint8:\n\t\treturn EncUint8(v)\n\tcase uint16:\n\t\treturn EncUint16(v)\n\tcase uint32:\n\t\treturn EncUint32(v)\n\tcase uint64:\n\t\treturn EncUint64(v)\n\tcase uint:\n\t\treturn EncUint(v)\n\n\tcase big.Int:\n\t\treturn EncBigInt(v)\n\tcase string:\n\t\treturn EncString(v)\n\n\tcase *int8:\n\t\treturn EncInt8R(v)\n\tcase *int16:\n\t\treturn EncInt16R(v)\n\tcase *int32:\n\t\treturn EncInt32R(v)\n\tcase *int64:\n\t\treturn EncInt64R(v)\n\tcase *int:\n\t\treturn EncIntR(v)\n\n\tcase *uint8:\n\t\treturn EncUint8R(v)\n\tcase *uint16:\n\t\treturn EncUint16R(v)\n\tcase *uint32:\n\t\treturn EncUint32R(v)\n\tcase *uint64:\n\t\treturn EncUint64R(v)\n\tcase *uint:\n\t\treturn EncUintR(v)\n\n\tcase *big.Int:\n\t\treturn EncBigIntR(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tdefault:\n\t\t// Custom types (type MyInt int) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/counter/marshal_utils.go",
    "content": "package counter\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\nconst supportedTypes = \"~int8, ~int16, ~int32, ~int64, ~int, ~uint8, ~uint16, ~uint32, ~uint64, ~uint, ~string, big.Int\"\n\nfunc EncInt8(v int8) ([]byte, error) {\n\tif v < 0 {\n\t\treturn []byte{255, 255, 255, 255, 255, 255, 255, byte(v)}, nil\n\t}\n\treturn []byte{0, 0, 0, 0, 0, 0, 0, byte(v)}, nil\n}\n\nfunc EncInt8R(v *int8) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt8(*v)\n}\n\nfunc EncInt16(v int16) ([]byte, error) {\n\tif v < 0 {\n\t\treturn []byte{255, 255, 255, 255, 255, 255, byte(v >> 8), byte(v)}, nil\n\t}\n\treturn []byte{0, 0, 0, 0, 0, 0, byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncInt16R(v *int16) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt16(*v)\n}\n\nfunc EncInt32(v int32) ([]byte, error) {\n\tif v < 0 {\n\t\treturn []byte{255, 255, 255, 255, byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n\t}\n\treturn []byte{0, 0, 0, 0, byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncInt32R(v *int32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt32(*v)\n}\n\nfunc EncInt64(v int64) ([]byte, error) {\n\treturn encInt64(v), nil\n}\n\nfunc EncInt64R(v *int64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt64(*v)\n}\n\nfunc EncInt(v int) ([]byte, error) {\n\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncIntR(v *int) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt(*v)\n}\n\nfunc EncUint8(v uint8) ([]byte, error) {\n\treturn []byte{0, 0, 0, 0, 0, 0, 0, v}, nil\n}\n\nfunc EncUint8R(v *uint8) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint8(*v)\n}\n\nfunc EncUint16(v uint16) ([]byte, error) {\n\treturn []byte{0, 0, 0, 0, 0, 0, byte(v >> 8), 
byte(v)}, nil\n}\n\nfunc EncUint16R(v *uint16) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint16(*v)\n}\n\nfunc EncUint32(v uint32) ([]byte, error) {\n\treturn []byte{0, 0, 0, 0, byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncUint32R(v *uint32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint32(*v)\n}\n\nfunc EncUint64(v uint64) ([]byte, error) {\n\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncUint64R(v *uint64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint64(*v)\n}\n\nfunc EncUint(v uint) ([]byte, error) {\n\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncUintR(v *uint) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint(*v)\n}\n\nfunc EncBigInt(v big.Int) ([]byte, error) {\n\tif !v.IsInt64() {\n\t\treturn nil, fmt.Errorf(\"failed to marshal counter: value (%T)(%s) out of range\", v, v.String())\n\t}\n\treturn encInt64(v.Int64()), nil\n}\n\nfunc EncBigIntR(v *big.Int) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\tif !v.IsInt64() {\n\t\treturn nil, fmt.Errorf(\"failed to marshal counter: value (%T)(%s) out of range\", v, v.String())\n\t}\n\treturn encInt64(v.Int64()), nil\n}\n\nfunc EncString(v string) ([]byte, error) {\n\tif v == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tn, err := strconv.ParseInt(v, 10, 64)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to marshal counter: can not marshal %#v %s\", v, err)\n\t}\n\treturn encInt64(n), nil\n}\n\nfunc EncStringR(v *string) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncString(*v)\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, 
reflect.Int8:\n\t\treturn EncInt64(v.Int())\n\tcase reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8:\n\t\treturn EncUint64(v.Uint())\n\tcase reflect.String:\n\t\tval := v.String()\n\t\tif val == \"\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\tn, err := strconv.ParseInt(val, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal counter: can not marshal (%T)(%[1]v) %s\", v.Interface(), err)\n\t\t}\n\t\treturn encInt64(n), nil\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal counter: unsupported value type (%T)(%[1]v), supported types: %s, unsetColumn\", v.Interface(), supportedTypes)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal counter: unsupported value type (%T)(%[1]v), supported types: %s, unsetColumn\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encInt64(v int64) []byte {\n\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n"
  },
  {
    "path": "serialization/counter/unmarshal.go",
    "content": "package counter\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\n\tcase *int8:\n\t\treturn DecInt8(data, v)\n\tcase *int16:\n\t\treturn DecInt16(data, v)\n\tcase *int32:\n\t\treturn DecInt32(data, v)\n\tcase *int64:\n\t\treturn DecInt64(data, v)\n\tcase *int:\n\t\treturn DecInt(data, v)\n\n\tcase *uint8:\n\t\treturn DecUint8(data, v)\n\tcase *uint16:\n\t\treturn DecUint16(data, v)\n\tcase *uint32:\n\t\treturn DecUint32(data, v)\n\tcase *uint64:\n\t\treturn DecUint64(data, v)\n\tcase *uint:\n\t\treturn DecUint(data, v)\n\n\tcase *big.Int:\n\t\treturn DecBigInt(data, v)\n\tcase *string:\n\t\treturn DecString(data, v)\n\n\tcase **int8:\n\t\treturn DecInt8R(data, v)\n\tcase **int16:\n\t\treturn DecInt16R(data, v)\n\tcase **int32:\n\t\treturn DecInt32R(data, v)\n\tcase **int64:\n\t\treturn DecInt64R(data, v)\n\tcase **int:\n\t\treturn DecIntR(data, v)\n\n\tcase **uint8:\n\t\treturn DecUint8R(data, v)\n\tcase **uint16:\n\t\treturn DecUint16R(data, v)\n\tcase **uint32:\n\t\treturn DecUint32R(data, v)\n\tcase **uint64:\n\t\treturn DecUint64R(data, v)\n\tcase **uint:\n\t\treturn DecUintR(data, v)\n\n\tcase **big.Int:\n\t\treturn DecBigIntR(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tdefault:\n\n\t\t// Custom types (type MyInt int) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: unsupported value type (%T)(%[1]v), supported types: %s\", value, supportedTypes)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/counter/unmarshal_utils.go",
    "content": "package counter\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\nvar errWrongDataLen = fmt.Errorf(\"failed to unmarshal counter: the length of the data should be 0 or 8\")\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal counter: can not unmarshal into nil reference (%T)(%[1]v))\", v)\n}\n\nfunc DecInt8(p []byte, v *int8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into int8, the data should be in the int8 range\")\n\t\t}\n\t\t*v = int8(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt8R(p []byte, v **int8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int8)\n\t\t}\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into int8, the data should be in the int8 range\")\n\t\t}\n\t\ttmp := int8(val)\n\t\t*v = &tmp\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt16(p []byte, v *int16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt16 || val < math.MinInt16 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into int16, the data should be in the int16 range\")\n\t\t}\n\t\t*v = int16(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt16R(p []byte, v **int16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int16)\n\t\t}\n\tcase 8:\n\t\tval := 
decInt64(p)\n\t\tif val > math.MaxInt16 || val < math.MinInt16 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into int16, the data should be in the int16 range\")\n\t\t}\n\t\ttmp := int16(val)\n\t\t*v = &tmp\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt32(p []byte, v *int32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt32 || val < math.MinInt32 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into int32, the data should be in the int32 range\")\n\t\t}\n\t\t*v = int32(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt32R(p []byte, v **int32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int32)\n\t\t}\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt32 || val < math.MinInt32 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into int32, the data should be in the int32 range\")\n\t\t}\n\t\ttmp := int32(val)\n\t\t*v = &tmp\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt64(p []byte, v *int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\t*v = decInt64(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt64R(p []byte, v **int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int64)\n\t\t}\n\tcase 8:\n\t\tval := decInt64(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt(p []byte, v *int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\t*v = int(p[0])<<56 | 
int(p[1])<<48 | int(p[2])<<40 | int(p[3])<<32 | int(p[4])<<24 | int(p[5])<<16 | int(p[6])<<8 | int(p[7])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecIntR(p []byte, v **int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int)\n\t\t}\n\tcase 8:\n\t\tval := int(p[0])<<56 | int(p[1])<<48 | int(p[2])<<40 | int(p[3])<<32 | int(p[4])<<24 | int(p[5])<<16 | int(p[6])<<8 | int(p[7])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint8(p []byte, v *uint8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 || p[6] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into uint8, the data should be in the uint8 range\")\n\t\t}\n\t\t*v = p[7]\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint8R(p []byte, v **uint8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint8)\n\t\t}\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 || p[6] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into uint8, the data should be in the uint8 range\")\n\t\t}\n\t\tval := p[7]\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint16(p []byte, v *uint16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into uint16, the data should be in the uint16 range\")\n\t\t}\n\t\t*v = uint16(p[6])<<8 | 
uint16(p[7])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint16R(p []byte, v **uint16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint16)\n\t\t}\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into uint16, the data should be in the uint16 range\")\n\t\t}\n\t\tval := uint16(p[6])<<8 | uint16(p[7])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint32(p []byte, v *uint32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into uint32, the data should be in the uint32 range\")\n\t\t}\n\t\t*v = uint32(p[4])<<24 | uint32(p[5])<<16 | uint32(p[6])<<8 | uint32(p[7])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint32R(p []byte, v **uint32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint32)\n\t\t}\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into uint32, the data should be in the uint32 range\")\n\t\t}\n\t\tval := uint32(p[4])<<24 | uint32(p[5])<<16 | uint32(p[6])<<8 | uint32(p[7])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint64(p []byte, v *uint64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\t*v = decUint64(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint64R(p []byte, v **uint64) error {\n\tif v == nil 
{\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint64)\n\t\t}\n\tcase 8:\n\t\tval := decUint64(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint(p []byte, v *uint) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\t*v = uint(p[0])<<56 | uint(p[1])<<48 | uint(p[2])<<40 | uint(p[3])<<32 | uint(p[4])<<24 | uint(p[5])<<16 | uint(p[6])<<8 | uint(p[7])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUintR(p []byte, v **uint) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint)\n\t\t}\n\tcase 8:\n\t\tval := uint(p[0])<<56 | uint(p[1])<<48 | uint(p[2])<<40 | uint(p[3])<<32 | uint(p[4])<<24 | uint(p[5])<<16 | uint(p[6])<<8 | uint(p[7])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = \"\"\n\t\t} else {\n\t\t\t*v = \"0\"\n\t\t}\n\tcase 8:\n\t\t*v = strconv.FormatInt(decInt64(p), 10)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\tval := \"0\"\n\t\t\t*v = &val\n\t\t}\n\tcase 8:\n\t\tval := strconv.FormatInt(decInt64(p), 10)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecBigInt(p []byte, v *big.Int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt64(0)\n\tcase 8:\n\t\tv.SetInt64(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc 
DecBigIntR(p []byte, v **big.Int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(big.Int)\n\t\t}\n\tcase 8:\n\t\t*v = big.NewInt(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal counter: can not unmarshal into nil reference (%T)(%[1]v)\", v.Interface())\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Int8:\n\t\treturn decReflectInt8(p, v)\n\tcase reflect.Int16:\n\t\treturn decReflectInt16(p, v)\n\tcase reflect.Int32:\n\t\treturn decReflectInt32(p, v)\n\tcase reflect.Int64, reflect.Int:\n\t\treturn decReflectInts(p, v)\n\tcase reflect.Uint8:\n\t\treturn decReflectUint8(p, v)\n\tcase reflect.Uint16:\n\t\treturn decReflectUint16(p, v)\n\tcase reflect.Uint32:\n\t\treturn decReflectUint32(p, v)\n\tcase reflect.Uint64, reflect.Uint:\n\t\treturn decReflectUints(p, v)\n\tcase reflect.String:\n\t\treturn decReflectString(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal counter: unsupported value type (%T)(%[1]v), supported types: %s\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc decReflectInt8(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into %T, the data should be in the int8 range\", v.Interface())\n\t\t}\n\t\tv.SetInt(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectInt16(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt16 || val < math.MinInt16 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into %T, the data should be in the int16 range\", 
v.Interface())\n\t\t}\n\t\tv.SetInt(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectInt32(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt32 || val < math.MinInt32 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into %T, the data should be in the int32 range\", v.Interface())\n\t\t}\n\t\tv.SetInt(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectInts(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 8:\n\t\tv.SetInt(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint8(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 || p[6] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into %T, the data should be in the uint8 range\", v.Interface())\n\t\t}\n\t\tv.SetUint(uint64(p[7]))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint16(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into %T, the data should be in the uint16 range\", v.Interface())\n\t\t}\n\t\tv.SetUint(uint64(p[6])<<8 | uint64(p[7]))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint32(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 8:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into %T, the data should be in the uint32 range\", v.Interface())\n\t\t}\n\t\tv.SetUint(uint64(p[4])<<24 | uint64(p[5])<<16 | 
uint64(p[6])<<8 | uint64(p[7]))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUints(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 8:\n\t\tv.SetUint(decUint64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectString(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.SetString(\"\")\n\t\t} else {\n\t\t\tv.SetString(\"0\")\n\t\t}\n\tcase 8:\n\t\tv.SetString(strconv.FormatInt(decInt64(p), 10))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal counter: can not unmarshal into nil reference (%T)(%[1]v)\", v.Interface())\n\t}\n\n\tswitch v.Type().Elem().Elem().Kind() {\n\tcase reflect.Int8:\n\t\treturn decReflectInt8R(p, v)\n\tcase reflect.Int16:\n\t\treturn decReflectInt16R(p, v)\n\tcase reflect.Int32:\n\t\treturn decReflectInt32R(p, v)\n\tcase reflect.Int64, reflect.Int:\n\t\treturn decReflectIntsR(p, v)\n\tcase reflect.Uint8:\n\t\treturn decReflectUint8R(p, v)\n\tcase reflect.Uint16:\n\t\treturn decReflectUint16R(p, v)\n\tcase reflect.Uint32:\n\t\treturn decReflectUint32R(p, v)\n\tcase reflect.Uint64, reflect.Uint:\n\t\treturn decReflectUintsR(p, v)\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal counter: unsupported value type (%T)(%[1]v), supported types: %s\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc decReflectInt8R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into %T, the data should be in the int8 range\", v.Interface())\n\t\t}\n\t\tnewVal := 
reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(val)\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectInt16R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt16 || val < math.MinInt16 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into %T, the data should be in the int16 range\", v.Interface())\n\t\t}\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(val)\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectInt32R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt32 || val < math.MinInt32 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into %T, the data should be in the int32 range\", v.Interface())\n\t\t}\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(val)\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectIntsR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetInt(decInt64(p))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint8R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 || p[6] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into %T, the data should be in the uint8 range\", 
v.Interface())\n\t\t}\n\t\tnewVal.Elem().SetUint(uint64(p[7]))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint16R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 || p[4] != 0 || p[5] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into %T, the data should be in the uint16 range\", v.Interface())\n\t\t}\n\t\tnewVal.Elem().SetUint(uint64(p[6])<<8 | uint64(p[7]))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint32R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 || p[3] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal counter: to unmarshal into %T, the data should be in the uint32 range\", v.Interface())\n\t\t}\n\t\tnewVal.Elem().SetUint(uint64(p[4])<<24 | uint64(p[5])<<16 | uint64(p[6])<<8 | uint64(p[7]))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUintsR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetUint(decUint64(p))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tvar val reflect.Value\n\t\tif p == nil {\n\t\t\tval = reflect.Zero(v.Type().Elem())\n\t\t} else {\n\t\t\tval = reflect.New(v.Type().Elem().Elem())\n\t\t\tval.Elem().SetString(\"0\")\n\t\t}\n\t\tv.Elem().Set(val)\n\tcase 8:\n\t\tval := 
reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetString(strconv.FormatInt(decInt64(p), 10))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectNullableR(p []byte, v reflect.Value) reflect.Value {\n\tif p == nil {\n\t\treturn reflect.Zero(v.Elem().Type())\n\t}\n\treturn reflect.New(v.Type().Elem().Elem())\n}\n\nfunc decInt64(p []byte) int64 {\n\treturn int64(p[0])<<56 | int64(p[1])<<48 | int64(p[2])<<40 | int64(p[3])<<32 | int64(p[4])<<24 | int64(p[5])<<16 | int64(p[6])<<8 | int64(p[7])\n}\n\nfunc decUint64(p []byte) uint64 {\n\treturn uint64(p[0])<<56 | uint64(p[1])<<48 | uint64(p[2])<<40 | uint64(p[3])<<32 | uint64(p[4])<<24 | uint64(p[5])<<16 | uint64(p[6])<<8 | uint64(p[7])\n}\n"
  },
  {
    "path": "serialization/cqlint/marshal.go",
    "content": "package cqlint\n\nimport (\n\t\"math/big\"\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase int8:\n\t\treturn EncInt8(v)\n\tcase int32:\n\t\treturn EncInt32(v)\n\tcase int16:\n\t\treturn EncInt16(v)\n\tcase int64:\n\t\treturn EncInt64(v)\n\tcase int:\n\t\treturn EncInt(v)\n\n\tcase uint8:\n\t\treturn EncUint8(v)\n\tcase uint16:\n\t\treturn EncUint16(v)\n\tcase uint32:\n\t\treturn EncUint32(v)\n\tcase uint64:\n\t\treturn EncUint64(v)\n\tcase uint:\n\t\treturn EncUint(v)\n\n\tcase big.Int:\n\t\treturn EncBigInt(v)\n\tcase string:\n\t\treturn EncString(v)\n\n\tcase *int8:\n\t\treturn EncInt8R(v)\n\tcase *int16:\n\t\treturn EncInt16R(v)\n\tcase *int32:\n\t\treturn EncInt32R(v)\n\tcase *int64:\n\t\treturn EncInt64R(v)\n\tcase *int:\n\t\treturn EncIntR(v)\n\n\tcase *uint8:\n\t\treturn EncUint8R(v)\n\tcase *uint16:\n\t\treturn EncUint16R(v)\n\tcase *uint32:\n\t\treturn EncUint32R(v)\n\tcase *uint64:\n\t\treturn EncUint64R(v)\n\tcase *uint:\n\t\treturn EncUintR(v)\n\n\tcase *big.Int:\n\t\treturn EncBigIntR(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tdefault:\n\t\t// Custom types (type MyInt int) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/cqlint/marshal_utils.go",
    "content": "package cqlint\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\nconst supportedTypes = \"~int8, ~int16, ~int32, ~int64, ~int, ~uint8, ~uint16, ~uint32, ~uint64, ~uint, ~string, big.Int\"\n\nvar (\n\tmaxBigInt = big.NewInt(math.MaxInt32)\n\tminBigInt = big.NewInt(math.MinInt32)\n)\n\nfunc EncInt8(v int8) ([]byte, error) {\n\tif v < 0 {\n\t\treturn []byte{255, 255, 255, byte(v)}, nil\n\t}\n\treturn []byte{0, 0, 0, byte(v)}, nil\n}\n\nfunc EncInt8R(v *int8) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt8(*v)\n}\n\nfunc EncInt16(v int16) ([]byte, error) {\n\tif v < 0 {\n\t\treturn []byte{255, 255, byte(v >> 8), byte(v)}, nil\n\t}\n\treturn []byte{0, 0, byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncInt16R(v *int16) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt16(*v)\n}\n\nfunc EncInt32(v int32) ([]byte, error) {\n\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncInt32R(v *int32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt32(*v)\n}\n\nfunc EncInt64(v int64) ([]byte, error) {\n\tif v > math.MaxInt32 || v < math.MinInt32 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal int: value %#v out of range\", v)\n\t}\n\treturn encInt64(v), nil\n}\n\nfunc EncInt64R(v *int64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt64(*v)\n}\n\nfunc EncInt(v int) ([]byte, error) {\n\tif v > math.MaxInt32 || v < math.MinInt32 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal int: value %#v out of range\", v)\n\t}\n\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncIntR(v *int) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt(*v)\n}\n\nfunc EncUint8(v uint8) ([]byte, error) {\n\treturn []byte{0, 0, 0, v}, nil\n}\n\nfunc EncUint8R(v *uint8) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn 
EncUint8(*v)\n}\n\nfunc EncUint16(v uint16) ([]byte, error) {\n\treturn []byte{0, 0, byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncUint16R(v *uint16) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint16(*v)\n}\n\nfunc EncUint32(v uint32) ([]byte, error) {\n\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncUint32R(v *uint32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint32(*v)\n}\n\nfunc EncUint64(v uint64) ([]byte, error) {\n\tif v > math.MaxUint32 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal int: value %#v out of range\", v)\n\t}\n\treturn encUint64(v), nil\n}\n\nfunc EncUint64R(v *uint64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint64(*v)\n}\n\nfunc EncUint(v uint) ([]byte, error) {\n\tif v > math.MaxUint32 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal int: value %#v out of range\", v)\n\t}\n\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncUintR(v *uint) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint(*v)\n}\n\nfunc EncBigInt(v big.Int) ([]byte, error) {\n\tif v.Cmp(maxBigInt) == 1 || v.Cmp(minBigInt) == -1 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal int: value (%T)(%s) out of range\", v, v.String())\n\t}\n\treturn encInt64(v.Int64()), nil\n}\n\nfunc EncBigIntR(v *big.Int) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\tif v.Cmp(maxBigInt) == 1 || v.Cmp(minBigInt) == -1 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal int: value (%T)(%s) out of range\", v, v.String())\n\t}\n\treturn encInt64(v.Int64()), nil\n}\n\nfunc EncString(v string) ([]byte, error) {\n\tif v == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tn, err := strconv.ParseInt(v, 10, 32)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to marshal int: can not marshal (%T)(%[1]v) %s\", v, err)\n\t}\n\treturn encInt64(n), nil\n}\n\nfunc EncStringR(v *string) ([]byte, error) 
{\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncString(*v)\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Type().Kind() {\n\tcase reflect.Int8:\n\t\tval := v.Int()\n\t\tif val < 0 {\n\t\t\treturn []byte{255, 255, 255, byte(val)}, nil\n\t\t}\n\t\treturn []byte{0, 0, 0, byte(val)}, nil\n\tcase reflect.Int16:\n\t\tval := v.Int()\n\t\tif val < 0 {\n\t\t\treturn []byte{255, 255, byte(val >> 8), byte(val)}, nil\n\t\t}\n\t\treturn []byte{0, 0, byte(val >> 8), byte(val)}, nil\n\tcase reflect.Int32:\n\t\treturn encInt64(v.Int()), nil\n\tcase reflect.Int, reflect.Int64:\n\t\tval := v.Int()\n\t\tif val > math.MaxInt32 || val < math.MinInt32 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal int: value (%T)(%[1]v) out of range\", v.Interface())\n\t\t}\n\t\treturn encInt64(val), nil\n\tcase reflect.Uint8:\n\t\treturn []byte{0, 0, 0, byte(v.Uint())}, nil\n\tcase reflect.Uint16:\n\t\tval := v.Uint()\n\t\treturn []byte{0, 0, byte(val >> 8), byte(val)}, nil\n\tcase reflect.Uint32:\n\t\treturn encUint64(v.Uint()), nil\n\tcase reflect.Uint, reflect.Uint64:\n\t\tval := v.Uint()\n\t\tif val > math.MaxUint32 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal int: value (%T)(%[1]v) out of range\", v.Interface())\n\t\t}\n\t\treturn encUint64(val), nil\n\tcase reflect.String:\n\t\tval := v.String()\n\t\tif val == \"\" {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tn, err := strconv.ParseInt(val, 10, 32)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal int: can not marshal (%T)(%[1]v) %s\", v.Interface(), err)\n\t\t}\n\t\treturn encInt64(n), nil\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal int: unsupported value type (%T)(%[1]v), supported types: %s, unsetColumn\", v.Interface(), supportedTypes)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal int: unsupported value type (%T)(%[1]v), supported types: %s, unsetColumn\", 
v.Interface(), supportedTypes)\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encInt64(v int64) []byte {\n\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n\nfunc encUint64(v uint64) []byte {\n\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n"
  },
  {
    "path": "serialization/cqlint/unmarshal.go",
    "content": "package cqlint\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\n\tcase *int8:\n\t\treturn DecInt8(data, v)\n\tcase *int16:\n\t\treturn DecInt16(data, v)\n\tcase *int32:\n\t\treturn DecInt32(data, v)\n\tcase *int64:\n\t\treturn DecInt64(data, v)\n\tcase *int:\n\t\treturn DecInt(data, v)\n\n\tcase *uint8:\n\t\treturn DecUint8(data, v)\n\tcase *uint16:\n\t\treturn DecUint16(data, v)\n\tcase *uint32:\n\t\treturn DecUint32(data, v)\n\tcase *uint64:\n\t\treturn DecUint64(data, v)\n\tcase *uint:\n\t\treturn DecUint(data, v)\n\n\tcase *big.Int:\n\t\treturn DecBigInt(data, v)\n\tcase *string:\n\t\treturn DecString(data, v)\n\n\tcase **int8:\n\t\treturn DecInt8R(data, v)\n\tcase **int16:\n\t\treturn DecInt16R(data, v)\n\tcase **int32:\n\t\treturn DecInt32R(data, v)\n\tcase **int64:\n\t\treturn DecInt64R(data, v)\n\tcase **int:\n\t\treturn DecIntR(data, v)\n\n\tcase **uint8:\n\t\treturn DecUint8R(data, v)\n\tcase **uint16:\n\t\treturn DecUint16R(data, v)\n\tcase **uint32:\n\t\treturn DecUint32R(data, v)\n\tcase **uint64:\n\t\treturn DecUint64R(data, v)\n\tcase **uint:\n\t\treturn DecUintR(data, v)\n\n\tcase **big.Int:\n\t\treturn DecBigIntR(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tdefault:\n\n\t\t// Custom types (type MyInt int) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: unsupported value type (%T)(%[1]v), supported types: %s\", value, supportedTypes)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/cqlint/unmarshal_utils.go",
    "content": "package cqlint\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\nconst (\n\tnegInt64 = int64(-1) << 32\n\tnegInt   = int(-1) << 32\n)\n\nvar errWrongDataLen = fmt.Errorf(\"failed to unmarshal int: the length of the data should be 0 or 4\")\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal int: can not unmarshal into nil reference (%T)(%[1]v))\", v)\n}\n\nfunc DecInt8(p []byte, v *int8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 4:\n\t\tval := decInt32(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into int8, the data should be in the int8 range\")\n\t\t}\n\t\t*v = int8(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt8R(p []byte, v **int8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int8)\n\t\t}\n\tcase 4:\n\t\tval := decInt32(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into int8, the data should be in the int8 range\")\n\t\t}\n\t\ttmp := int8(val)\n\t\t*v = &tmp\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt16(p []byte, v *int16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 4:\n\t\tval := decInt32(p)\n\t\tif val > math.MaxInt16 || val < math.MinInt16 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into int16, the data should be in the int16 range\")\n\t\t}\n\t\t*v = int16(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt16R(p []byte, v **int16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else 
{\n\t\t\t*v = new(int16)\n\t\t}\n\tcase 4:\n\t\tval := decInt32(p)\n\t\tif val > math.MaxInt16 || val < math.MinInt16 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into int16, the data should be in the int16 range\")\n\t\t}\n\t\ttmp := int16(val)\n\t\t*v = &tmp\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt32(p []byte, v *int32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 4:\n\t\t*v = decInt32(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt32R(p []byte, v **int32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int32)\n\t\t}\n\tcase 4:\n\t\ttmp := decInt32(p)\n\t\t*v = &tmp\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt64(p []byte, v *int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 4:\n\t\t*v = decInt64(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt64R(p []byte, v **int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int64)\n\t\t}\n\tcase 4:\n\t\tval := decInt64(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt(p []byte, v *int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 4:\n\t\t*v = decInt(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecIntR(p []byte, v **int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int)\n\t\t}\n\tcase 4:\n\t\tval := decInt(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn 
nil\n}\n\nfunc DecUint8(p []byte, v *uint8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 4:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into uint8, the data should be in the uint8 range\")\n\t\t}\n\t\t*v = p[3]\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint8R(p []byte, v **uint8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint8)\n\t\t}\n\tcase 4:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into uint8, the data should be in the uint8 range\")\n\t\t}\n\t\tval := p[3]\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint16(p []byte, v *uint16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 4:\n\t\tif p[0] != 0 || p[1] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into uint16, the data should be in the uint16 range\")\n\t\t}\n\t\t*v = uint16(p[2])<<8 | uint16(p[3])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint16R(p []byte, v **uint16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint16)\n\t\t}\n\tcase 4:\n\t\tif p[0] != 0 || p[1] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into uint16, the data should be in the uint16 range\")\n\t\t}\n\t\tval := uint16(p[2])<<8 | uint16(p[3])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint32(p []byte, v *uint32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 4:\n\t\t*v = uint32(p[0])<<24 | 
uint32(p[1])<<16 | uint32(p[2])<<8 | uint32(p[3])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint32R(p []byte, v **uint32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint32)\n\t\t}\n\tcase 4:\n\t\tval := uint32(p[0])<<24 | uint32(p[1])<<16 | uint32(p[2])<<8 | uint32(p[3])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint64(p []byte, v *uint64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 4:\n\t\t*v = decUint64(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint64R(p []byte, v **uint64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint64)\n\t\t}\n\tcase 4:\n\t\tval := decUint64(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint(p []byte, v *uint) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 4:\n\t\t*v = uint(p[0])<<24 | uint(p[1])<<16 | uint(p[2])<<8 | uint(p[3])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUintR(p []byte, v **uint) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint)\n\t\t}\n\tcase 4:\n\t\tval := uint(p[0])<<24 | uint(p[1])<<16 | uint(p[2])<<8 | uint(p[3])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = \"\"\n\t\t} else {\n\t\t\t*v = \"0\"\n\t\t}\n\tcase 4:\n\t\t*v = strconv.FormatInt(decInt64(p), 10)\n\tdefault:\n\t\treturn 
errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\tval := \"0\"\n\t\t\t*v = &val\n\t\t}\n\tcase 4:\n\t\tval := strconv.FormatInt(decInt64(p), 10)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecBigInt(p []byte, v *big.Int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt64(0)\n\tcase 4:\n\t\tv.SetInt64(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecBigIntR(p []byte, v **big.Int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = big.NewInt(0)\n\t\t}\n\tcase 4:\n\t\t*v = big.NewInt(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal int: can not unmarshal into nil reference (%T)(%[1]v)\", v.Interface())\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Int8:\n\t\treturn decReflectInt8(p, v)\n\tcase reflect.Int16:\n\t\treturn decReflectInt16(p, v)\n\tcase reflect.Int32, reflect.Int64, reflect.Int:\n\t\treturn decReflectInts(p, v)\n\tcase reflect.Uint8:\n\t\treturn decReflectUint8(p, v)\n\tcase reflect.Uint16:\n\t\treturn decReflectUint16(p, v)\n\tcase reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\treturn decReflectUints(p, v)\n\tcase reflect.String:\n\t\treturn decReflectString(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal int: unsupported value type (%T)(%[1]v), supported types: %s\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal int: can not unmarshal into nil reference (%T)(%[1]v)\", 
v.Interface())\n\t}\n\n\tswitch v.Type().Elem().Elem().Kind() {\n\tcase reflect.Int8:\n\t\treturn decReflectInt8R(p, v)\n\tcase reflect.Int16:\n\t\treturn decReflectInt16R(p, v)\n\tcase reflect.Int32, reflect.Int64, reflect.Int:\n\t\treturn decReflectIntsR(p, v)\n\tcase reflect.Uint8:\n\t\treturn decReflectUint8R(p, v)\n\tcase reflect.Uint16:\n\t\treturn decReflectUint16R(p, v)\n\tcase reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\treturn decReflectUintsR(p, v)\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal int: unsupported value type (%T)(%[1]v), supported types: %s\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc decReflectInt8(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 4:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into (%T), the data should be in the int8 range\", v.Interface())\n\t\t}\n\t\tv.SetInt(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectInt16(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 4:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt16 || val < math.MinInt16 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into (%T), the data should be in the int16 range\", v.Interface())\n\t\t}\n\t\tv.SetInt(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectInts(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 4:\n\t\tv.SetInt(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint8(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 4:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into (%T), the data should be in the uint8 range\", 
v.Interface())\n\t\t}\n\t\tv.SetUint(uint64(p[3]))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint16(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 4:\n\t\tif p[0] != 0 || p[1] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into (%T), the data should be in the uint16 range\", v.Interface())\n\t\t}\n\t\tv.SetUint(uint64(p[2])<<8 | uint64(p[3]))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUints(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 4:\n\t\tv.SetUint(decUint64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectString(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.SetString(\"\")\n\t\t} else {\n\t\t\tv.SetString(\"0\")\n\t\t}\n\tcase 4:\n\t\tv.SetString(strconv.FormatInt(decInt64(p), 10))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectNullableR(p []byte, v reflect.Value) reflect.Value {\n\tif p == nil {\n\t\treturn reflect.Zero(v.Elem().Type())\n\t}\n\treturn reflect.New(v.Type().Elem().Elem())\n}\n\nfunc decReflectInt8R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 4:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into (%T), the data should be in the int8 range\", v.Interface())\n\t\t}\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(val)\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectInt16R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 4:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt16 || val < math.MinInt16 {\n\t\t\treturn fmt.Errorf(\"failed 
to unmarshal int: to unmarshal into (%T), the data should be in the int16 range\", v.Interface())\n\t\t}\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(val)\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectIntsR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 4:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(decInt64(p))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint8R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 4:\n\t\tif p[0] != 0 || p[1] != 0 || p[2] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into (%T), the data should be in the uint8 range\", v.Interface())\n\t\t}\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(uint64(p[3]))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint16R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 4:\n\t\tif p[0] != 0 || p[1] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal int: to unmarshal into (%T), the data should be in the uint16 range\", v.Interface())\n\t\t}\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(uint64(p[2])<<8 | uint64(p[3]))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUintsR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 4:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(decUint64(p))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectStringR(p []byte, v 
reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tvar val reflect.Value\n\t\tif p == nil {\n\t\t\tval = reflect.Zero(v.Type().Elem())\n\t\t} else {\n\t\t\tval = reflect.New(v.Type().Elem().Elem())\n\t\t\tval.Elem().SetString(\"0\")\n\t\t}\n\t\tv.Elem().Set(val)\n\tcase 4:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(strconv.FormatInt(decInt64(p), 10))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decInt32(p []byte) int32 {\n\treturn int32(p[0])<<24 | int32(p[1])<<16 | int32(p[2])<<8 | int32(p[3])\n}\n\nfunc decInt64(p []byte) int64 {\n\tif p[0] > math.MaxInt8 {\n\t\treturn negInt64 | int64(p[0])<<24 | int64(p[1])<<16 | int64(p[2])<<8 | int64(p[3])\n\t}\n\treturn int64(p[0])<<24 | int64(p[1])<<16 | int64(p[2])<<8 | int64(p[3])\n}\n\nfunc decInt(p []byte) int {\n\tif p[0] > math.MaxInt8 {\n\t\treturn negInt | int(p[0])<<24 | int(p[1])<<16 | int(p[2])<<8 | int(p[3])\n\t}\n\treturn int(p[0])<<24 | int(p[1])<<16 | int(p[2])<<8 | int(p[3])\n}\n\nfunc decUint64(p []byte) uint64 {\n\treturn uint64(p[0])<<24 | uint64(p[1])<<16 | uint64(p[2])<<8 | uint64(p[3])\n}\n"
  },
  {
    "path": "serialization/cqltime/marshal.go",
    "content": "package cqltime\n\nimport (\n\t\"reflect\"\n\t\"time\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase int64:\n\t\treturn EncInt64(v)\n\tcase *int64:\n\t\treturn EncInt64R(v)\n\tcase time.Duration:\n\t\treturn EncDuration(v)\n\tcase *time.Duration:\n\t\treturn EncDurationR(v)\n\n\tdefault:\n\t\t// Custom types (type MyTime int64) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/cqltime/marshal_utils.go",
    "content": "package cqltime\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n)\n\nconst (\n\tmaxValInt64 int64         = 86399999999999\n\tminValInt64 int64         = 0\n\tmaxValDur   time.Duration = 86399999999999\n\tminValDur   time.Duration = 0\n)\n\nvar (\n\terrOutRangeInt64 = fmt.Errorf(\"failed to marshal time: the (int64) should be in the range 0 to 86399999999999\")\n\terrOutRangeDur   = fmt.Errorf(\"failed to marshal time: the (time.Duration) should be in the range 0 to 86399999999999\")\n)\n\nfunc EncInt64(v int64) ([]byte, error) {\n\tif v > maxValInt64 || v < minValInt64 {\n\t\treturn nil, errOutRangeInt64\n\t}\n\treturn encInt64(v), nil\n}\n\nfunc EncInt64R(v *int64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt64(*v)\n}\n\nfunc EncDuration(v time.Duration) ([]byte, error) {\n\tif v > maxValDur || v < minValDur {\n\t\treturn nil, errOutRangeDur\n\t}\n\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncDurationR(v *time.Duration) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncDuration(*v)\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.Int64:\n\t\tval := v.Int()\n\t\tif val > maxValInt64 || val < minValInt64 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal time: the (%T) should be in the range 0 to 86399999999999\", v.Interface())\n\t\t}\n\t\treturn encInt64(val), nil\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal time: unsupported value type (%T)(%[1]v), supported types: ~int64, time.Duration, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal time: unsupported value type (%T)(%[1]v), supported types: ~int64, time.Duration, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) 
([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encInt64(v int64) []byte {\n\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n"
  },
  {
    "path": "serialization/cqltime/unmarshal.go",
    "content": "package cqltime\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\n\tcase *int64:\n\t\treturn DecInt64(data, v)\n\tcase **int64:\n\t\treturn DecInt64R(data, v)\n\tcase *time.Duration:\n\t\treturn DecDuration(data, v)\n\tcase **time.Duration:\n\t\treturn DecDurationR(data, v)\n\tdefault:\n\n\t\t// Custom types (type MyTime int64) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal time: unsupported value type (%T)(%[1]v), supported types: ~int64, time.Duration\", value)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/cqltime/unmarshal_utils.go",
    "content": "package cqltime\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n)\n\nvar (\n\terrWrongDataLen      = fmt.Errorf(\"failed to unmarshal time: the length of the data should be 0 or 8\")\n\terrDataOutRangeInt64 = fmt.Errorf(\"failed to unmarshal time: (int64) the data should be in the range 0 to 86399999999999\")\n\terrDataOutRangeDur   = fmt.Errorf(\"failed to unmarshal time: (time.Duration) the data should be in the range 0 to 86399999999999\")\n)\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal time: can not unmarshal into nil reference (%T)(%[1]v))\", v)\n}\n\nfunc DecInt64(p []byte, v *int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\t*v = decInt64(p)\n\t\tif *v > maxValInt64 || *v < minValInt64 {\n\t\t\treturn errDataOutRangeInt64\n\t\t}\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt64R(p []byte, v **int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int64)\n\t\t}\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > maxValInt64 || val < minValInt64 {\n\t\t\treturn errDataOutRangeInt64\n\t\t}\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecDuration(p []byte, v *time.Duration) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\t*v = decDur(p)\n\t\tif *v > maxValDur || *v < minValDur {\n\t\t\treturn errDataOutRangeDur\n\t\t}\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecDurationR(p []byte, v **time.Duration) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(time.Duration)\n\t\t}\n\tcase 8:\n\t\tval := decDur(p)\n\t\tif val > maxValDur || val < minValDur {\n\t\t\treturn 
errDataOutRangeDur\n\t\t}\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal time: can not unmarshal into nil reference (%T)(%[1]v))\", v.Interface())\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Int64, reflect.Int:\n\t\treturn decReflectInt64(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal time: unsupported value type (%T)(%[1]v), supported types: ~int64, time.Duration\", v.Interface())\n\t}\n}\n\nfunc decReflectInt64(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 8:\n\t\tval := decInt64(p)\n\t\tif val > maxValInt64 || val < minValInt64 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal time: (%T) the data should be in the range 0 to 86399999999999\", v.Interface())\n\t\t}\n\t\tv.SetInt(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal time: can not unmarshal into nil reference (%T)(%[1]v)\", v.Interface())\n\t}\n\n\tswitch v.Type().Elem().Elem().Kind() {\n\tcase reflect.Int64, reflect.Int:\n\t\treturn decReflectIntsR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal time: unsupported value type (%T)(%[1]v), supported types: ~int64, time.Duration\", v.Interface())\n\t}\n}\n\nfunc decReflectIntsR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.Elem().Set(reflect.Zero(v.Elem().Type()))\n\t\t} else {\n\t\t\tv.Elem().Set(reflect.New(v.Type().Elem().Elem()))\n\t\t}\n\tcase 8:\n\t\tvv := decInt64(p)\n\t\tif vv > maxValInt64 || vv < minValInt64 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal time: (%T) the data should be in the range 0 to 86399999999999\", v.Interface())\n\t\t}\n\t\tval := 
reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetInt(vv)\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decInt64(p []byte) int64 {\n\treturn int64(p[0])<<56 | int64(p[1])<<48 | int64(p[2])<<40 | int64(p[3])<<32 | int64(p[4])<<24 | int64(p[5])<<16 | int64(p[6])<<8 | int64(p[7])\n}\n\nfunc decDur(p []byte) time.Duration {\n\treturn time.Duration(p[0])<<56 | time.Duration(p[1])<<48 | time.Duration(p[2])<<40 | time.Duration(p[3])<<32 | time.Duration(p[4])<<24 | time.Duration(p[5])<<16 | time.Duration(p[6])<<8 | time.Duration(p[7])\n}\n"
  },
  {
    "path": "serialization/date/marshal.go",
    "content": "package date\n\nimport (\n\t\"reflect\"\n\t\"time\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase int32:\n\t\treturn EncInt32(v)\n\tcase int64:\n\t\treturn EncInt64(v)\n\tcase uint32:\n\t\treturn EncUint32(v)\n\tcase string:\n\t\treturn EncString(v)\n\tcase time.Time:\n\t\treturn EncTime(v)\n\n\tcase *int32:\n\t\treturn EncInt32R(v)\n\tcase *int64:\n\t\treturn EncInt64R(v)\n\tcase *uint32:\n\t\treturn EncUint32R(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tcase *time.Time:\n\t\treturn EncTimeR(v)\n\tdefault:\n\t\t// Custom types (type MyDate uint32) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/date/marshal_utils.go",
    "content": "package date\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tmillisecondsInADay int64 = 24 * 60 * 60 * 1000\n\tcenterEpoch        int64 = 1 << 31\n\tmaxYear            int   = 5881580\n\tminYear            int   = -5877641\n\tmaxMilliseconds    int64 = 185542587100800000\n\tminMilliseconds    int64 = -185542587187200000\n)\n\nvar (\n\tmaxDate = time.Date(5881580, 07, 11, 0, 0, 0, 0, time.UTC)\n\tminDate = time.Date(-5877641, 06, 23, 0, 0, 0, 0, time.UTC)\n)\n\nfunc errWrongStringFormat(v any) error {\n\treturn fmt.Errorf(`failed to marshal date: the (%T)(%[1]v) should have fromat \"2006-01-02\"`, v)\n}\n\nfunc EncInt32(v int32) ([]byte, error) {\n\treturn encInt32(v), nil\n}\n\nfunc EncInt32R(v *int32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encInt32(*v), nil\n}\n\nfunc EncInt64(v int64) ([]byte, error) {\n\tif v > maxMilliseconds || v < minMilliseconds {\n\t\treturn nil, fmt.Errorf(\"failed to marshal date: the (int64)(%v) value out of range\", v)\n\t}\n\treturn encInt64(days(v)), nil\n}\n\nfunc EncInt64R(v *int64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt64(*v)\n}\n\nfunc EncUint32(v uint32) ([]byte, error) {\n\treturn encUint32(v), nil\n}\n\nfunc EncUint32R(v *uint32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encUint32(*v), nil\n}\n\nfunc EncTime(v time.Time) ([]byte, error) {\n\tif v.After(maxDate) || v.Before(minDate) {\n\t\treturn nil, fmt.Errorf(\"failed to marshal date: the (%T)(%s) value should be in the range from -5877641-06-23 to 5881580-07-11\", v, v.Format(\"2006-01-02\"))\n\t}\n\treturn encTime(v), nil\n}\n\nfunc EncTimeR(v *time.Time) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncTime(*v)\n}\n\nfunc EncString(v string) ([]byte, error) {\n\tif v == \"\" {\n\t\treturn nil, nil\n\t}\n\tvar err error\n\tvar y, m, d int\n\tvar t time.Time\n\tswitch ps := 
strings.Split(v, \"-\"); len(ps) {\n\tcase 3:\n\t\tif y, err = strconv.Atoi(ps[0]); err != nil {\n\t\t\treturn nil, errWrongStringFormat(v)\n\t\t}\n\t\tif m, err = strconv.Atoi(ps[1]); err != nil {\n\t\t\treturn nil, errWrongStringFormat(v)\n\t\t}\n\t\tif d, err = strconv.Atoi(ps[2]); err != nil {\n\t\t\treturn nil, errWrongStringFormat(v)\n\t\t}\n\tcase 4:\n\t\tif y, err = strconv.Atoi(ps[1]); err != nil || ps[0] != \"\" {\n\t\t\treturn nil, errWrongStringFormat(v)\n\t\t}\n\t\ty = -y\n\t\tif m, err = strconv.Atoi(ps[2]); err != nil {\n\t\t\treturn nil, errWrongStringFormat(v)\n\t\t}\n\t\tif d, err = strconv.Atoi(ps[3]); err != nil {\n\t\t\treturn nil, errWrongStringFormat(v)\n\t\t}\n\tdefault:\n\t\treturn nil, errWrongStringFormat(v)\n\t}\n\tif y > maxYear || y < minYear {\n\t\treturn nil, fmt.Errorf(\"failed to marshal date: the (%T)(%[1]v) value should be in the range from -5877641-06-23 to 5881580-07-11\", v)\n\t}\n\tt = time.Date(y, time.Month(m), d, 0, 0, 0, 0, time.UTC)\n\tif t.After(maxDate) || t.Before(minDate) {\n\t\treturn nil, fmt.Errorf(\"failed to marshal date: the (%T)(%[1]v) value should be in the range from -5877641-06-23 to 5881580-07-11\", v)\n\t}\n\treturn encTime(t), nil\n}\n\nfunc EncStringR(v *string) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncString(*v)\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.Int32:\n\t\treturn encInt64(v.Int()), nil\n\tcase reflect.Int64:\n\t\tval := v.Int()\n\t\tif val > maxMilliseconds || val < minMilliseconds {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal date: the value (%T)(%[1]v) out of range\", v.Interface())\n\t\t}\n\t\treturn encInt64(days(val)), nil\n\tcase reflect.Uint32:\n\t\tval := v.Uint()\n\t\treturn []byte{byte(val >> 24), byte(val >> 16), byte(val >> 8), byte(val)}, nil\n\tcase reflect.String:\n\t\treturn encReflectString(v)\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, 
nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal date: unsupported value type (%T)(%[1]v), supported types: ~int32, ~int64, ~uint32,  ~string, time.Time, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal date: unsupported value type (%T)(%[1]v), supported types: ~int32, ~int64, ~uint32,  ~string, time.Time, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encReflectString(v reflect.Value) ([]byte, error) {\n\tval := v.String()\n\tif val == \"\" {\n\t\treturn nil, nil\n\t}\n\tvar err error\n\tvar y, m, d int\n\tvar t time.Time\n\tps := strings.Split(val, \"-\")\n\tswitch len(ps) {\n\tcase 3:\n\t\tif y, err = strconv.Atoi(ps[0]); err != nil {\n\t\t\treturn nil, errWrongStringFormat(v.Interface())\n\t\t}\n\t\tif m, err = strconv.Atoi(ps[1]); err != nil {\n\t\t\treturn nil, errWrongStringFormat(v.Interface())\n\t\t}\n\t\tif d, err = strconv.Atoi(ps[2]); err != nil {\n\t\t\treturn nil, errWrongStringFormat(v.Interface())\n\t\t}\n\tcase 4:\n\t\tif y, err = strconv.Atoi(ps[1]); err != nil || ps[0] != \"\" {\n\t\t\treturn nil, errWrongStringFormat(v.Interface())\n\t\t}\n\t\ty = -y\n\t\tif m, err = strconv.Atoi(ps[2]); err != nil {\n\t\t\treturn nil, errWrongStringFormat(v.Interface())\n\t\t}\n\t\tif d, err = strconv.Atoi(ps[3]); err != nil {\n\t\t\treturn nil, errWrongStringFormat(v.Interface())\n\t\t}\n\tdefault:\n\t\treturn nil, errWrongStringFormat(v.Interface())\n\t}\n\tif y > maxYear || y < minYear {\n\t\treturn nil, fmt.Errorf(\"failed to marshal date: the (%T)(%[1]v) value should be in the range from -5877641-06-23 to 5881580-07-11\", v.Interface())\n\t}\n\tt = time.Date(y, time.Month(m), d, 0, 0, 0, 0, time.UTC)\n\tif t.After(maxDate) || t.Before(minDate) {\n\t\treturn nil, fmt.Errorf(\"failed to marshal date: the (%T)(%[1]v) value should be in the range from -5877641-06-23 to 5881580-07-11\", 
v.Interface())\n\t}\n\treturn encTime(t), nil\n}\n\nfunc encInt64(v int64) []byte {\n\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n\nfunc encInt32(v int32) []byte {\n\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n\nfunc encUint32(v uint32) []byte {\n\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n\nfunc encTime(v time.Time) []byte {\n\td := days(v.UnixMilli())\n\treturn []byte{byte(d >> 24), byte(d >> 16), byte(d >> 8), byte(d)}\n}\n\nfunc days(v int64) int64 {\n\treturn v/millisecondsInADay + centerEpoch\n}\n"
  },
  {
    "path": "serialization/date/unmarshal.go",
    "content": "package date\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\n\tcase *int32:\n\t\treturn DecInt32(data, v)\n\tcase *int64:\n\t\treturn DecInt64(data, v)\n\tcase *uint32:\n\t\treturn DecUint32(data, v)\n\tcase *string:\n\t\treturn DecString(data, v)\n\tcase *time.Time:\n\t\treturn DecTime(data, v)\n\n\tcase **int32:\n\t\treturn DecInt32R(data, v)\n\tcase **int64:\n\t\treturn DecInt64R(data, v)\n\tcase **uint32:\n\t\treturn DecUint32R(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tcase **time.Time:\n\t\treturn DecTimeR(data, v)\n\tdefault:\n\n\t\t// Custom types (type MyDate uint32) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal date: unsupported value type (%T)(%[1]v), supported types: ~int32, ~int64, ~uint32,  ~string, time.Time\", value)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/date/unmarshal_utils.go",
    "content": "package date\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\t\"time\"\n)\n\nconst (\n\tnegInt64       = int64(-1) << 32\n\tzeroDate       = \"-5877641-06-23\"\n\tzeroMS   int64 = -185542587187200000\n)\n\nvar errWrongDataLen = fmt.Errorf(\"failed to unmarshal date: the length of the data should be 0 or 4\")\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal date: can not unmarshal into nil reference (%T)(%[1]v))\", v)\n}\n\nfunc DecInt32(p []byte, v *int32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 4:\n\t\t*v = decInt32(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt32R(p []byte, v **int32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int32)\n\t\t}\n\tcase 4:\n\t\tval := decInt32(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt64(p []byte, v *int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = zeroMS\n\tcase 4:\n\t\t*v = decMilliseconds(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt64R(p []byte, v **int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\tval := zeroMS\n\t\t\t*v = &val\n\t\t}\n\tcase 4:\n\t\tval := decMilliseconds(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint32(p []byte, v *uint32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 4:\n\t\t*v = decUint32(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint32R(p []byte, v **uint32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 
0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint32)\n\t\t}\n\tcase 4:\n\t\tval := decUint32(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = \"\"\n\t\t} else {\n\t\t\t*v = zeroDate\n\t\t}\n\tcase 4:\n\t\t*v = decString(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\tval := zeroDate\n\t\t\t*v = &val\n\t\t}\n\tcase 4:\n\t\tval := decString(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecTime(p []byte, v *time.Time) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = minDate\n\tcase 4:\n\t\t*v = decTime(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecTimeR(p []byte, v **time.Time) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\tval := minDate\n\t\t\t*v = &val\n\t\t}\n\tcase 4:\n\t\tval := decTime(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal date: can not unmarshal into nil reference (%T)(%[1]v))\", v.Interface())\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Int32:\n\t\treturn decReflectInt32(p, v)\n\tcase reflect.Int64:\n\t\treturn decReflectInt64(p, v)\n\tcase reflect.Uint32:\n\t\treturn decReflectUint32(p, v)\n\tcase reflect.String:\n\t\treturn decReflectString(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal date: unsupported value type (%T)(%[1]v), supported 
types: ~int32, ~int64, ~uint32,  ~string, time.Time\", v.Interface())\n\t}\n}\n\nfunc decReflectInt32(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 4:\n\t\tv.SetInt(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectInt64(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(zeroMS)\n\tcase 4:\n\t\tv.SetInt(decMilliseconds(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint32(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 4:\n\t\tv.SetUint(decUint64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectString(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.SetString(\"\")\n\t\t} else {\n\t\t\tv.SetString(zeroDate)\n\t\t}\n\tcase 4:\n\t\tv.SetString(decString(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal date: can not unmarshal into nil reference (%T)(%[1]v)\", v.Interface())\n\t}\n\n\tswitch v.Type().Elem().Elem().Kind() {\n\tcase reflect.Int32:\n\t\treturn decReflectInt32R(p, v)\n\tcase reflect.Int64:\n\t\treturn decReflectInt64R(p, v)\n\tcase reflect.Uint32:\n\t\treturn decReflectUint32R(p, v)\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal date: unsupported value type (%T)(%[1]v), supported types: ~int32, ~int64, ~uint32,  ~string, time.Time\", v.Interface())\n\t}\n}\n\nfunc decReflectInt32R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 4:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(decInt64(p))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc 
decReflectInt64R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tvar val reflect.Value\n\t\tif p == nil {\n\t\t\tval = reflect.Zero(v.Type().Elem())\n\t\t} else {\n\t\t\tval = reflect.New(v.Type().Elem().Elem())\n\t\t\tval.Elem().SetInt(zeroMS)\n\t\t}\n\t\tv.Elem().Set(val)\n\tcase 4:\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetInt(decMilliseconds(p))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint32R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 4:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(decUint64(p))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tvar val reflect.Value\n\t\tif p == nil {\n\t\t\tval = reflect.Zero(v.Type().Elem())\n\t\t} else {\n\t\t\tval = reflect.New(v.Type().Elem().Elem())\n\t\t\tval.Elem().SetString(zeroDate)\n\t\t}\n\t\tv.Elem().Set(val)\n\tcase 4:\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetString(decString(p))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectNullableR(p []byte, v reflect.Value) reflect.Value {\n\tif p == nil {\n\t\treturn reflect.Zero(v.Elem().Type())\n\t}\n\treturn reflect.New(v.Type().Elem().Elem())\n}\n\nfunc decInt32(p []byte) int32 {\n\treturn int32(p[0])<<24 | int32(p[1])<<16 | int32(p[2])<<8 | int32(p[3])\n}\n\nfunc decInt64(p []byte) int64 {\n\tif p[0] > math.MaxInt8 {\n\t\treturn negInt64 | int64(p[0])<<24 | int64(p[1])<<16 | int64(p[2])<<8 | int64(p[3])\n\t}\n\treturn int64(p[0])<<24 | int64(p[1])<<16 | int64(p[2])<<8 | int64(p[3])\n}\n\nfunc decMilliseconds(p []byte) int64 {\n\treturn (int64(p[0])<<24 | int64(p[1])<<16 | int64(p[2])<<8 | int64(p[3]) - centerEpoch) * 
millisecondsInADay\n}\n\nfunc decUint32(p []byte) uint32 {\n\treturn uint32(p[0])<<24 | uint32(p[1])<<16 | uint32(p[2])<<8 | uint32(p[3])\n}\n\nfunc decUint64(p []byte) uint64 {\n\treturn uint64(p[0])<<24 | uint64(p[1])<<16 | uint64(p[2])<<8 | uint64(p[3])\n}\n\nfunc decString(p []byte) string {\n\treturn decTime(p).Format(\"2006-01-02\")\n}\n\nfunc decTime(p []byte) time.Time {\n\treturn time.UnixMilli(decMilliseconds(p)).UTC()\n}\n"
  },
  {
    "path": "serialization/decimal/marshal.go",
    "content": "package decimal\n\nimport (\n\t\"reflect\"\n\n\t\"gopkg.in/inf.v0\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase inf.Dec:\n\t\treturn EncInfDec(v)\n\tcase *inf.Dec:\n\t\treturn EncInfDecR(v)\n\tcase string:\n\t\treturn EncString(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tdefault:\n\t\t// Custom types (type MyString string) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/decimal/marshal_utils.go",
    "content": "package decimal\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"gopkg.in/inf.v0\"\n\n\t\"github.com/gocql/gocql/serialization/varint\"\n)\n\nfunc EncInfDec(v inf.Dec) ([]byte, error) {\n\tsign := v.Sign()\n\tif sign == 0 {\n\t\treturn []byte{0, 0, 0, 0, 0}, nil\n\t}\n\treturn append(encScale(v.Scale()), varint.EncBigIntRS(v.UnscaledBig())...), nil\n}\n\nfunc EncInfDecR(v *inf.Dec) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encInfDecR(v), nil\n}\n\n// EncString encodes decimal string which should contains `scale` and `unscaled` strings separated by `;`.\nfunc EncString(v string) ([]byte, error) {\n\tif v == \"\" {\n\t\treturn nil, nil\n\t}\n\tvs := strings.Split(v, \";\")\n\tif len(vs) != 2 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal decimal: invalid decimal string %s\", v)\n\t}\n\tscale, err := strconv.ParseInt(vs[0], 10, 32)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to marshal decimal: invalid decimal scale string %s\", vs[0])\n\t}\n\tunscaleData, err := encUnscaledString(vs[1])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn append(encScale64(scale), unscaleData...), nil\n}\n\nfunc EncStringR(v *string) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncString(*v)\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Type().Kind() {\n\tcase reflect.String:\n\t\treturn encReflectString(v)\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal decimal: unsupported value type (%T)(%[1]v), supported types: ~string, inf.Dec, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal decimal: unsupported value type (%T)(%[1]v), supported types: ~string, inf.Dec, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, 
nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encReflectString(v reflect.Value) ([]byte, error) {\n\tval := v.String()\n\tif val == \"\" {\n\t\treturn nil, nil\n\t}\n\tvs := strings.Split(val, \";\")\n\tif len(vs) != 2 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal decimal: invalid decimal string (%T)(%[1]v)\", v.Interface())\n\t}\n\tscale, err := strconv.ParseInt(vs[0], 10, 32)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to marshal decimal: invalid decimal scale string (%T)(%s)\", v.Interface(), vs[0])\n\t}\n\tunscaledData, err := encUnscaledString(vs[1])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn append(encScale64(scale), unscaledData...), nil\n}\n\nfunc encInfDecR(v *inf.Dec) []byte {\n\tsign := v.Sign()\n\tif sign == 0 {\n\t\treturn []byte{0, 0, 0, 0, 0}\n\t}\n\treturn append(encScale(v.Scale()), varint.EncBigIntRS(v.UnscaledBig())...)\n}\n\nfunc encScale(v inf.Scale) []byte {\n\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n\nfunc encScale64(v int64) []byte {\n\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n\nfunc encUnscaledString(v string) ([]byte, error) {\n\tswitch {\n\tcase len(v) == 0:\n\t\treturn nil, nil\n\tcase len(v) <= 18:\n\t\tn, err := strconv.ParseInt(v, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal decimal: invalid unscaled string %s, %s\", v, err)\n\t\t}\n\t\treturn varint.EncInt64Ext(n), nil\n\tcase len(v) <= 20:\n\t\tn, err := strconv.ParseInt(v, 10, 64)\n\t\tif err == nil {\n\t\t\treturn varint.EncInt64Ext(n), nil\n\t\t}\n\n\t\tt, ok := new(big.Int).SetString(v, 10)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal decimal: invalid unscaled string %s\", v)\n\t\t}\n\t\treturn varint.EncBigIntRS(t), nil\n\tdefault:\n\t\tt, ok := new(big.Int).SetString(v, 10)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal decimal: invalid unscaled string %s\", v)\n\t\t}\n\t\treturn varint.EncBigIntRS(t), nil\n\t}\n}\n"
  },
  {
    "path": "serialization/decimal/unmarshal.go",
    "content": "package decimal\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"gopkg.in/inf.v0\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\tcase *inf.Dec:\n\t\treturn DecInfDec(data, v)\n\tcase **inf.Dec:\n\t\treturn DecInfDecR(data, v)\n\tcase *string:\n\t\treturn DecString(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tdefault:\n\t\t// Custom types (type MyString string) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal decimal: unsupported value type (%T)(%#[1]v), supported types: ~string, inf.Dec\", value)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/decimal/unmarshal_ints.go",
    "content": "package decimal\n\nimport (\n\t\"gopkg.in/inf.v0\"\n)\n\nconst (\n\tneg8     = int64(-1) << 8\n\tneg16    = int64(-1) << 16\n\tneg24    = int64(-1) << 24\n\tneg32    = int64(-1) << 32\n\tneg40    = int64(-1) << 40\n\tneg48    = int64(-1) << 48\n\tneg56    = int64(-1) << 56\n\tneg32Int = int(-1) << 32\n)\n\nfunc decScale(p []byte) inf.Scale {\n\treturn inf.Scale(p[0])<<24 | inf.Scale(p[1])<<16 | inf.Scale(p[2])<<8 | inf.Scale(p[3])\n}\n\nfunc decScaleInt64(p []byte) int64 {\n\tif p[0] > 127 {\n\t\treturn neg32 | int64(p[0])<<24 | int64(p[1])<<16 | int64(p[2])<<8 | int64(p[3])\n\t}\n\treturn int64(p[0])<<24 | int64(p[1])<<16 | int64(p[2])<<8 | int64(p[3])\n}\n\nfunc dec1toInt64(p []byte) int64 {\n\tif p[4] > 127 {\n\t\treturn neg8 | int64(p[4])\n\t}\n\treturn int64(p[4])\n}\n\nfunc dec2toInt64(p []byte) int64 {\n\tif p[4] > 127 {\n\t\treturn neg16 | int64(p[4])<<8 | int64(p[5])\n\t}\n\treturn int64(p[4])<<8 | int64(p[5])\n}\n\nfunc dec3toInt64(p []byte) int64 {\n\tif p[4] > 127 {\n\t\treturn neg24 | int64(p[4])<<16 | int64(p[5])<<8 | int64(p[6])\n\t}\n\treturn int64(p[4])<<16 | int64(p[5])<<8 | int64(p[6])\n}\n\nfunc dec4toInt64(p []byte) int64 {\n\tif p[4] > 127 {\n\t\treturn neg32 | int64(p[4])<<24 | int64(p[5])<<16 | int64(p[6])<<8 | int64(p[7])\n\t}\n\treturn int64(p[4])<<24 | int64(p[5])<<16 | int64(p[6])<<8 | int64(p[7])\n}\n\nfunc dec5toInt64(p []byte) int64 {\n\tif p[4] > 127 {\n\t\treturn neg40 | int64(p[4])<<32 | int64(p[5])<<24 | int64(p[6])<<16 | int64(p[7])<<8 | int64(p[8])\n\t}\n\treturn int64(p[4])<<32 | int64(p[5])<<24 | int64(p[6])<<16 | int64(p[7])<<8 | int64(p[8])\n}\n\nfunc dec6toInt64(p []byte) int64 {\n\tif p[4] > 127 {\n\t\treturn neg48 | int64(p[4])<<40 | int64(p[5])<<32 | int64(p[6])<<24 | int64(p[7])<<16 | int64(p[8])<<8 | int64(p[9])\n\t}\n\treturn int64(p[4])<<40 | int64(p[5])<<32 | int64(p[6])<<24 | int64(p[7])<<16 | int64(p[8])<<8 | int64(p[9])\n}\n\nfunc dec7toInt64(p []byte) int64 {\n\tif p[4] > 127 {\n\t\treturn neg56 
| int64(p[4])<<48 | int64(p[5])<<40 | int64(p[6])<<32 | int64(p[7])<<24 | int64(p[8])<<16 | int64(p[9])<<8 | int64(p[10])\n\t}\n\treturn int64(p[4])<<48 | int64(p[5])<<40 | int64(p[6])<<32 | int64(p[7])<<24 | int64(p[8])<<16 | int64(p[9])<<8 | int64(p[10])\n}\n\nfunc dec8toInt64(p []byte) int64 {\n\treturn int64(p[4])<<56 | int64(p[5])<<48 | int64(p[6])<<40 | int64(p[7])<<32 | int64(p[8])<<24 | int64(p[9])<<16 | int64(p[10])<<8 | int64(p[11])\n}\n"
  },
  {
    "path": "serialization/decimal/unmarshal_utils.go",
    "content": "package decimal\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\n\t\"gopkg.in/inf.v0\"\n\n\t\"github.com/gocql/gocql/serialization/varint\"\n)\n\nvar errWrongDataLen = fmt.Errorf(\"failed to unmarshal decimal: the length of the data should be 0 or more than 5\")\n\nfunc errBrokenData(p []byte) error {\n\tif p[4] == 0 && p[5] <= 127 || p[4] == 255 && p[5] > 127 {\n\t\treturn fmt.Errorf(\"failed to unmarshal decimal: the data is broken\")\n\t}\n\treturn nil\n}\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal decimal: can not unmarshal into nil reference(%T)(%[1]v)\", v)\n}\n\nfunc DecInfDec(p []byte, v *inf.Dec) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetScale(0).SetUnscaled(0)\n\t\treturn nil\n\tcase 1, 2, 3, 4:\n\t\treturn errWrongDataLen\n\tcase 5:\n\t\tv.SetScale(decScale(p)).SetUnscaled(dec1toInt64(p))\n\t\treturn nil\n\tcase 6:\n\t\tv.SetScale(decScale(p)).SetUnscaled(dec2toInt64(p))\n\tcase 7:\n\t\tv.SetScale(decScale(p)).SetUnscaled(dec3toInt64(p))\n\tcase 8:\n\t\tv.SetScale(decScale(p)).SetUnscaled(dec4toInt64(p))\n\tcase 9:\n\t\tv.SetScale(decScale(p)).SetUnscaled(dec5toInt64(p))\n\tcase 10:\n\t\tv.SetScale(decScale(p)).SetUnscaled(dec6toInt64(p))\n\tcase 11:\n\t\tv.SetScale(decScale(p)).SetUnscaled(dec7toInt64(p))\n\tcase 12:\n\t\tv.SetScale(decScale(p)).SetUnscaled(dec8toInt64(p))\n\tdefault:\n\t\tv.SetScale(decScale(p)).SetUnscaledBig(varint.Dec2BigInt(p[4:]))\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecInfDecR(p []byte, v **inf.Dec) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = inf.NewDec(0, 0)\n\t\t}\n\t\treturn nil\n\tcase 1, 2, 3, 4:\n\t\treturn errWrongDataLen\n\tcase 5:\n\t\t*v = inf.NewDec(dec1toInt64(p), decScale(p))\n\t\treturn nil\n\tcase 6:\n\t\t*v = inf.NewDec(dec2toInt64(p), decScale(p))\n\tcase 7:\n\t\t*v = 
inf.NewDec(dec3toInt64(p), decScale(p))\n\tcase 8:\n\t\t*v = inf.NewDec(dec4toInt64(p), decScale(p))\n\tcase 9:\n\t\t*v = inf.NewDec(dec5toInt64(p), decScale(p))\n\tcase 10:\n\t\t*v = inf.NewDec(dec6toInt64(p), decScale(p))\n\tcase 11:\n\t\t*v = inf.NewDec(dec7toInt64(p), decScale(p))\n\tcase 12:\n\t\t*v = inf.NewDec(dec8toInt64(p), decScale(p))\n\tdefault:\n\t\t*v = inf.NewDecBig(varint.Dec2BigInt(p[4:]), decScale(p))\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = \"\"\n\t\t} else {\n\t\t\t*v = \"0;0\"\n\t\t}\n\t\treturn nil\n\tcase 1, 2, 3, 4:\n\t\treturn errWrongDataLen\n\tcase 5:\n\t\t*v = decString5(p)\n\t\treturn nil\n\tcase 6:\n\t\t*v = decString6(p)\n\tcase 7:\n\t\t*v = decString7(p)\n\tcase 8:\n\t\t*v = decString8(p)\n\tcase 9:\n\t\t*v = decString9(p)\n\tcase 10:\n\t\t*v = decString10(p)\n\tcase 11:\n\t\t*v = decString11(p)\n\tcase 12:\n\t\t*v = decString12(p)\n\tdefault:\n\t\t*v = decString(p)\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\ttmp := \"0;0\"\n\t\t\t*v = &tmp\n\t\t}\n\t\treturn nil\n\tcase 1, 2, 3, 4:\n\t\treturn errWrongDataLen\n\tcase 5:\n\t\ttmp := decString5(p)\n\t\t*v = &tmp\n\t\treturn nil\n\tcase 6:\n\t\ttmp := decString6(p)\n\t\t*v = &tmp\n\tcase 7:\n\t\ttmp := decString7(p)\n\t\t*v = &tmp\n\tcase 8:\n\t\ttmp := decString8(p)\n\t\t*v = &tmp\n\tcase 9:\n\t\ttmp := decString9(p)\n\t\t*v = &tmp\n\tcase 10:\n\t\ttmp := decString10(p)\n\t\t*v = &tmp\n\tcase 11:\n\t\ttmp := decString11(p)\n\t\t*v = &tmp\n\tcase 12:\n\t\ttmp := decString12(p)\n\t\t*v = &tmp\n\tdefault:\n\t\ttmp := decString(p)\n\t\t*v = &tmp\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() 
{\n\t\treturn fmt.Errorf(\"failed to unmarshal decimal: can not unmarshal into nil reference (%T)(%#[1]v)\", v.Interface())\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.String:\n\t\treturn decReflectString(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal decimal: unsupported value type (%T)(%#[1]v), supported types: ~string, inf.Dec\", v.Interface())\n\t}\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal decimal: can not unmarshal into nil reference (%T)(%[1]v)\", v.Interface())\n\t}\n\n\tswitch v.Type().Elem().Elem().Kind() {\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal decimal: unsupported value type (%T)(%[1]v), supported types: ~string, inf.Dec\", v.Interface())\n\t}\n}\n\nfunc decReflectString(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.SetString(\"\")\n\t\t} else {\n\t\t\tv.SetString(\"0;0\")\n\t\t}\n\t\treturn nil\n\tcase 1, 2, 3, 4:\n\t\treturn errWrongDataLen\n\tcase 5:\n\t\tv.SetString(decString5(p))\n\t\treturn nil\n\tcase 6:\n\t\tv.SetString(decString6(p))\n\tcase 7:\n\t\tv.SetString(decString7(p))\n\tcase 8:\n\t\tv.SetString(decString8(p))\n\tcase 9:\n\t\tv.SetString(decString9(p))\n\tcase 10:\n\t\tv.SetString(decString10(p))\n\tcase 11:\n\t\tv.SetString(decString11(p))\n\tcase 12:\n\t\tv.SetString(decString12(p))\n\tdefault:\n\t\tv.SetString(decString(p))\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tvar val reflect.Value\n\t\tif p == nil {\n\t\t\tval = reflect.Zero(v.Type().Elem())\n\t\t} else {\n\t\t\tval = reflect.New(v.Type().Elem().Elem())\n\t\t\tval.Elem().SetString(\"0;0\")\n\t\t}\n\t\tv.Elem().Set(val)\n\t\treturn nil\n\tcase 1, 2, 3, 4:\n\t\treturn errWrongDataLen\n\tcase 5:\n\t\tnewVal := 
reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(decString5(p))\n\t\tv.Elem().Set(newVal)\n\t\treturn nil\n\tcase 6:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(decString6(p))\n\t\tv.Elem().Set(newVal)\n\tcase 7:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(decString7(p))\n\t\tv.Elem().Set(newVal)\n\tcase 8:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(decString8(p))\n\t\tv.Elem().Set(newVal)\n\tcase 9:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(decString9(p))\n\t\tv.Elem().Set(newVal)\n\tcase 10:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(decString10(p))\n\t\tv.Elem().Set(newVal)\n\tcase 11:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(decString11(p))\n\t\tv.Elem().Set(newVal)\n\tcase 12:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(decString12(p))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(decString(p))\n\t\tv.Elem().Set(newVal)\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decString5(p []byte) string {\n\treturn strconv.FormatInt(decScaleInt64(p), 10) + \";\" + strconv.FormatInt(dec1toInt64(p), 10)\n}\n\nfunc decString6(p []byte) string {\n\treturn strconv.FormatInt(decScaleInt64(p), 10) + \";\" + strconv.FormatInt(dec2toInt64(p), 10)\n}\n\nfunc decString7(p []byte) string {\n\treturn strconv.FormatInt(decScaleInt64(p), 10) + \";\" + strconv.FormatInt(dec3toInt64(p), 10)\n}\nfunc decString8(p []byte) string {\n\treturn strconv.FormatInt(decScaleInt64(p), 10) + \";\" + strconv.FormatInt(dec4toInt64(p), 10)\n}\nfunc decString9(p []byte) string {\n\treturn strconv.FormatInt(decScaleInt64(p), 10) + \";\" + strconv.FormatInt(dec5toInt64(p), 10)\n}\nfunc decString10(p []byte) string {\n\treturn strconv.FormatInt(decScaleInt64(p), 10) + 
\";\" + strconv.FormatInt(dec6toInt64(p), 10)\n}\nfunc decString11(p []byte) string {\n\treturn strconv.FormatInt(decScaleInt64(p), 10) + \";\" + strconv.FormatInt(dec7toInt64(p), 10)\n}\nfunc decString12(p []byte) string {\n\treturn strconv.FormatInt(decScaleInt64(p), 10) + \";\" + strconv.FormatInt(dec8toInt64(p), 10)\n}\n\nfunc decString(p []byte) string {\n\treturn strconv.FormatInt(decScaleInt64(p), 10) + \";\" + varint.Dec2BigInt(p[4:]).String()\n}\n"
  },
  {
    "path": "serialization/double/marshal.go",
    "content": "package double\n\nimport (\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase float64:\n\t\treturn EncFloat64(v)\n\tcase *float64:\n\t\treturn EncFloat64R(v)\n\tdefault:\n\t\t// Custom types (type MyFloat float64) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/double/marshal_utils.go",
    "content": "package double\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"unsafe\"\n)\n\nfunc EncFloat64(v float64) ([]byte, error) {\n\treturn encFloat64(v), nil\n}\n\nfunc EncFloat64R(v *float64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encFloat64R(v), nil\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.Float64:\n\t\treturn encFloat64(v.Float()), nil\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal double: unsupported value type (%T)(%[1]v), supported types: ~float64, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal double: unsupported value type (%T)(%[1]v), supported types: ~float64, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encFloat64(v float64) []byte {\n\treturn encUint64(floatToUint(v))\n}\n\nfunc encFloat64R(v *float64) []byte {\n\treturn encUint64(floatToUintR(v))\n}\n\nfunc encUint64(v uint64) []byte {\n\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n\nfunc floatToUint(v float64) uint64 {\n\treturn *(*uint64)(unsafe.Pointer(&v))\n}\n\nfunc floatToUintR(v *float64) uint64 {\n\treturn *(*uint64)(unsafe.Pointer(v))\n}\n"
  },
  {
    "path": "serialization/double/unmarshal.go",
    "content": "package double\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\tcase *float64:\n\t\treturn DecFloat64(data, v)\n\tcase **float64:\n\t\treturn DecFloat64R(data, v)\n\tdefault:\n\t\t// Custom types (type MyFloat float64) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal double: unsupported value type (%T)(%[1]v), supported types: ~float64\", v)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/double/unmarshal_utils.go",
    "content": "package double\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"unsafe\"\n)\n\nvar errWrongDataLen = fmt.Errorf(\"failed to unmarshal double: the length of the data should be 0 or 8\")\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal double: can not unmarshal into nil reference(%T)(%[1]v)\", v)\n}\n\nfunc DecFloat64(p []byte, v *float64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\t*v = decFloat64(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecFloat64R(p []byte, v **float64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(float64)\n\t\t}\n\tcase 8:\n\t\t*v = decFloat64R(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Float64:\n\t\treturn decReflectFloat64(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal double: unsupported value type (%T)(%[1]v), supported types: ~float64\", v.Interface())\n\t}\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v.Type().Elem().Elem().Kind() {\n\tcase reflect.Float64:\n\t\treturn decReflectFloat64R(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal double: unsupported value type (%T)(%[1]v), supported types: ~float64\", v.Interface())\n\t}\n}\n\nfunc decReflectFloat64(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetFloat(0)\n\tcase 8:\n\t\tv.SetFloat(decFloat64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectFloat64R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 8:\n\t\tval := 
reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetFloat(decFloat64(p))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectNullableR(p []byte, v reflect.Value) reflect.Value {\n\tif p == nil {\n\t\treturn reflect.Zero(v.Elem().Type())\n\t}\n\treturn reflect.New(v.Type().Elem().Elem())\n}\n\nfunc decFloat64(p []byte) float64 {\n\treturn uint64ToFloat(decUint64(p))\n}\n\nfunc decFloat64R(p []byte) *float64 {\n\treturn uint64ToFloatR(decUint64(p))\n}\n\nfunc uint64ToFloat(v uint64) float64 {\n\treturn *(*float64)(unsafe.Pointer(&v))\n}\n\nfunc uint64ToFloatR(v uint64) *float64 {\n\tf := *(*float64)(unsafe.Pointer(&v))\n\treturn &f\n}\n\nfunc decUint64(p []byte) uint64 {\n\treturn uint64(p[0])<<56 | uint64(p[1])<<48 | uint64(p[2])<<40 | uint64(p[3])<<32 | uint64(p[4])<<24 | uint64(p[5])<<16 | uint64(p[6])<<8 | uint64(p[7])\n}\n"
  },
  {
    "path": "serialization/duration/duration.go",
    "content": "package duration\n\ntype Duration struct {\n\tMonths      int32\n\tDays        int32\n\tNanoseconds int64\n}\n\nfunc (d Duration) Valid() bool {\n\treturn validDuration(d.Months, d.Days, d.Nanoseconds)\n}\n\nfunc validDuration(m, d int32, n int64) bool {\n\tif m >= 0 && d >= 0 && n >= 0 {\n\t\treturn true\n\t}\n\tif m <= 0 && d <= 0 && n <= 0 {\n\t\treturn true\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "serialization/duration/marshal.go",
    "content": "package duration\n\nimport (\n\t\"reflect\"\n\t\"time\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase int64:\n\t\treturn EncInt64(v)\n\tcase time.Duration:\n\t\treturn EncDur(v)\n\tcase string:\n\t\treturn EncString(v)\n\tcase Duration:\n\t\treturn EncDuration(v)\n\n\tcase *int64:\n\t\treturn EncInt64R(v)\n\tcase *time.Duration:\n\t\treturn EncDurR(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tcase *Duration:\n\t\treturn EncDurationR(v)\n\tdefault:\n\t\t// Custom types (type MyDate uint32) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/duration/marshal_str.go",
    "content": "package duration\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\nconst (\n\tnanosecond  = 1\n\tmicrosecond = 1000 * nanosecond\n\tmillisecond = 1000 * microsecond\n\tsecond      = 1000 * millisecond\n\tminute      = 60 * second\n\thour        = 60 * minute\n\tweek        = 7\n\tyear        = 12\n\n\tmicrosecondFloat = float64(microsecond)\n\tmillisecondFloat = float64(millisecond)\n\tsecondFloat      = float64(second)\n\tminuteFloat      = float64(minute)\n\thourFloat        = float64(hour)\n\tweekFloat        = float64(week)\n\tyearFloat        = float64(year)\n\n\tmaxNanosecondsNeg = uint64(9223372036854775808)\n\tmaxNanosecondsPos = maxNanosecondsNeg - 1\n\tmaxMonthsDaysNeg  = uint64(2147483648)\n\tmaxMonthsDaysPos  = maxMonthsDaysNeg - 1\n\n\tmaxMicrosecondsNeg = maxNanosecondsNeg / microsecond\n\tmaxMillisecondsNeg = maxMicrosecondsNeg / 1000\n\tmaxSecondsNeg      = maxMillisecondsNeg / 1000\n\tmaxMinutesNeg      = maxSecondsNeg / 60\n\tmaxHoursNeg        = maxMinutesNeg / 60\n\tmaxWeeksNeg        = maxNanosecondsNeg / 7\n\tmaxYearsNeg        = maxNanosecondsNeg / 12\n)\n\ntype parseReadState byte\n\nconst (\n\treadInteger parseReadState = iota\n\treadFraction\n\treadUnit\n\treadSkipFraction\n)\n\ntype parseWriteState byte\n\nconst (\n\twriteNanoseconds parseWriteState = iota\n\twriteMilliseconds\n\twriteMicroseconds\n\twriteSeconds\n\twriteMinutes\n\twriteHours\n\twriteDays\n\twriteWeeks\n\twriteMonths\n\twriteYears\n)\n\nfunc errorOutRange(valName string, integer uint64) error {\n\treturn fmt.Errorf(\"%s %d out of the range\", valName, integer)\n}\n\nfunc errorUnknownChars(chars string) error {\n\treturn fmt.Errorf(\"unknown charesters \\\"%s\\\"\", chars)\n}\n\nfunc encString(s string) ([]byte, error) {\n\tmonths, days, nanos, neg, err := encStringToUints(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif neg {\n\t\treturn encVintMonthsDaysNanosNeg(months, days, nanos), nil\n\t}\n\treturn encVintMonthsDaysNanosPos(months, days, nanos), 
nil\n}\n\nfunc encStringToUints(s string) (months uint64, days uint64, nanos uint64, neg bool, err error) {\n\t// Special case: if all that is left is \"0\", this is zero.\n\tif s == zeroDuration {\n\t\treturn 0, 0, 0, false, nil\n\t}\n\t// get the sign\n\tif c := s[0]; c == '-' || c == '+' {\n\t\tneg = c == '-'\n\t\ts = s[1:]\n\t}\n\n\tvar writeState parseWriteState\n\treadState := readInteger\n\tscale := float64(1)\n\tvar integer, fraction uint64\n\tvar ok bool\n\tfor i := 0; i < len(s); i++ {\n\t\tc := s[i]\n\t\tswitch readState {\n\t\tcase readInteger: // Consume [0-9.]* as integer part of value\n\t\t\tswitch {\n\t\t\tcase c >= '0' && c <= '9':\n\t\t\t\tinteger = integer*10 + uint64(c) - '0'\n\t\t\t\tif integer > maxNanosecondsNeg {\n\t\t\t\t\treturn 0, 0, 0, false, errorOutRange(\"value\", integer)\n\t\t\t\t}\n\t\t\tcase c == '.':\n\t\t\t\treadState = readFraction\n\t\t\tdefault:\n\t\t\t\ti--\n\t\t\t\treadState = readUnit\n\t\t\t}\n\t\tcase readFraction: // Consume [0-9]* as fraction part of value\n\t\t\tif c >= '0' && c <= '9' {\n\t\t\t\tscale *= 10\n\t\t\t\tfraction = fraction*10 + uint64(c) - '0'\n\t\t\t\tif fraction > maxNanosecondsNeg {\n\t\t\t\t\treadState = readSkipFraction\n\t\t\t\t\tfraction /= 10\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ti--\n\t\t\t\treadState = readUnit\n\t\t\t}\n\t\tcase readUnit: // Consume unit part of the string, adding the integer and fraction parts of the value to the output values.\n\t\t\t// Supported units:\n\t\t\t// \"ns\"\tnanosecond,\n\t\t\t// \"us\"\tmicrosecond,\n\t\t\t// \"µs\"\tmicrosecond U+00B5 = micro symbol,\n\t\t\t// \"μs\" microsecond U+03BC = Greek letter mu\n\t\t\t// \"ms\"\tmillisecond,\n\t\t\t// \"s\"\tsecond,\n\t\t\t// \"m\"\tminute,\n\t\t\t// \"h\"\thour,\n\t\t\t// \"d\"\tday,\n\t\t\t// \"w\"\tweek,\n\t\t\t// \"mo\"\tmonth,\n\t\t\t// \"y\"\tyear,\n\n\t\t\tswitch c {\n\t\t\tcase 'n': // \"ns\" nanosecond\n\t\t\t\tif i+1 == len(s) || s[i+1] != 's' {\n\t\t\t\t\treturn 0, 0, 0, false, 
errorUnknownChars(s[i:])\n\t\t\t\t}\n\t\t\t\ti++\n\t\t\t\twriteState = writeNanoseconds\n\t\t\tcase 'u': // \"us\" microsecond\n\t\t\t\tif i+1 == len(s) || s[i+1] != 's' {\n\t\t\t\t\treturn 0, 0, 0, false, errorUnknownChars(s[i:])\n\t\t\t\t}\n\t\t\t\ti++\n\t\t\t\twriteState = writeMicroseconds\n\t\t\tcase 194: // \"µs\" microsecond U+00B5 = micro symbol\n\t\t\t\tif i+2 >= len(s) || s[i+1] != 181 || s[i+2] != 's' {\n\t\t\t\t\treturn 0, 0, 0, false, errorUnknownChars(s[i:])\n\t\t\t\t}\n\t\t\t\ti++\n\t\t\t\twriteState = writeMicroseconds\n\t\t\tcase 206: // \"μs\" microsecond U+03BC = Greek letter mu\n\t\t\t\tif i+2 >= len(s) || s[i+1] != 188 || s[i+2] != 's' {\n\t\t\t\t\treturn 0, 0, 0, false, errorUnknownChars(s[i:])\n\t\t\t\t}\n\t\t\t\ti++\n\t\t\t\twriteState = writeMicroseconds\n\t\t\tcase 'm': // \"ms\" millisecond,\"mo\" month,\"m\" minute,\n\t\t\t\tif i+1 == len(s) { // \"m\" minute\n\t\t\t\t\twriteState = writeMinutes\n\t\t\t\t} else {\n\t\t\t\t\tswitch s[i+1] { // \"ms\" millisecond,\"mo\" month,\"m\" minute,\n\t\t\t\t\tcase 's': // \"ms\" millisecond\n\t\t\t\t\t\ti++\n\t\t\t\t\t\twriteState = writeMilliseconds\n\t\t\t\t\tcase 'o': // \"mo\" month\n\t\t\t\t\t\ti++\n\t\t\t\t\t\twriteState = writeMonths\n\t\t\t\t\tcase '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.': // \"m\" minute\n\t\t\t\t\t\twriteState = writeMinutes\n\t\t\t\t\tdefault:\n\t\t\t\t\t\treturn 0, 0, 0, false, errorUnknownChars(s[i:])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase 's': // \"s\" second\n\t\t\t\twriteState = writeSeconds\n\t\t\tcase 'h': // \"h\" hour\n\t\t\t\twriteState = writeHours\n\t\t\tcase 'd': // \"d\" day\n\t\t\t\twriteState = writeDays\n\t\t\tcase 'w': // \"w\" week\n\t\t\t\twriteState = writeWeeks\n\t\t\tcase 'y': // \"y\" year\n\t\t\t\twriteState = writeYears\n\t\t\tdefault: // unsupported characters\n\t\t\t\treturn 0, 0, 0, false, errorUnknownChars(s[i:])\n\t\t\t}\n\n\t\t\tswitch writeState {\n\t\t\tcase writeNanoseconds:\n\t\t\t\tif nanos, ok = addNanoseconds(nanos, 
integer, fraction, scale); !ok {\n\t\t\t\t\treturn 0, 0, 0, false, errorOutRange(\"nanoseconds\", nanos)\n\t\t\t\t}\n\t\t\tcase writeMicroseconds:\n\t\t\t\tif nanos, ok = addMicroseconds(nanos, integer, fraction, scale); !ok {\n\t\t\t\t\treturn 0, 0, 0, false, errorOutRange(\"nanoseconds\", nanos)\n\t\t\t\t}\n\t\t\tcase writeMilliseconds:\n\t\t\t\tif nanos, ok = addMilliseconds(nanos, integer, fraction, scale); !ok {\n\t\t\t\t\treturn 0, 0, 0, false, errorOutRange(\"nanoseconds\", nanos)\n\t\t\t\t}\n\t\t\tcase writeSeconds:\n\t\t\t\tif nanos, ok = addSeconds(nanos, integer, fraction, scale); !ok {\n\t\t\t\t\treturn 0, 0, 0, false, errorOutRange(\"nanoseconds\", nanos)\n\t\t\t\t}\n\t\t\tcase writeMinutes:\n\t\t\t\tif nanos, ok = addMinutes(nanos, integer, fraction, scale); !ok {\n\t\t\t\t\treturn 0, 0, 0, false, errorOutRange(\"nanoseconds\", nanos)\n\t\t\t\t}\n\t\t\tcase writeHours:\n\t\t\t\tif nanos, ok = addHours(nanos, integer, fraction, scale); !ok {\n\t\t\t\t\treturn 0, 0, 0, false, errorOutRange(\"nanoseconds\", nanos)\n\t\t\t\t}\n\t\t\tcase writeDays:\n\t\t\t\tif days, ok = addDaysMonths(days, integer, fraction, scale); !ok {\n\t\t\t\t\treturn 0, 0, 0, false, errorOutRange(\"days\", days)\n\t\t\t\t}\n\t\t\tcase writeWeeks:\n\t\t\t\tif days, ok = addWeeks(days, integer, fraction, scale); !ok {\n\t\t\t\t\treturn 0, 0, 0, false, errorOutRange(\"days\", days)\n\t\t\t\t}\n\t\t\tcase writeMonths:\n\t\t\t\tif months, ok = addDaysMonths(months, integer, fraction, scale); !ok {\n\t\t\t\t\treturn 0, 0, 0, false, errorOutRange(\"months\", months)\n\t\t\t\t}\n\t\t\tdefault: // writeYears\n\t\t\t\tif months, ok = addYears(months, integer, fraction, scale); !ok {\n\t\t\t\t\treturn 0, 0, 0, false, errorOutRange(\"years\", months)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// reset the temporary values, after write\n\t\t\treadState = readInteger\n\t\t\tinteger, fraction, scale = 0, 0, 1\n\t\tdefault: // Consume [0-9]* in case with overflow of the fraction part of value. 
Just skip digits.\n\t\t\tif c < '0' || c > '9' {\n\t\t\t\ti--\n\t\t\t\treadState = readUnit\n\t\t\t}\n\t\t}\n\t}\n\tif integer != 0 || fraction != 0 || scale != 1 { // if the temporary values are not reset, it means that the reading is not fully completed.\n\t\treturn 0, 0, 0, false, errors.New(\"unsupported format\")\n\t}\n\tif !neg {\n\t\tif months > maxMonthsDaysPos {\n\t\t\treturn 0, 0, 0, false, errorOutRange(\"months\", months)\n\t\t}\n\t\tif days > maxMonthsDaysPos {\n\t\t\treturn 0, 0, 0, false, errorOutRange(\"days\", days)\n\t\t}\n\t\tif nanos > maxNanosecondsPos {\n\t\t\treturn 0, 0, 0, false, errorOutRange(\"nanoseconds\", nanos)\n\t\t}\n\t}\n\treturn\n}\n\nfunc addNanoseconds(in, add, fraction uint64, scale float64) (uint64, bool) {\n\tif fraction > 0 {\n\t\tadd += uint64(float64(fraction) * (float64(1) / scale))\n\t\tif add > maxNanosecondsNeg {\n\t\t\treturn add, false\n\t\t}\n\t}\n\tin += add\n\tif in > maxNanosecondsNeg {\n\t\treturn in, false\n\t}\n\treturn in, true\n}\n\nfunc addMicroseconds(in, add, fraction uint64, scale float64) (uint64, bool) {\n\tif add > maxMicrosecondsNeg {\n\t\treturn add, false\n\t}\n\tadd *= microsecond\n\tif fraction > 0 {\n\t\tadd += uint64(float64(fraction) * (microsecondFloat / scale))\n\t\tif add > maxNanosecondsNeg {\n\t\t\treturn add, false\n\t\t}\n\t}\n\tin += add\n\tif in > maxNanosecondsNeg {\n\t\treturn in, false\n\t}\n\treturn in, true\n}\n\nfunc addMilliseconds(in, add, fraction uint64, scale float64) (uint64, bool) {\n\tif add > maxMillisecondsNeg {\n\t\treturn add, false\n\t}\n\tadd *= millisecond\n\tif fraction > 0 {\n\t\tadd += uint64(float64(fraction) * (millisecondFloat / scale))\n\t\tif add > maxNanosecondsNeg {\n\t\t\treturn add, false\n\t\t}\n\t}\n\tin += add\n\tif in > maxNanosecondsNeg {\n\t\treturn in, false\n\t}\n\treturn in, true\n}\n\nfunc addSeconds(in, add, fraction uint64, scale float64) (uint64, bool) {\n\tif add > maxSecondsNeg {\n\t\treturn add, false\n\t}\n\tadd *= second\n\tif fraction 
> 0 {\n\t\tadd += uint64(float64(fraction) * (secondFloat / scale))\n\t\tif add > maxNanosecondsNeg {\n\t\t\treturn add, false\n\t\t}\n\t}\n\tin += add\n\tif in > maxNanosecondsNeg {\n\t\treturn in, false\n\t}\n\treturn in, true\n}\n\nfunc addMinutes(in, add, fraction uint64, scale float64) (uint64, bool) {\n\tif add > maxMinutesNeg {\n\t\treturn add, false\n\t}\n\tadd *= minute\n\tif fraction > 0 {\n\t\tadd += uint64(float64(fraction) * (minuteFloat / scale))\n\t\tif add > maxNanosecondsNeg {\n\t\t\treturn add, false\n\t\t}\n\t}\n\tin += add\n\tif in > maxNanosecondsNeg {\n\t\treturn in, false\n\t}\n\treturn in, true\n}\n\nfunc addHours(in, add, fraction uint64, scale float64) (uint64, bool) {\n\tif add > maxHoursNeg {\n\t\treturn add, false\n\t}\n\tadd *= hour\n\tif fraction > 0 {\n\t\tadd += uint64(float64(fraction) * (hourFloat / scale))\n\t\tif add > maxNanosecondsNeg {\n\t\t\treturn add, false\n\t\t}\n\t}\n\tin += add\n\tif in > maxNanosecondsNeg {\n\t\treturn in, false\n\t}\n\treturn in, true\n}\n\nfunc addDaysMonths(in, add, fraction uint64, scale float64) (uint64, bool) {\n\tif fraction > 0 {\n\t\tadd += uint64(float64(fraction) * (float64(1) / scale))\n\t\tif add > maxMonthsDaysNeg {\n\t\t\treturn add, false\n\t\t}\n\t}\n\tin += add\n\tif in > maxMonthsDaysNeg {\n\t\treturn in, false\n\t}\n\treturn in, true\n}\n\nfunc addWeeks(in, add, fraction uint64, scale float64) (uint64, bool) {\n\tif add > maxWeeksNeg {\n\t\treturn add, false\n\t}\n\tadd *= week\n\tif fraction > 0 {\n\t\tadd += uint64(float64(fraction) * (weekFloat / scale))\n\t\tif add > maxMonthsDaysNeg {\n\t\t\treturn add, false\n\t\t}\n\t}\n\tin += add\n\tif in > maxMonthsDaysNeg {\n\t\treturn in, false\n\t}\n\treturn in, true\n}\n\nfunc addYears(in, add, fraction uint64, scale float64) (uint64, bool) {\n\tif add > maxYearsNeg {\n\t\treturn add, false\n\t}\n\tadd *= year\n\tif fraction > 0 {\n\t\tadd += uint64(float64(fraction) * (yearFloat / scale))\n\t\tif add > maxMonthsDaysNeg 
{\n\t\t\treturn add, false\n\t\t}\n\t}\n\tin += add\n\tif in > maxMonthsDaysNeg {\n\t\treturn in, false\n\t}\n\treturn in, true\n}\n"
  },
  {
    "path": "serialization/duration/marshal_str_test.go",
    "content": "package duration\n\nimport (\n\t\"math\"\n\t\"testing\"\n)\n\nfunc TestEncStr(t *testing.T) {\n\tfor n := int64(math.MaxInt64); n != 1; n = n / 2 {\n\t\tm, d := int32(n), int32(n)\n\t\tif n > math.MaxInt32 {\n\t\t\tm, d = math.MaxInt32, math.MaxInt32\n\t\t}\n\t\ttestEncString(t, m, d, n)\n\t\ttestEncString(t, 0, d, n)\n\t\ttestEncString(t, m, 0, n)\n\t\ttestEncString(t, m, d, 0)\n\t}\n\n\tfor n := int64(math.MinInt64); n != -1; n = n / 2 {\n\t\tm, d := int32(n), int32(n)\n\t\tif n < math.MinInt32 {\n\t\t\tm, d = math.MinInt32, math.MinInt32\n\t\t}\n\t\ttestEncString(t, m, d, n)\n\t\ttestEncString(t, 0, d, n)\n\t\ttestEncString(t, m, 0, n)\n\t\ttestEncString(t, m, d, 0)\n\t}\n}\n\nfunc testEncString(t *testing.T, m, d int32, n int64) {\n\tt.Helper()\n\ttestStr := getTestString(m, d, n)\n\tmu, du, nu, neg, err := encStringToUints(testStr)\n\tif err != nil {\n\t\tt.Fatalf(\"failed on encoding testcase value:m:%d,d:%d,n:%d\\ntest string:%s\\nerror:%s\", m, d, n, testStr, err)\n\t}\n\tme, de, ne := int32(mu), int32(du), int64(nu)\n\tif neg {\n\t\tme, de, ne = -me, -de, -ne\n\t}\n\tif me != m {\n\t\tt.Fatalf(\"testcase:%s\\nexpected and received months not equal expected:%d received:%d\", testStr, m, me)\n\t}\n\tif de != d {\n\t\tt.Fatalf(\"testcase:%s\\nexpected and received days not equal expected:%d received:%d\", testStr, d, de)\n\t}\n\tif ne != n {\n\t\tt.Fatalf(\"testcase:%s\\nexpected and received nanoseconds not equal expected:%d received:%d\", testStr, n, ne)\n\t}\n}\n"
  },
  {
    "path": "serialization/duration/marshal_utils.go",
    "content": "package duration\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n)\n\nconst (\n\tvintPrefix1 byte = 128\n\tvintPrefix2 byte = 192\n\tvintPrefix3 byte = 224\n\tvintPrefix4 byte = 240\n\tvintPrefix5 byte = 248\n\tvintPrefix6 byte = 252\n\tvintPrefix7 byte = 254\n\tvintPrefix8 byte = 255\n\n\tnanoDayPos = 24 * 60 * 60 * 1000 * 1000 * 1000\n\tnanoDayNeg = -nanoDayPos\n)\n\nfunc EncInt64(v int64) ([]byte, error) {\n\treturn encInt64(v), nil\n}\n\nfunc EncInt64R(v *int64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encInt64(*v), nil\n}\n\nfunc EncDur(v time.Duration) ([]byte, error) {\n\treturn encDur(v), nil\n}\n\nfunc EncDurR(v *time.Duration) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encDur(*v), nil\n}\n\nfunc EncString(v string) ([]byte, error) {\n\tif v == \"\" {\n\t\treturn nil, nil\n\t}\n\tdata, err := encString(v)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to marshal duration: the parse error of the (string)(%s): %v\", v, err)\n\t}\n\treturn data, nil\n}\n\nfunc EncStringR(v *string) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncString(*v)\n}\n\nfunc EncDuration(v Duration) ([]byte, error) {\n\tif !v.Valid() {\n\t\treturn nil, fmt.Errorf(\"failed to marshal duration: the (Duration) values of months (%d), days (%d) and nanoseconds (%d) should have the same sign\", v.Months, v.Days, v.Nanoseconds)\n\t}\n\treturn encVintMonthsDaysNanos(v.Months, v.Days, v.Nanoseconds), nil\n}\n\nfunc EncDurationR(v *Duration) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\tif !v.Valid() {\n\t\treturn nil, fmt.Errorf(\"failed to marshal duration: the (*Duration) values of the months (%d), days (%d) and nanoseconds (%d) should have same sign\", v.Months, v.Days, v.Nanoseconds)\n\t}\n\treturn encVintMonthsDaysNanos(v.Months, v.Days, v.Nanoseconds), nil\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase 
reflect.Int64:\n\t\treturn encInt64(v.Int()), nil\n\tcase reflect.String:\n\t\tval := v.String()\n\t\tif val == \"\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\tdata, err := encString(val)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal duration: the (%T)(%[1]v) have invalid format, %v\", v, err)\n\t\t}\n\t\treturn data, nil\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal duration: unsupported value type (%T)(%[1]v), supported types: ~int64, ~string, time.Duration, gocql.Duration, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal duration: unsupported value type (%T)(%[1]v), supported types: ~int64, ~string, time.Duration, gocql.Duration, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encDur(v time.Duration) []byte {\n\tif v < nanoDayPos && v > nanoDayNeg {\n\t\treturn encNanos(encIntZigZagDur(v))\n\t}\n\tn := v % nanoDayPos\n\treturn encDaysNanos(encIntZigZag32(int32((v-n)/nanoDayPos)), encIntZigZagDur(n))\n}\n\nfunc encInt64(v int64) []byte {\n\tif v < nanoDayPos && v > nanoDayNeg {\n\t\treturn encNanos(encIntZigZag64(v))\n\t}\n\tn := v % nanoDayPos\n\treturn encDaysNanos(encIntZigZag32(int32((v-n)/nanoDayPos)), encIntZigZag64(n))\n}\n\nfunc encZigZagUint64Pos(v uint64) uint64 {\n\treturn v << 1\n}\n\nfunc encIntZigZag32(v int32) uint32 {\n\treturn uint32((v >> 31) ^ (v << 1))\n}\n\nfunc encIntZigZag64(v int64) uint64 {\n\treturn uint64((v >> 63) ^ (v << 1))\n}\n\nfunc encIntZigZagDur(v time.Duration) uint64 {\n\treturn uint64((v >> 63) ^ (v << 1))\n}\n\nfunc encVint32(v uint32) []byte {\n\tswitch {\n\tcase byte(v>>28) != 0:\n\t\treturn []byte{vintPrefix4, byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>21) != 0:\n\t\treturn []byte{vintPrefix3 | 
byte(v>>24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>14) != 0:\n\t\treturn []byte{vintPrefix2 | byte(v>>16), byte(v >> 8), byte(v)}\n\tcase byte(v>>7) != 0:\n\t\treturn []byte{vintPrefix1 | byte(v>>8), byte(v)}\n\tdefault:\n\t\treturn []byte{byte(v)}\n\t}\n}\n\nfunc encVint64as32(v uint64) []byte {\n\tswitch {\n\tcase byte(v>>28) != 0:\n\t\treturn []byte{vintPrefix4, byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>21) != 0:\n\t\treturn []byte{vintPrefix3 | byte(v>>24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>14) != 0:\n\t\treturn []byte{vintPrefix2 | byte(v>>16), byte(v >> 8), byte(v)}\n\tcase byte(v>>7) != 0:\n\t\treturn []byte{vintPrefix1 | byte(v>>8), byte(v)}\n\tdefault:\n\t\treturn []byte{byte(v)}\n\t}\n}\n\nfunc encVint64(v uint64) []byte {\n\tswitch {\n\tcase byte(v>>56) != 0:\n\t\treturn []byte{vintPrefix8, byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>49) != 0:\n\t\treturn []byte{vintPrefix7 | byte(v>>56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>42) != 0:\n\t\treturn []byte{vintPrefix6 | byte(v>>48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>35) != 0:\n\t\treturn []byte{vintPrefix5 | byte(v>>40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>28) != 0:\n\t\treturn []byte{vintPrefix4 | byte(v>>32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>21) != 0:\n\t\treturn []byte{vintPrefix3 | byte(v>>24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>14) != 0:\n\t\treturn []byte{vintPrefix2 | byte(v>>16), byte(v >> 8), byte(v)}\n\tcase byte(v>>7) != 0:\n\t\treturn []byte{vintPrefix1 | byte(v>>8), byte(v)}\n\tdefault:\n\t\treturn []byte{byte(v)}\n\t}\n}\n\nfunc encVintMonthsDaysNanos(m, d int32, n int64) []byte {\n\tif m == 0 {\n\t\tif d == 
0 {\n\t\t\treturn encNanos(encIntZigZag64(n))\n\t\t}\n\t\treturn append(encDays(encIntZigZag32(d)), encVint64(encIntZigZag64(n))...)\n\t}\n\treturn append(append(encVint32(encIntZigZag32(m)), encVint32(encIntZigZag32(d))...), encVint64(encIntZigZag64(n))...)\n}\n\nfunc encVintMonthsDaysNanosPos(m, d, n uint64) []byte {\n\tif m == 0 {\n\t\tif d == 0 {\n\t\t\treturn encNanos(encZigZagUint64Pos(n))\n\t\t}\n\t\treturn append(encDays64(encZigZagUint64Pos(d)), encVint64(encZigZagUint64Pos(n))...)\n\t}\n\treturn append(append(encVint64as32(encZigZagUint64Pos(m)), encVint64as32(encZigZagUint64Pos(d))...), encVint64(encZigZagUint64Pos(n))...)\n}\n\nfunc encVintMonthsDaysNanosNeg(m, d, n uint64) []byte {\n\tif m == 0 {\n\t\tif d == 0 {\n\t\t\treturn encNanos(encIntZigZag64(int64(-n)))\n\t\t}\n\t\treturn append(encDays(encIntZigZag32(int32(-d))), encVint64(encIntZigZag64(int64(-n)))...)\n\t}\n\treturn append(append(encVint32(encIntZigZag32(int32(-m))), encVint32(encIntZigZag32(int32(-d)))...), encVint64(encIntZigZag64(int64(-n)))...)\n}\n\nfunc encDaysNanos(d uint32, n uint64) []byte {\n\treturn append(encDays(d), encVint64(n)...)\n}\n\nfunc encDays(v uint32) []byte {\n\tswitch {\n\tcase byte(v>>28) != 0:\n\t\treturn []byte{0, vintPrefix4, byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>21) != 0:\n\t\treturn []byte{0, vintPrefix3 | byte(v>>24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>14) != 0:\n\t\treturn []byte{0, vintPrefix2 | byte(v>>16), byte(v >> 8), byte(v)}\n\tcase byte(v>>7) != 0:\n\t\treturn []byte{0, vintPrefix1 | byte(v>>8), byte(v)}\n\tdefault:\n\t\treturn []byte{0, byte(v)}\n\t}\n}\n\nfunc encDays64(v uint64) []byte {\n\tswitch {\n\tcase byte(v>>28) != 0:\n\t\treturn []byte{0, vintPrefix4, byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>21) != 0:\n\t\treturn []byte{0, vintPrefix3 | byte(v>>24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>14) != 0:\n\t\treturn []byte{0, vintPrefix2 | byte(v>>16), 
byte(v >> 8), byte(v)}\n\tcase byte(v>>7) != 0:\n\t\treturn []byte{0, vintPrefix1 | byte(v>>8), byte(v)}\n\tdefault:\n\t\treturn []byte{0, byte(v)}\n\t}\n}\n\nfunc encNanos(v uint64) []byte {\n\tswitch {\n\tcase byte(v>>56) != 0:\n\t\treturn []byte{0, 0, vintPrefix8, byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>49) != 0:\n\t\treturn []byte{0, 0, vintPrefix7 | byte(v>>56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>42) != 0:\n\t\treturn []byte{0, 0, vintPrefix6 | byte(v>>48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>35) != 0:\n\t\treturn []byte{0, 0, vintPrefix5 | byte(v>>40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>28) != 0:\n\t\treturn []byte{0, 0, vintPrefix4 | byte(v>>32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>21) != 0:\n\t\treturn []byte{0, 0, vintPrefix3 | byte(v>>24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>14) != 0:\n\t\treturn []byte{0, 0, vintPrefix2 | byte(v>>16), byte(v >> 8), byte(v)}\n\tcase byte(v>>7) != 0:\n\t\treturn []byte{0, 0, vintPrefix1 | byte(v>>8), byte(v)}\n\tdefault:\n\t\treturn []byte{0, 0, byte(v)}\n\t}\n}\n"
  },
  {
    "path": "serialization/duration/marshal_vint_test.go",
    "content": "package duration\n\nimport (\n\t\"bytes\"\n\t\"math\"\n\t\"math/bits\"\n\t\"testing\"\n)\n\nfunc TestEncVint32(t *testing.T) {\n\tfor i := int32(math.MaxInt32); i != 1; i = i / 2 {\n\t\ttestEnc32(t, i)\n\t\ttestEnc32(t, -i-1)\n\t}\n}\n\nfunc TestEncVint64(t *testing.T) {\n\tfor i := int64(math.MaxInt64); i != 1; i = i / 2 {\n\t\ttestEnc64(t, i)\n\t\ttestEnc64(t, -i-1)\n\t}\n}\n\nfunc testEnc32(t *testing.T, v int32) {\n\tt.Helper()\n\texpected := genVintData(int64(v))\n\treceived := encVint32(encIntZigZag32(v))\n\n\tif !bytes.Equal(expected, received) {\n\t\tt.Fatalf(\"expected and received data not equal\\nvalue:%d\\ndata expected:%b\\ndata received:%b\", v, expected, received)\n\t}\n}\n\nfunc testEnc64(t *testing.T, v int64) {\n\tt.Helper()\n\texpected := genVintData(v)\n\treceived := encVint64(encIntZigZag64(v))\n\n\tif !bytes.Equal(expected, received) {\n\t\tt.Fatalf(\"expected and received data not equal\\nvalue:%d\\ndata expected:%b\\ndata received:%b\", v, expected, received)\n\t}\n}\n\nfunc genVintData(v int64) []byte {\n\tvEnc := encIntZigZag64(v)\n\tlead0 := bits.LeadingZeros64(vEnc)\n\tnumBytes := (639 - lead0*9) >> 6\n\n\t// It can be 1 or 0 if v == 0\n\tif numBytes <= 1 {\n\t\treturn []byte{byte(vEnc)}\n\t}\n\textraBytes := numBytes - 1\n\tvar buf = make([]byte, numBytes)\n\tfor i := extraBytes; i >= 0; i-- {\n\t\tbuf[i] = byte(vEnc)\n\t\tvEnc >>= 8\n\t}\n\tbuf[0] |= byte(^(0xff >> uint(extraBytes)))\n\treturn buf\n}\n"
  },
  {
    "path": "serialization/duration/unmarshal.go",
    "content": "package duration\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\n\tcase *int64:\n\t\treturn DecInt64(data, v)\n\tcase *string:\n\t\treturn DecString(data, v)\n\tcase *time.Duration:\n\t\treturn DecDur(data, v)\n\tcase *Duration:\n\t\treturn DecDuration(data, v)\n\n\tcase **int64:\n\t\treturn DecInt64R(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tcase **time.Duration:\n\t\treturn DecDurR(data, v)\n\tcase **Duration:\n\t\treturn DecDurationR(data, v)\n\tdefault:\n\n\t\t// Custom types (type MyDate uint32) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal duration: unsupported value type (%T)(%[1]v), supported types: ~int64, ~string, time.Duration, gocql.Duration\", value)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/duration/unmarshal_str.go",
    "content": "package duration\n\nfunc decString(m, d int32, n int64) string {\n\tvar neg bool\n\tvar tmp uint64\n\tout := new([50]byte) // max string \"-178956970y8mo306783378w2d2562047h47m16.854775808s\"\n\tpos := 49\n\tif m < 0 || d < 0 || n < 0 {\n\t\tneg = true\n\t}\n\tif n != 0 {\n\t\ttmp = uint64(n)\n\t\tif neg {\n\t\t\ttmp = -tmp\n\t\t}\n\t\tpos = printNanoseconds(out, tmp, pos)\n\t}\n\tif d != 0 {\n\t\ttmp = uint64(d)\n\t\tif neg {\n\t\t\ttmp = -tmp\n\t\t}\n\t\tpos = printDays(out, tmp, pos)\n\t}\n\tif m != 0 {\n\t\ttmp = uint64(m)\n\t\tif neg {\n\t\t\ttmp = -tmp\n\t\t}\n\t\tpos = printMonths(out, tmp, pos)\n\t}\n\tif pos == 49 {\n\t\treturn zeroDuration\n\t}\n\tif neg {\n\t\tout[pos] = '-'\n\t\tpos--\n\t}\n\treturn string(out[pos+1:])\n}\n\nfunc printMonths(out *[50]byte, m uint64, pos int) int {\n\tmonths := m % 12\n\tif months == 0 {\n\t\tm--\n\t\tmonths = 12\n\t}\n\tout[pos] = 'o'\n\tpos--\n\tout[pos] = 'm'\n\tpos--\n\tpos = printInt(out, months, pos)\n\tif m > 12 { // print years\n\t\tout[pos] = 'y'\n\t\tpos--\n\t\tpos = printInt(out, m/12, pos)\n\t}\n\treturn pos\n}\n\nfunc printDays(out *[50]byte, d uint64, pos int) int {\n\tdays := d % 7\n\tif days == 0 {\n\t\td--\n\t\tdays = 7\n\t}\n\tout[pos] = 'd' // print days\n\tpos--\n\tout[pos] = byte(days) + '0'\n\tpos--\n\tif d > 7 { // print weeks\n\t\tout[pos] = 'w'\n\t\tpos--\n\t\tpos = printInt(out, d/7, pos)\n\t}\n\treturn pos\n}\n\nfunc printNanoseconds(out *[50]byte, n uint64, pos int) int {\n\tif n < second {\n\t\t// Special case: if nanoseconds is smaller than a second,\n\t\t// use smaller units, like 1.2ms\n\t\tdotPos := 0\n\t\tout[pos] = 's'\n\t\tpos--\n\t\tswitch {\n\t\tcase n < microsecond: // case for nanoseconds\n\t\t\tout[pos] = 'n'\n\t\t\tpos--\n\t\tcase n < millisecond: // case for microseconds\n\t\t\tcopy(out[pos-1:], \"µ\") // U+00B5 'µ' micro sign == 0xC2 0xB5\n\t\t\tpos -= 2\n\t\t\tif n%microsecond == 0 {\n\t\t\t\tn /= microsecond\n\t\t\t} else {\n\t\t\t\tdotPos = 
3\n\t\t\t}\n\t\tdefault: // case for milliseconds\n\t\t\tout[pos] = 'm'\n\t\t\tpos--\n\t\t\tif n%millisecond == 0 {\n\t\t\t\tn /= millisecond\n\t\t\t} else {\n\t\t\t\tdotPos = 6\n\t\t\t}\n\t\t}\n\t\tif dotPos == 0 {\n\t\t\treturn printInt(out, n, pos)\n\t\t}\n\t\treturn printIntFrac(out, n, dotPos, pos)\n\t}\n\tif s := n % 60000000000; s != 0 { // case for seconds\n\t\tout[pos] = 's'\n\t\tpos--\n\t\tpos = printIntFrac(out, s, 9, pos)\n\t}\n\tif n >= 60000000000 {\n\t\tn /= 60000000000           // n is now integer minutes\n\t\tif mn := n % 60; mn != 0 { // case for minutes\n\t\t\tout[pos] = 'm'\n\t\t\tpos--\n\t\t\tpos = printInt(out, mn, pos)\n\t\t}\n\t\tif n >= 60 {\n\t\t\tout[pos] = 'h'\n\t\t\tpos--\n\t\t\tpos = printInt(out, n/60, pos) // n is now integer hours\n\t\t}\n\t}\n\treturn pos\n}\n\nfunc printIntFrac(out *[50]byte, n uint64, dotPos, pos int) int {\n\tstart := false\n\tfor i := 0; n > 0; i++ {\n\t\tdigit := n % 10\n\t\tif start && i == dotPos {\n\t\t\tout[pos] = '.'\n\t\t\tpos--\n\t\t}\n\t\tstart = start || digit != 0 || i == dotPos\n\t\tif start {\n\t\t\tout[pos] = byte(digit) + '0'\n\t\t\tpos--\n\t\t}\n\t\tn /= 10\n\t}\n\treturn pos\n}\n\nfunc printInt(out *[50]byte, n uint64, pos int) int {\n\tswitch {\n\tcase n >= 100:\n\t\tfor n > 0 {\n\t\t\tout[pos] = byte(n%10) + '0'\n\t\t\tn /= 10\n\t\t\tpos--\n\t\t}\n\tcase n >= 10:\n\t\tout[pos] = byte(n%10) + '0'\n\t\tpos--\n\t\tout[pos] = byte(n/10) + '0'\n\t\tpos--\n\tdefault:\n\t\tout[pos] = byte(n) + '0'\n\t\tpos--\n\t}\n\treturn pos\n}\n"
  },
  {
    "path": "serialization/duration/unmarshal_str_test.go",
    "content": "package duration\n\nimport (\n\t\"math\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestDecStr(t *testing.T) {\n\tfor n := int64(math.MaxInt64); n != 1; n = n / 2 {\n\t\tm, d := int32(n), int32(n)\n\t\tif n > math.MaxInt32 {\n\t\t\tm, d = math.MaxInt32, math.MaxInt32\n\t\t}\n\t\ttestDecString(t, m, d, n)\n\t\ttestDecString(t, 0, d, n)\n\t\ttestDecString(t, m, 0, n)\n\t\ttestDecString(t, m, d, 0)\n\t}\n\n\tfor n := int64(math.MinInt64); n != -1; n = n / 2 {\n\t\tm, d := int32(n), int32(n)\n\t\tif n < math.MinInt32 {\n\t\t\tm, d = math.MinInt32, math.MinInt32\n\t\t}\n\t\ttestDecString(t, m, d, n)\n\t\ttestDecString(t, 0, d, n)\n\t\ttestDecString(t, m, 0, n)\n\t\ttestDecString(t, m, d, 0)\n\t}\n}\n\nfunc testDecString(t *testing.T, m, d int32, n int64) {\n\tt.Helper()\n\texpected := getTestString(m, d, n)\n\treceived := decString(m, d, n)\n\n\tif expected != received {\n\t\tt.Fatalf(\"expected and recieved strings not equal\\nvalue:m:%d,d:%d,n:%d\\nexpected:%s\\nreceived:%s\", m, d, n, expected, received)\n\t}\n}\n\nfunc getTestString(m, d int32, n int64) string {\n\tout := \"\"\n\tif m < 0 || d < 0 || n < 0 {\n\t\tout += \"-\"\n\t}\n\tif m != 0 {\n\t\tout += getStringMonths(m)\n\t}\n\tif d != 0 {\n\t\tout += getStringDays(d)\n\t}\n\tif n != 0 {\n\t\tout += getStringNanos(n)\n\t}\n\tif out == \"\" {\n\t\treturn zeroDuration\n\t}\n\treturn out\n}\n\nfunc getStringMonths(m int32) string {\n\tout := \"\"\n\tmu := uint64(m)\n\tif m < 0 {\n\t\tmu = -mu\n\t}\n\ty := mu / 12\n\tif mu = mu % 12; mu == 0 {\n\t\ty--\n\t\tmu = 12\n\t}\n\tif y != 0 {\n\t\tout += strconv.FormatUint(y, 10) + \"y\"\n\t}\n\tout += strconv.FormatUint(mu, 10) + \"mo\"\n\treturn out\n}\n\nfunc getStringDays(d int32) string {\n\tout := \"\"\n\tdu := uint64(d)\n\tif d < 0 {\n\t\tdu = -du\n\t}\n\tw := du / 7\n\tif du = du % 7; du == 0 {\n\t\tw--\n\t\tdu = 7\n\t}\n\tif w != 0 {\n\t\tout += strconv.FormatUint(w, 10) + \"w\"\n\t}\n\tout += strconv.FormatUint(du, 10) + \"d\"\n\treturn 
out\n}\n\nfunc getStringNanos(d int64) string {\n\tout := time.Duration(d).String()\n\tif d < 0 {\n\t\tout = out[1:]\n\t}\n\treturn out\n}\n"
  },
  {
    "path": "serialization/duration/unmarshal_utils.go",
    "content": "package duration\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\t\"time\"\n)\n\nconst (\n\tmaxDays      = (math.MaxInt64 - math.MaxInt64%nanoDayPos) / nanoDayPos\n\tminDays      = -maxDays\n\tmaxDaysNanos = maxDays * nanoDayPos\n\tminDaysNanos = minDays * nanoDayPos\n\tzeroDuration = \"0s\"\n)\n\nvar (\n\terrWrongDataLen = fmt.Errorf(\"failed to unmarshal duration: the length of the data should be 0 or 3-19\")\n\terrBrokenData   = fmt.Errorf(\"failed to unmarshal duration: the data is broken\")\n\terrInvalidSign  = fmt.Errorf(\"failed to unmarshal duration: the data values of months, days and nanoseconds should have the same sign\")\n)\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal duration: can not unmarshal into nil reference (%T)(%[1]v))\", v)\n}\n\nfunc DecInt64(p []byte, v *int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch l := len(p); {\n\tcase l == 0:\n\t\t*v = 0\n\tcase l < 3:\n\t\treturn errWrongDataLen\n\tdefault:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal duration: to unmarshal into (int64) the months value should be 0\")\n\t\t}\n\t\tif p[1] == 0 {\n\t\t\tvar ok bool\n\t\t\tif *v, ok = decNanos64(p); !ok {\n\t\t\t\treturn errBrokenData\n\t\t\t}\n\t\t} else {\n\t\t\td, n, ok := decDaysNanos64(p)\n\t\t\tif !ok {\n\t\t\t\treturn errBrokenData\n\t\t\t}\n\t\t\tif !validSignDateNanos(d, n) {\n\t\t\t\treturn errInvalidSign\n\t\t\t}\n\t\t\tif *v, ok = daysToNanos(d, n); !ok {\n\t\t\t\treturn fmt.Errorf(\"failed to unmarshal duration: to unmarshal into (int64) the data value should be in int64 range\")\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc DecInt64R(p []byte, v **int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch l := len(p); {\n\tcase l == 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int64)\n\t\t}\n\tcase l < 3:\n\t\treturn errWrongDataLen\n\tdefault:\n\t\tif p[0] != 0 {\n\t\t\treturn 
fmt.Errorf(\"failed to unmarshal duration: to unmarshal into (*int64) the months value should be 0\")\n\t\t}\n\t\tif p[1] == 0 {\n\t\t\tn, ok := decNanos64(p)\n\t\t\tif !ok {\n\t\t\t\treturn errBrokenData\n\t\t\t}\n\t\t\t*v = &n\n\t\t} else {\n\t\t\td, n, ok := decDaysNanos64(p)\n\t\t\tif !ok {\n\t\t\t\treturn errBrokenData\n\t\t\t}\n\t\t\tif !validSignDateNanos(d, n) {\n\t\t\t\treturn errInvalidSign\n\t\t\t}\n\t\t\tif n, ok = daysToNanos(d, n); !ok {\n\t\t\t\treturn fmt.Errorf(\"failed to unmarshal duration: to unmarshal into (*int64) the data value should be in int64 range\")\n\t\t\t}\n\t\t\t*v = &n\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch l := len(p); {\n\tcase l == 0:\n\t\tif p == nil {\n\t\t\t*v = \"\"\n\t\t} else {\n\t\t\t*v = zeroDuration\n\t\t}\n\tcase l < 3:\n\t\treturn errWrongDataLen\n\tdefault:\n\t\tm, d, n, ok := decDuration(p)\n\t\tif !ok {\n\t\t\treturn errBrokenData\n\t\t}\n\t\tif !validDuration(m, d, n) {\n\t\t\treturn errInvalidSign\n\t\t}\n\t\t*v = decString(m, d, n)\n\t}\n\treturn nil\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch l := len(p); {\n\tcase l == 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\tval := zeroDuration\n\t\t\t*v = &val\n\t\t}\n\tcase l < 3:\n\t\treturn errWrongDataLen\n\tdefault:\n\t\tvar val string\n\t\tm, d, n, ok := decDuration(p)\n\t\tif !ok {\n\t\t\treturn errBrokenData\n\t\t}\n\t\tif !validDuration(m, d, n) {\n\t\t\treturn errInvalidSign\n\t\t}\n\t\tval = decString(m, d, n)\n\t\t*v = &val\n\t}\n\treturn nil\n}\n\nfunc DecDur(p []byte, v *time.Duration) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch l := len(p); {\n\tcase l == 0:\n\t\t*v = 0\n\tcase l < 3:\n\t\treturn errWrongDataLen\n\tdefault:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal duration: to unmarshal into (time.Duration) the months value 
should be 0\")\n\t\t}\n\t\tif p[1] == 0 {\n\t\t\tvar ok bool\n\t\t\tif *v, ok = decNanosDur(p); !ok {\n\t\t\t\treturn errBrokenData\n\t\t\t}\n\t\t} else {\n\t\t\td, n, ok := decDaysNanosDur(p)\n\t\t\tif !ok {\n\t\t\t\treturn errBrokenData\n\t\t\t}\n\t\t\tif !validDateNanosDur(d, n) {\n\t\t\t\treturn errInvalidSign\n\t\t\t}\n\t\t\tif n, ok = daysToNanosDur(d, n); !ok {\n\t\t\t\treturn fmt.Errorf(\"failed to unmarshal duration: to unmarshal into (time.Duration) the data value should be in int64 range\")\n\t\t\t}\n\t\t\t*v = n\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc DecDurR(p []byte, v **time.Duration) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch l := len(p); {\n\tcase l == 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(time.Duration)\n\t\t}\n\tcase l < 3:\n\t\treturn errWrongDataLen\n\tdefault:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal duration: to unmarshal into (*time.Duration) the months value should be 0\")\n\t\t}\n\t\tif p[1] == 0 {\n\t\t\tn, ok := decNanosDur(p)\n\t\t\tif !ok {\n\t\t\t\treturn errBrokenData\n\t\t\t}\n\t\t\t*v = &n\n\t\t} else {\n\t\t\td, n, ok := decDaysNanosDur(p)\n\t\t\tif !ok {\n\t\t\t\treturn errBrokenData\n\t\t\t}\n\t\t\tif !validDateNanosDur(d, n) {\n\t\t\t\treturn errInvalidSign\n\t\t\t}\n\t\t\tif n, ok = daysToNanosDur(d, n); !ok {\n\t\t\t\treturn fmt.Errorf(\"failed to unmarshal duration: to unmarshal into (*time.Duration) the data value should be in int64 range\")\n\t\t\t}\n\t\t\t*v = &n\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc DecDuration(p []byte, v *Duration) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch l := len(p); {\n\tcase l == 0:\n\t\t*v = Duration{}\n\tcase l < 3:\n\t\treturn errWrongDataLen\n\tdefault:\n\t\tvar ok bool\n\t\tv.Months, v.Days, v.Nanoseconds, ok = decVints(p)\n\t\tif !ok {\n\t\t\treturn errBrokenData\n\t\t}\n\t\tif !v.Valid() {\n\t\t\treturn errInvalidSign\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc DecDurationR(p []byte, v 
**Duration) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch l := len(p); {\n\tcase l == 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(Duration)\n\t\t}\n\tcase l < 3:\n\t\treturn errWrongDataLen\n\tdefault:\n\t\tvar ok bool\n\t\tvar val Duration\n\t\tval.Months, val.Days, val.Nanoseconds, ok = decVints(p)\n\t\tif !ok {\n\t\t\treturn errBrokenData\n\t\t}\n\t\tif !val.Valid() {\n\t\t\treturn errInvalidSign\n\t\t}\n\t\t*v = &val\n\t}\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal duration: can not unmarshal into nil reference (%T)(%[1]v))\", v.Interface())\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Int64:\n\t\treturn decReflectInt64(p, v)\n\tcase reflect.String:\n\t\treturn decReflectString(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal duration: unsupported value type (%T)(%[1]v), supported types: ~int64, ~string, time.Duration, gocql.Duration\", v.Interface())\n\t}\n}\n\nfunc decReflectInt64(p []byte, v reflect.Value) error {\n\tswitch l := len(p); {\n\tcase l == 0:\n\t\tv.SetInt(0)\n\tcase l < 3:\n\t\treturn errWrongDataLen\n\tdefault:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal duration: to unmarshal into (%T) the months value should be 0\", v.Interface())\n\t\t}\n\t\tif p[1] == 0 {\n\t\t\tn, ok := decNanos64(p)\n\t\t\tif !ok {\n\t\t\t\treturn errBrokenData\n\t\t\t}\n\t\t\tv.SetInt(n)\n\t\t} else {\n\t\t\td, n, ok := decDaysNanos64(p)\n\t\t\tif !ok {\n\t\t\t\treturn errBrokenData\n\t\t\t}\n\t\t\tif !validSignDateNanos(d, n) {\n\t\t\t\treturn errInvalidSign\n\t\t\t}\n\t\t\tif n, ok = daysToNanos(d, n); !ok {\n\t\t\t\treturn fmt.Errorf(\"failed to unmarshal duration: to unmarshal into (%T) the data value should be in int64 range\", v.Interface())\n\t\t\t}\n\t\t\tv.SetInt(n)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc decReflectString(p []byte, v reflect.Value) error {\n\tswitch l := len(p); 
{\n\tcase l == 0:\n\t\tif p == nil {\n\t\t\tv.SetString(\"\")\n\t\t} else {\n\t\t\tv.SetString(zeroDuration)\n\t\t}\n\tcase l < 3:\n\t\treturn errWrongDataLen\n\tdefault:\n\t\tm, d, n, ok := decDuration(p)\n\t\tif !ok {\n\t\t\treturn errBrokenData\n\t\t}\n\t\tif !validDuration(m, d, n) {\n\t\t\treturn errInvalidSign\n\t\t}\n\t\tv.SetString(decString(m, d, n))\n\t}\n\treturn nil\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal duration: can not unmarshal into nil reference (%T)(%[1]v)\", v.Interface())\n\t}\n\n\tswitch v.Type().Elem().Elem().Kind() {\n\tcase reflect.Int64:\n\t\treturn decReflectInt64R(p, v)\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal duration: unsupported value type (%T)(%[1]v), supported types: ~int64, ~string, time.Duration, gocql.Duration\", v.Interface())\n\t}\n}\n\nfunc decReflectInt64R(p []byte, v reflect.Value) error {\n\tswitch l := len(p); {\n\tcase l == 0:\n\t\tvar val reflect.Value\n\t\tif p == nil {\n\t\t\tval = reflect.Zero(v.Type().Elem())\n\t\t} else {\n\t\t\tval = reflect.New(v.Type().Elem().Elem())\n\t\t}\n\t\tv.Elem().Set(val)\n\tcase l < 3:\n\t\treturn errWrongDataLen\n\tdefault:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal duration: to unmarshal into (%T) the months value should be 0\", v.Interface())\n\t\t}\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tif p[1] == 0 {\n\t\t\tn, ok := decNanos64(p)\n\t\t\tif !ok {\n\t\t\t\treturn errBrokenData\n\t\t\t}\n\t\t\tval.Elem().SetInt(n)\n\t\t} else {\n\t\t\td, n, ok := decDaysNanos64(p)\n\t\t\tif !ok {\n\t\t\t\treturn errBrokenData\n\t\t\t}\n\t\t\tif !validSignDateNanos(d, n) {\n\t\t\t\treturn errInvalidSign\n\t\t\t}\n\t\t\tif n, ok = daysToNanos(d, n); !ok {\n\t\t\t\treturn fmt.Errorf(\"failed to unmarshal duration: to unmarshal into (%T) the data value should be in int64 range\", 
v.Interface())\n\t\t\t}\n\t\t\tval.Elem().SetInt(n)\n\t\t}\n\t\tv.Elem().Set(val)\n\t}\n\treturn nil\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tswitch l := len(p); {\n\tcase l == 0:\n\t\tvar val reflect.Value\n\t\tif p == nil {\n\t\t\tval = reflect.Zero(v.Type().Elem())\n\t\t} else {\n\t\t\tval = reflect.New(v.Type().Elem().Elem())\n\t\t\tval.Elem().SetString(zeroDuration)\n\t\t}\n\t\tv.Elem().Set(val)\n\tcase l < 3:\n\t\treturn errWrongDataLen\n\tdefault:\n\t\tm, d, n, ok := decDuration(p)\n\t\tif !ok {\n\t\t\treturn errBrokenData\n\t\t}\n\t\tif !validDuration(m, d, n) {\n\t\t\treturn errInvalidSign\n\t\t}\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetString(decString(m, d, n))\n\t\tv.Elem().Set(val)\n\t}\n\treturn nil\n}\n\nfunc validSignDateNanos(d int64, n int64) bool {\n\tif d >= 0 && n >= 0 {\n\t\treturn true\n\t}\n\tif d <= 0 && n <= 0 {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc daysToNanos(d int64, n int64) (int64, bool) {\n\tif d > maxDays || d < minDays {\n\t\treturn 0, false\n\t}\n\td *= nanoDayPos\n\tif (d > 0 && math.MaxInt64-d < n) || (d < 0 && math.MinInt64-d > n) {\n\t\treturn 0, false\n\t}\n\treturn n + d, true\n}\n\nfunc daysToNanosDur(d time.Duration, n time.Duration) (time.Duration, bool) {\n\tif d > maxDays || d < minDays {\n\t\treturn 0, false\n\t}\n\td *= nanoDayPos\n\tif (d > 0 && math.MaxInt64-d < n) || (d < 0 && math.MinInt64-d > n) {\n\t\treturn 0, false\n\t}\n\treturn n + d, true\n}\n\nfunc validDateNanosDur(d time.Duration, n time.Duration) bool {\n\tif d >= 0 && n >= 0 {\n\t\treturn true\n\t}\n\tif d <= 0 && n <= 0 {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc decDuration(p []byte) (m int32, d int32, n int64, ok bool) {\n\tif p[0] != 0 {\n\t\tm, d, n, ok = decVints(p)\n\t} else if p[1] != 0 {\n\t\td, n, ok = decDaysNanos(p)\n\t} else {\n\t\tn, ok = decNanos64(p)\n\t}\n\treturn\n}\n\nfunc decVints(p []byte) (int32, int32, int64, bool) {\n\tm, read := decVint32(p, 0)\n\tif read == 
0 {\n\t\treturn 0, 0, 0, false\n\t}\n\td, read := decVint32(p, read)\n\tif read == 0 {\n\t\treturn 0, 0, 0, false\n\t}\n\tn, read := decVint64(p, read)\n\tif read == 0 {\n\t\treturn 0, 0, 0, false\n\t}\n\treturn decZigZag32(m), decZigZag32(d), decZigZag64(n), true\n}\n\nfunc decDaysNanos(p []byte) (int32, int64, bool) {\n\td, read := decVint32(p, 1)\n\tif read == 0 {\n\t\treturn 0, 0, false\n\t}\n\tn, read := decVint64(p, read)\n\tif read == 0 {\n\t\treturn 0, 0, false\n\t}\n\treturn decZigZag32(d), decZigZag64(n), true\n}\n\nfunc decDaysNanos64(p []byte) (int64, int64, bool) {\n\td, read := decVint3264(p, 1)\n\tif read == 0 {\n\t\treturn 0, 0, false\n\t}\n\tn, read := decVint64(p, read)\n\tif read == 0 {\n\t\treturn 0, 0, false\n\t}\n\treturn decZigZag64(d), decZigZag64(n), true\n}\n\nfunc decNanos64(p []byte) (int64, bool) {\n\tn, read := decVint64(p, 2)\n\tif read == 0 {\n\t\treturn 0, false\n\t}\n\treturn decZigZag64(n), true\n}\n\nfunc decNanosDur(p []byte) (time.Duration, bool) {\n\tn, read := decVint64(p, 2)\n\tif read == 0 {\n\t\treturn 0, false\n\t}\n\treturn decZigZagDur(n), true\n}\n\nfunc decDaysNanosDur(p []byte) (time.Duration, time.Duration, bool) {\n\td, read := decVint3264(p, 1)\n\tif read == 0 {\n\t\treturn 0, 0, false\n\t}\n\tn, read := decVint64(p, read)\n\tif read == 0 {\n\t\treturn 0, 0, false\n\t}\n\treturn decZigZagDur(d), decZigZagDur(n), true\n}\n\nfunc decVint64(p []byte, s int) (uint64, int) {\n\tvintLen := decVintLen(p[s:])\n\tif vintLen+s != len(p) {\n\t\treturn 0, 0\n\t}\n\tswitch vintLen {\n\tcase 9:\n\t\treturn dec9Vint64(p[s:]), s + 9\n\tcase 8:\n\t\treturn dec8Vint64(p[s:]), s + 8\n\tcase 7:\n\t\treturn dec7Vint64(p[s:]), s + 7\n\tcase 6:\n\t\treturn dec6Vint64(p[s:]), s + 6\n\tcase 5:\n\t\treturn dec5Vint64(p[s:]), s + 5\n\tcase 4:\n\t\treturn dec4Vint64(p[s:]), s + 4\n\tcase 3:\n\t\treturn dec3Vint64(p[s:]), s + 3\n\tcase 2:\n\t\treturn dec2Vint64(p[s:]), s + 2\n\tcase 1:\n\t\treturn dec1Vint64(p[s:]), s + 1\n\tcase 
0:\n\t\treturn 0, s + 1\n\tdefault:\n\t\treturn 0, 0\n\t}\n}\n\nfunc decVint32(p []byte, s int) (uint32, int) {\n\tvintLen := decVintLen(p[s:])\n\tif vintLen+s >= len(p) {\n\t\treturn 0, 0\n\t}\n\tswitch vintLen {\n\tcase 5:\n\t\tif p[s] != vintPrefix4 {\n\t\t\treturn 0, 0\n\t\t}\n\t\treturn dec5Vint32(p[s:]), s + 5\n\tcase 4:\n\t\treturn dec4Vint32(p[s:]), s + 4\n\tcase 3:\n\t\treturn dec3Vint32(p[s:]), s + 3\n\tcase 2:\n\t\treturn dec2Vint32(p[s:]), s + 2\n\tcase 1:\n\t\treturn dec1Vint32(p[s:]), s + 1\n\tcase 0:\n\t\treturn 0, s + 1\n\tdefault:\n\t\treturn 0, 0\n\t}\n}\n\nfunc decVint3264(p []byte, s int) (uint64, int) {\n\tvintLen := decVintLen(p[s:])\n\tif vintLen+s >= len(p) {\n\t\treturn 0, 0\n\t}\n\tswitch vintLen {\n\tcase 5:\n\t\tif p[s] != vintPrefix4 {\n\t\t\treturn 0, 0\n\t\t}\n\t\treturn dec5Vint64(p[s:]), s + 5\n\tcase 4:\n\t\treturn dec4Vint64(p[s:]), s + 4\n\tcase 3:\n\t\treturn dec3Vint64(p[s:]), s + 3\n\tcase 2:\n\t\treturn dec2Vint64(p[s:]), s + 2\n\tcase 1:\n\t\treturn dec1Vint64(p[s:]), s + 1\n\tcase 0:\n\t\treturn 0, s + 1\n\tdefault:\n\t\treturn 0, 0\n\t}\n}\n\nfunc decVintLen(p []byte) int {\n\tswitch {\n\tcase p[0] == 255:\n\t\treturn 9\n\tcase p[0]>>1 == 127:\n\t\treturn 8\n\tcase p[0]>>2 == 63:\n\t\treturn 7\n\tcase p[0]>>3 == 31:\n\t\treturn 6\n\tcase p[0]>>4 == 15:\n\t\treturn 5\n\tcase p[0]>>5 == 7:\n\t\treturn 4\n\tcase p[0]>>6 == 3:\n\t\treturn 3\n\tcase p[0]>>7 == 1:\n\t\treturn 2\n\tdefault:\n\t\treturn 1\n\t}\n}\n\nfunc decZigZag32(n uint32) int32 {\n\treturn int32((n >> 1) ^ -(n & 1))\n}\n\nfunc decZigZag64(n uint64) int64 {\n\treturn int64((n >> 1) ^ -(n & 1))\n}\n\nfunc decZigZagDur(n uint64) time.Duration {\n\treturn time.Duration((n >> 1) ^ -(n & 1))\n}\n\nfunc dec5Vint32(p []byte) uint32 {\n\treturn uint32(p[1])<<24 | uint32(p[2])<<16 | uint32(p[3])<<8 | uint32(p[4])\n}\n\nfunc dec4Vint32(p []byte) uint32 {\n\treturn uint32(p[0]&^vintPrefix3)<<24 | uint32(p[1])<<16 | uint32(p[2])<<8 | uint32(p[3])\n}\n\nfunc dec3Vint32(p 
[]byte) uint32 {\n\treturn uint32(p[0]&^vintPrefix2)<<16 | uint32(p[1])<<8 | uint32(p[2])\n}\n\nfunc dec2Vint32(p []byte) uint32 {\n\treturn uint32(p[0]&^vintPrefix1)<<8 | uint32(p[1])\n}\n\nfunc dec1Vint32(p []byte) uint32 {\n\treturn uint32(p[0])\n}\n\nfunc dec9Vint64(p []byte) uint64 {\n\treturn uint64(p[1])<<56 | uint64(p[2])<<48 | uint64(p[3])<<40 | uint64(p[4])<<32 | uint64(p[5])<<24 | uint64(p[6])<<16 | uint64(p[7])<<8 | uint64(p[8])\n}\n\nfunc dec8Vint64(p []byte) uint64 {\n\treturn uint64(p[0]&^vintPrefix7)<<56 | uint64(p[1])<<48 | uint64(p[2])<<40 | uint64(p[3])<<32 | uint64(p[4])<<24 | uint64(p[5])<<16 | uint64(p[6])<<8 | uint64(p[7])\n}\n\nfunc dec7Vint64(p []byte) uint64 {\n\treturn uint64(p[0]&^vintPrefix6)<<48 | uint64(p[1])<<40 | uint64(p[2])<<32 | uint64(p[3])<<24 | uint64(p[4])<<16 | uint64(p[5])<<8 | uint64(p[6])\n}\n\nfunc dec6Vint64(p []byte) uint64 {\n\treturn uint64(p[0]&^vintPrefix5)<<40 | uint64(p[1])<<32 | uint64(p[2])<<24 | uint64(p[3])<<16 | uint64(p[4])<<8 | uint64(p[5])\n}\n\nfunc dec5Vint64(p []byte) uint64 {\n\treturn uint64(p[0]&^vintPrefix4)<<32 | uint64(p[1])<<24 | uint64(p[2])<<16 | uint64(p[3])<<8 | uint64(p[4])\n}\n\nfunc dec4Vint64(p []byte) uint64 {\n\treturn uint64(p[0]&^vintPrefix3)<<24 | uint64(p[1])<<16 | uint64(p[2])<<8 | uint64(p[3])\n}\n\nfunc dec3Vint64(p []byte) uint64 {\n\treturn uint64(p[0]&^vintPrefix2)<<16 | uint64(p[1])<<8 | uint64(p[2])\n}\n\nfunc dec2Vint64(p []byte) uint64 {\n\treturn uint64(p[0]&^vintPrefix1)<<8 | uint64(p[1])\n}\n\nfunc dec1Vint64(p []byte) uint64 {\n\treturn uint64(p[0])\n}\n"
  },
  {
    "path": "serialization/duration/unmarshal_vint_test.go",
    "content": "package duration\n\nimport (\n\t\"math\"\n\t\"testing\"\n)\n\nfunc TestDecVint32(t *testing.T) {\n\tfor i := int32(math.MaxInt32); i != 1; i = i / 2 {\n\t\ttestDec32(t, i)\n\t\ttestDec32(t, -i-1)\n\t}\n}\n\nfunc TestDecVint64(t *testing.T) {\n\tfor i := int64(math.MaxInt64); i != 1; i = i / 2 {\n\t\ttestDec64(t, i)\n\t\ttestDec64(t, -i-1)\n\t}\n}\n\nfunc testDec32(t *testing.T, expected int32) {\n\tt.Helper()\n\t// appending one byte is necessary because the `decVint32` function looks at the length of the data for the next vint len read.\n\tdata := append(genVintData(int64(expected)), 0)\n\n\tvint, read := decVint32(data, 0)\n\tif read == 0 {\n\t\tt.Fatalf(\"decVint32 function can`t read vint data: value %d, data %b\", expected, data)\n\t}\n\n\treceived := decZigZag32(vint)\n\tif expected != received {\n\t\tt.Fatalf(\"\\nexpected:%d\\nreceived:%d\\ndata:%b\", expected, received, data)\n\t}\n}\n\nfunc testDec64(t *testing.T, expected int64) {\n\tt.Helper()\n\tdata := genVintData(int64(expected))\n\n\tvint, read := decVint64(data, 0)\n\tif read == 0 {\n\t\tt.Fatalf(\"decVint64 function can`t read vint data: value %d, data %b\", expected, data)\n\t}\n\n\treceived := decZigZag64(vint)\n\tif expected != received {\n\t\tt.Fatalf(\"\\nexpected:%d\\nreceived:%d\\ndata:%b\", expected, received, data)\n\t}\n}\n"
  },
  {
    "path": "serialization/float/marshal.go",
    "content": "package float\n\nimport (\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase float32:\n\t\treturn EncFloat32(v)\n\tcase *float32:\n\t\treturn EncFloat32R(v)\n\tdefault:\n\t\t// Custom types (type MyFloat float32) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/float/marshal_utils.go",
    "content": "package float\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"unsafe\"\n)\n\nfunc EncFloat32(v float32) ([]byte, error) {\n\treturn encFloat32(v), nil\n}\n\nfunc EncFloat32R(v *float32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encFloat32R(v), nil\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.Float32:\n\t\treturn encFloat32(float32(v.Float())), nil\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal float: unsupported value type (%T)(%[1]v), supported types: ~float32, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal float: unsupported value type (%T)(%[1]v), supported types: ~float32, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encFloat32(v float32) []byte {\n\treturn encUint32(floatToUint(v))\n}\n\nfunc encFloat32R(v *float32) []byte {\n\treturn encUint32(floatToUintR(v))\n}\n\nfunc encUint32(v uint32) []byte {\n\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n\nfunc floatToUint(v float32) uint32 {\n\treturn *(*uint32)(unsafe.Pointer(&v))\n}\n\nfunc floatToUintR(v *float32) uint32 {\n\treturn *(*uint32)(unsafe.Pointer(v))\n}\n"
  },
  {
    "path": "serialization/float/unmarshal.go",
    "content": "package float\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\tcase *float32:\n\t\treturn DecFloat32(data, v)\n\tcase **float32:\n\t\treturn DecFloat32R(data, v)\n\tdefault:\n\t\t// Custom types (type MyFloat float32) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal float: unsupported value type (%T)(%[1]v), supported types: ~float32\", v)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/float/unmarshal_utils.go",
    "content": "package float\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"unsafe\"\n)\n\nvar errWrongDataLen = fmt.Errorf(\"failed to unmarshal float: the length of the data should be 0 or 4\")\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal float: can not unmarshal into nil reference(%T)(%[1]v)\", v)\n}\n\nfunc DecFloat32(p []byte, v *float32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 4:\n\t\t*v = decFloat32(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecFloat32R(p []byte, v **float32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(float32)\n\t\t}\n\tcase 4:\n\t\t*v = decFloat32R(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Float32:\n\t\treturn decReflectFloat32(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal float: unsupported value type (%T)(%[1]v), supported types: ~float32\", v.Interface())\n\t}\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v.Type().Elem().Elem().Kind() {\n\tcase reflect.Float32:\n\t\treturn decReflectFloat32R(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal float: unsupported value type (%T)(%[1]v), supported types: ~float32\", v.Interface())\n\t}\n}\n\nfunc decReflectFloat32(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetFloat(0)\n\tcase 4:\n\t\tv.SetFloat(float64(decFloat32(p)))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectFloat32R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 4:\n\t\tval 
:= reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetFloat(float64(decFloat32(p)))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectNullableR(p []byte, v reflect.Value) reflect.Value {\n\tif p == nil {\n\t\treturn reflect.Zero(v.Elem().Type())\n\t}\n\treturn reflect.New(v.Type().Elem().Elem())\n}\n\nfunc decFloat32(p []byte) float32 {\n\treturn uint32ToFloat(decUint32(p))\n}\n\nfunc decFloat32R(p []byte) *float32 {\n\treturn uint32ToFloatR(decUint32(p))\n}\n\nfunc uint32ToFloat(v uint32) float32 {\n\treturn *(*float32)(unsafe.Pointer(&v))\n}\n\nfunc uint32ToFloatR(v uint32) *float32 {\n\tf := *(*float32)(unsafe.Pointer(&v))\n\treturn &f\n}\n\nfunc decUint32(p []byte) uint32 {\n\treturn uint32(p[0])<<24 | uint32(p[1])<<16 | uint32(p[2])<<8 | uint32(p[3])\n}\n"
  },
  {
    "path": "serialization/inet/marshal.go",
    "content": "package inet\n\nimport (\n\t\"net\"\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase []byte:\n\t\treturn EncBytes(v)\n\tcase *[]byte:\n\t\treturn EncBytesR(v)\n\tcase net.IP:\n\t\treturn EncNetIP(v)\n\tcase *net.IP:\n\t\treturn EncNetIPr(v)\n\tcase [4]byte:\n\t\treturn EncArray4(v)\n\tcase *[4]byte:\n\t\treturn EncArray4R(v)\n\tcase [16]byte:\n\t\treturn EncArray16(v)\n\tcase *[16]byte:\n\t\treturn EncArray16R(v)\n\tcase string:\n\t\treturn EncString(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tdefault:\n\t\t// Custom types (type MyIP []byte) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/inet/marshal_utils.go",
    "content": "package inet\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"reflect\"\n)\n\nfunc EncBytes(v []byte) ([]byte, error) {\n\tswitch len(v) {\n\tcase 0:\n\t\tif v == nil {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn make([]byte, 0), nil\n\tcase 4:\n\t\ttmp := make([]byte, 4)\n\t\tcopy(tmp, v)\n\t\treturn tmp, nil\n\tcase 16:\n\t\ttmp := make([]byte, 16)\n\t\tcopy(tmp, v)\n\t\treturn tmp, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal inet: the ([]byte) length can be 0,4,16\")\n\t}\n}\n\nfunc EncBytesR(v *[]byte) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncBytes(*v)\n}\n\nfunc EncNetIP(v net.IP) ([]byte, error) {\n\tswitch len(v) {\n\tcase 0:\n\t\tif v == nil {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn make([]byte, 0), nil\n\tcase 4, 16:\n\t\tt := v.To4()\n\t\tif t == nil {\n\t\t\treturn v.To16(), nil\n\t\t}\n\t\treturn t, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal inet: the (net.IP) length can be 0,4,16\")\n\t}\n}\n\nfunc EncNetIPr(v *net.IP) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncNetIP(*v)\n}\n\nfunc EncArray16(v [16]byte) ([]byte, error) {\n\ttmp := make([]byte, 16)\n\tcopy(tmp, v[:])\n\treturn tmp, nil\n}\n\nfunc EncArray16R(v *[16]byte) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncArray16(*v)\n}\n\nfunc EncArray4(v [4]byte) ([]byte, error) {\n\ttmp := make([]byte, 4)\n\tcopy(tmp, v[:])\n\treturn tmp, nil\n}\n\nfunc EncArray4R(v *[4]byte) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncArray4(*v)\n}\n\nfunc EncString(v string) ([]byte, error) {\n\tif len(v) == 0 {\n\t\treturn nil, nil\n\t}\n\tb := net.ParseIP(v)\n\tif b != nil {\n\t\tt := b.To4()\n\t\tif t == nil {\n\t\t\treturn b.To16(), nil\n\t\t}\n\t\treturn t, nil\n\t}\n\treturn nil, fmt.Errorf(\"failed to marshal inet: invalid IP string %s\", v)\n}\n\nfunc EncStringR(v *string) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn 
EncString(*v)\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.Array:\n\t\tif l := v.Len(); v.Type().Elem().Kind() != reflect.Uint8 || (l != 16 && l != 4) {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP, unsetColumn\", v.Interface())\n\t\t}\n\t\tnv := reflect.New(v.Type())\n\t\tnv.Elem().Set(v)\n\t\treturn nv.Elem().Bytes(), nil\n\tcase reflect.Slice:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP, unsetColumn\", v.Interface())\n\t\t}\n\t\treturn encReflectBytes(v)\n\tcase reflect.String:\n\t\treturn encReflectString(v)\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\tswitch ev := v.Elem(); ev.Kind() {\n\tcase reflect.Array:\n\t\tif l := v.Len(); ev.Type().Elem().Kind() != reflect.Uint8 || (l != 16 && l != 4) {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP, unsetColumn\", v.Interface())\n\t\t}\n\t\treturn v.Elem().Bytes(), nil\n\tcase reflect.Slice:\n\t\tif ev.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, 
~[4]byte, ~[16]byte, ~string, net.IP, unsetColumn\", v.Interface())\n\t\t}\n\t\treturn encReflectBytes(ev)\n\tcase reflect.String:\n\t\treturn encReflectString(ev)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc encReflectString(v reflect.Value) ([]byte, error) {\n\tval := v.String()\n\tif len(val) == 0 {\n\t\treturn nil, nil\n\t}\n\tb := net.ParseIP(val)\n\tif b != nil {\n\t\tt := b.To4()\n\t\tif t == nil {\n\t\t\treturn b.To16(), nil\n\t\t}\n\t\treturn t, nil\n\t}\n\treturn nil, fmt.Errorf(\"failed to marshal inet: invalid IP string (%T)(%[1]v)\", v.Interface())\n}\n\nfunc encReflectBytes(v reflect.Value) ([]byte, error) {\n\tval := v.Bytes()\n\tswitch len(val) {\n\tcase 0:\n\t\tif val == nil {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn make([]byte, 0), nil\n\tcase 4:\n\t\ttmp := make([]byte, 4)\n\t\tcopy(tmp, val)\n\t\treturn tmp, nil\n\tcase 16:\n\t\ttmp := make([]byte, 16)\n\t\tcopy(tmp, val)\n\t\treturn tmp, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal inet: the (%T) length can be 0,4,16\", v.Interface())\n\t}\n}\n"
  },
  {
    "path": "serialization/inet/unmarshal.go",
    "content": "package inet\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\tcase *[]byte:\n\t\treturn DecBytes(data, v)\n\tcase **[]byte:\n\t\treturn DecBytesR(data, v)\n\tcase *net.IP:\n\t\treturn DecNetIP(data, v)\n\tcase **net.IP:\n\t\treturn DecNetIPr(data, v)\n\tcase *[4]byte:\n\t\treturn DecArray4(data, v)\n\tcase **[4]byte:\n\t\treturn DecArray4R(data, v)\n\tcase *[16]byte:\n\t\treturn DecArray16(data, v)\n\tcase **[16]byte:\n\t\treturn DecArray16R(data, v)\n\tcase *string:\n\t\treturn DecString(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tdefault:\n\t\t// Custom types (type MyIP []byte) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP\", v)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/inet/unmarshal_utils.go",
    "content": "package inet\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net/netip\"\n\t\"reflect\"\n\t\"unsafe\"\n)\n\nvar (\n\terrWrongDataLen = fmt.Errorf(\"failed to unmarshal inet: the length of the data can be 0,4,16\")\n\n\tdigits = getDigits()\n)\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal inet: can not unmarshal into nil reference(%T)(%[1]v)\", v)\n}\n\nfunc DecBytes(p []byte, v *[]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = make([]byte, 0)\n\t\t}\n\tcase 4:\n\t\t*v = make([]byte, 4)\n\t\tcopy(*v, p)\n\tcase 16:\n\t\t*v = make([]byte, 16)\n\t\tcopy(*v, p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecBytesR(p []byte, v **[]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\ttmp := make([]byte, 0)\n\t\t\t*v = &tmp\n\t\t}\n\tcase 4:\n\t\t*v = &[]byte{0, 0, 0, 0}\n\t\tcopy(**v, p)\n\tcase 16:\n\t\t*v = &[]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}\n\t\tcopy(**v, p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecNetIP(p []byte, v *net.IP) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = make(net.IP, 0)\n\t\t}\n\tcase 4:\n\t\t*v = make(net.IP, 4)\n\t\tcopy(*v, p)\n\tcase 16:\n\t\t*v = make(net.IP, 16)\n\t\tcopy(*v, p)\n\t\tif v4 := v.To4(); v4 != nil {\n\t\t\t*v = v4\n\t\t}\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecNetIPr(p []byte, v **net.IP) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\ttmp := make(net.IP, 0)\n\t\t\t*v = &tmp\n\t\t}\n\tcase 4:\n\t\t*v = &net.IP{0, 0, 0, 0}\n\t\tcopy(**v, p)\n\tcase 16:\n\t\t*v = &net.IP{0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}\n\t\tcopy(**v, p)\n\t\tif v4 := (*v).To4(); v4 != nil {\n\t\t\t**v = v4\n\t\t}\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecArray4(p []byte, v *[4]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = [4]byte{}\n\tcase 4:\n\t\t*v = [4]byte{}\n\t\tcopy(v[:], p)\n\tcase 16:\n\t\tif !isFist10Zeros(p) {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal inet: can not unmarshal ipV6 into [4]byte\")\n\t\t}\n\t\t*v = [4]byte{}\n\t\tcopy(v[:], p[12:16])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecArray4R(p []byte, v **[4]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = &[4]byte{}\n\t\t}\n\tcase 4:\n\t\t*v = &[4]byte{}\n\t\tcopy((*v)[:], p)\n\tcase 16:\n\t\tif !isFist10Zeros(p) {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal inet: can not unmarshal ipV6 into [4]byte\")\n\t\t}\n\t\t*v = &[4]byte{}\n\t\tcopy((*v)[:], p[12:16])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecArray16(p []byte, v *[16]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = [16]byte{}\n\tcase 4, 16:\n\t\t*v = [16]byte{}\n\t\tcopy(v[:], p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecArray16R(p []byte, v **[16]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = &[16]byte{}\n\t\t}\n\tcase 4, 16:\n\t\t*v = &[16]byte{}\n\t\tcopy((*v)[:], p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = \"\"\n\t\t} else {\n\t\t\t*v = \"0.0.0.0\"\n\t\t}\n\tcase 4:\n\t\t*v = 
decString4(p)\n\tcase 16:\n\t\t*v = decString16(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\ttmp := \"0.0.0.0\"\n\t\t\t*v = &tmp\n\t\t}\n\tcase 4:\n\t\ttmp := decString4(p)\n\t\t*v = &tmp\n\tcase 16:\n\t\ttmp := decString16(p)\n\t\t*v = &tmp\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Array:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP\", v.Interface())\n\t\t}\n\t\tswitch v.Len() {\n\t\tcase 4:\n\t\t\treturn decReflectArray4(p, v)\n\t\tcase 16:\n\t\t\treturn decReflectArray16(p, v)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"failed to unmarshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP\", v.Interface())\n\t\t}\n\tcase reflect.Slice:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP\", v.Interface())\n\t\t}\n\t\treturn decReflectBytes(p, v)\n\tcase reflect.String:\n\t\treturn decReflectString(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP\", v.Interface())\n\t}\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tev := v.Elem()\n\tswitch evt := ev.Type().Elem(); evt.Kind() {\n\tcase reflect.Array:\n\t\tif evt.Elem().Kind() != reflect.Uint8 
{\n\t\t\treturn fmt.Errorf(\"failed to unmarshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP\", v.Interface())\n\t\t}\n\t\tswitch ev.Len() {\n\t\tcase 4:\n\t\t\treturn decReflectArray4R(p, ev)\n\t\tcase 16:\n\t\t\treturn decReflectArray16R(p, ev)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"failed to unmarshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP\", v.Interface())\n\t\t}\n\tcase reflect.Slice:\n\t\tif evt.Elem().Kind() != reflect.Uint8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP\", v.Interface())\n\t\t}\n\t\treturn decReflectBytesR(p, ev)\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, ev)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal inet: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[4]byte, ~[16]byte, ~string, net.IP\", v.Interface())\n\t}\n}\n\nfunc decReflectArray4(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetZero()\n\tcase 4:\n\t\tval := reflect.New(v.Type())\n\t\tcopy((*[4]byte)(val.UnsafePointer())[:], p)\n\t\tv.Set(val.Elem())\n\tcase 16:\n\t\tif !isFist10Zeros(p) {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal inet: can not unmarshal ipV6 into (%T)\", v.Interface())\n\t\t}\n\t\tval := reflect.New(v.Type())\n\t\tcopy((*[4]byte)(val.UnsafePointer())[:], p[12:16])\n\t\tv.Set(val.Elem())\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal inet: to unmarshal into (%T) the length of the data can be 0,4,16\", v.Interface())\n\t}\n\treturn nil\n}\n\nfunc decReflectArray16(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetZero()\n\tcase 4, 16:\n\t\tval := reflect.New(v.Type())\n\t\tcopy((*[16]byte)(val.UnsafePointer())[:], p)\n\t\tv.Set(val.Elem())\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal inet: to unmarshal into (%T) the 
length of the data can be 0,4,16\", v.Interface())\n\t}\n\treturn nil\n}\n\nfunc decReflectBytes(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.SetBytes(nil)\n\t\t} else {\n\t\t\tv.SetBytes(make([]byte, 0))\n\t\t}\n\tcase 4:\n\t\ttmp := make([]byte, 4)\n\t\tcopy(tmp, p)\n\t\tv.SetBytes(tmp)\n\tcase 16:\n\t\ttmp := make([]byte, 16)\n\t\tcopy(tmp, p)\n\t\tv.SetBytes(tmp)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectString(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.SetString(\"\")\n\t\t} else {\n\t\t\tv.SetString(\"0.0.0.0\")\n\t\t}\n\tcase 4:\n\t\tv.SetString(decString4(p))\n\tcase 16:\n\t\tv.SetString(decString16(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectArray4R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.Set(reflect.Zero(v.Type()))\n\t\t} else {\n\t\t\tval := reflect.New(v.Type().Elem())\n\t\t\tv.Set(val)\n\t\t}\n\tcase 4:\n\t\tval := reflect.New(v.Type().Elem())\n\t\tcopy((*[4]byte)(val.UnsafePointer())[:], p)\n\t\tv.Set(val)\n\tcase 16:\n\t\tif !isFist10Zeros(p) {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal inet: can not unmarshal ipV6 into (%T)\", v.Interface())\n\t\t}\n\t\tval := reflect.New(v.Type().Elem())\n\t\tcopy((*[4]byte)(val.UnsafePointer())[:], p[12:16])\n\t\tv.Set(val)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal inet: to unmarshal into (%T) the length of the data can be 0,4,16\", v.Interface())\n\t}\n\treturn nil\n}\n\nfunc decReflectArray16R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.Set(reflect.Zero(v.Type()))\n\t\t} else {\n\t\t\tval := reflect.New(v.Type().Elem())\n\t\t\tv.Set(val)\n\t\t}\n\tcase 4, 16:\n\t\tval := reflect.New(v.Type().Elem())\n\t\tcopy((*[16]byte)(val.UnsafePointer())[:], p)\n\t\tv.Set(val)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal 
inet: to unmarshal into (%T) the length of the data can be 0,4,16\", v.Interface())\n\t}\n\treturn nil\n}\n\nfunc decReflectBytesR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.Set(reflect.Zero(v.Type()))\n\t\t} else {\n\t\t\tval := reflect.New(v.Type().Elem())\n\t\t\tval.Elem().SetBytes(make([]byte, 0))\n\t\t\tv.Set(val)\n\t\t}\n\tcase 4:\n\t\ttmp := make([]byte, 4)\n\t\tcopy(tmp, p)\n\t\tval := reflect.New(v.Type().Elem())\n\t\tval.Elem().SetBytes(tmp)\n\t\tv.Set(val)\n\tcase 16:\n\t\ttmp := make([]byte, 16)\n\t\tcopy(tmp, p)\n\t\tval := reflect.New(v.Type().Elem())\n\t\tval.Elem().SetBytes(tmp)\n\t\tv.Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.Set(reflect.Zero(v.Type()))\n\t\t} else {\n\t\t\tval := reflect.New(v.Type().Elem())\n\t\t\tval.Elem().SetString(\"0.0.0.0\")\n\t\t\tv.Set(val)\n\t\t}\n\tcase 4:\n\t\tval := reflect.New(v.Type().Elem())\n\t\tval.Elem().SetString(decString4(p))\n\t\tv.Set(val)\n\tcase 16:\n\t\tval := reflect.New(v.Type().Elem())\n\t\tval.Elem().SetString(decString16(p))\n\t\tv.Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decString4(p []byte) string {\n\tout := make([]byte, 0, 15)\n\tfor _, x := range p {\n\t\tout = append(out, digits[x]...)\n\t}\n\treturn string(out[:len(out)-1])\n}\n\nfunc decString16(p []byte) string {\n\tif isV4MappedToV6(p) {\n\t\treturn decString4(p[12:16])\n\t}\n\treturn netip.AddrFrom16(*(*[16]byte)(unsafe.Pointer(&p[0]))).String()\n}\n\nfunc getDigits() []string {\n\tout := make([]string, 256)\n\tfor i := range out {\n\t\tout[i] = fmt.Sprintf(\"%d.\", i)\n\t}\n\treturn out\n}\n\nfunc isV4MappedToV6(p []byte) bool {\n\treturn p[0] == 0 && p[1] == 0 && p[2] == 0 && p[3] == 0 && p[4] == 0 &&\n\t\tp[5] == 0 && p[6] == 0 && p[7] == 0 && p[8] == 0 && p[9] == 0 && p[10] == 255 && p[11] == 
255\n}\n\nfunc isFist10Zeros(p []byte) bool {\n\treturn p[0] == 0 && p[1] == 0 && p[2] == 0 && p[3] == 0 && p[4] == 0 &&\n\t\tp[5] == 0 && p[6] == 0 && p[7] == 0 && p[8] == 0 && p[9] == 0\n}\n"
  },
  {
    "path": "serialization/smallint/marshal.go",
    "content": "package smallint\n\nimport (\n\t\"math/big\"\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase int8:\n\t\treturn EncInt8(v)\n\tcase int32:\n\t\treturn EncInt32(v)\n\tcase int16:\n\t\treturn EncInt16(v)\n\tcase int64:\n\t\treturn EncInt64(v)\n\tcase int:\n\t\treturn EncInt(v)\n\n\tcase uint8:\n\t\treturn EncUint8(v)\n\tcase uint16:\n\t\treturn EncUint16(v)\n\tcase uint32:\n\t\treturn EncUint32(v)\n\tcase uint64:\n\t\treturn EncUint64(v)\n\tcase uint:\n\t\treturn EncUint(v)\n\n\tcase big.Int:\n\t\treturn EncBigInt(v)\n\tcase string:\n\t\treturn EncString(v)\n\n\tcase *int8:\n\t\treturn EncInt8R(v)\n\tcase *int16:\n\t\treturn EncInt16R(v)\n\tcase *int32:\n\t\treturn EncInt32R(v)\n\tcase *int64:\n\t\treturn EncInt64R(v)\n\tcase *int:\n\t\treturn EncIntR(v)\n\n\tcase *uint8:\n\t\treturn EncUint8R(v)\n\tcase *uint16:\n\t\treturn EncUint16R(v)\n\tcase *uint32:\n\t\treturn EncUint32R(v)\n\tcase *uint64:\n\t\treturn EncUint64R(v)\n\tcase *uint:\n\t\treturn EncUintR(v)\n\n\tcase *big.Int:\n\t\treturn EncBigIntR(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tdefault:\n\t\t// Custom types (type MyInt int) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/smallint/marshal_utils.go",
    "content": "package smallint\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\nconst supportedTypes = \"~int8, ~int16, ~int32, ~int64, ~int, ~uint8, ~uint16, ~uint32, ~uint64, ~uint, ~string, big.Int\"\n\nvar (\n\tmaxBigInt = big.NewInt(math.MaxInt16)\n\tminBigInt = big.NewInt(math.MinInt16)\n)\n\nfunc EncInt8(v int8) ([]byte, error) {\n\tif v < 0 {\n\t\treturn []byte{255, byte(v)}, nil\n\t}\n\treturn []byte{0, byte(v)}, nil\n}\n\nfunc EncInt8R(v *int8) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt8(*v)\n}\n\nfunc EncInt16(v int16) ([]byte, error) {\n\treturn encInt16(v), nil\n}\n\nfunc EncInt16R(v *int16) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt16(*v)\n}\n\nfunc EncInt32(v int32) ([]byte, error) {\n\tif v > math.MaxInt16 || v < math.MinInt16 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal smallint: value %#v out of range\", v)\n\t}\n\treturn []byte{byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncInt32R(v *int32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt32(*v)\n}\n\nfunc EncInt64(v int64) ([]byte, error) {\n\tif v > math.MaxInt16 || v < math.MinInt16 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal smallint: value %#v out of range\", v)\n\t}\n\treturn encInt64(v), nil\n}\n\nfunc EncInt64R(v *int64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt64(*v)\n}\n\nfunc EncInt(v int) ([]byte, error) {\n\tif v > math.MaxInt16 || v < math.MinInt16 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal smallint: value %#v out of range\", v)\n\t}\n\treturn []byte{byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncIntR(v *int) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt(*v)\n}\n\nfunc EncUint8(v uint8) ([]byte, error) {\n\treturn []byte{0, v}, nil\n}\n\nfunc EncUint8R(v *uint8) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint8(*v)\n}\n\nfunc EncUint16(v 
uint16) ([]byte, error) {\n\treturn []byte{byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncUint16R(v *uint16) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint16(*v)\n}\n\nfunc EncUint32(v uint32) ([]byte, error) {\n\tif v > math.MaxUint16 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal smallint: value %#v out of range\", v)\n\t}\n\treturn []byte{byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncUint32R(v *uint32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint32(*v)\n}\n\nfunc EncUint64(v uint64) ([]byte, error) {\n\tif v > math.MaxUint16 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal smallint: value %#v out of range\", v)\n\t}\n\treturn encUint64(v), nil\n}\n\nfunc EncUint64R(v *uint64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint64(*v)\n}\n\nfunc EncUint(v uint) ([]byte, error) {\n\tif v > math.MaxUint16 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal smallint: value %#v out of range\", v)\n\t}\n\treturn []byte{byte(v >> 8), byte(v)}, nil\n}\n\nfunc EncUintR(v *uint) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint(*v)\n}\n\nfunc EncBigInt(v big.Int) ([]byte, error) {\n\tif v.Cmp(maxBigInt) == 1 || v.Cmp(minBigInt) == -1 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal smallint: value (%T)(%s) out of range\", v, v.String())\n\t}\n\treturn encInt64(v.Int64()), nil\n}\n\nfunc EncBigIntR(v *big.Int) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\tif v.Cmp(maxBigInt) == 1 || v.Cmp(minBigInt) == -1 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal smallint: value (%T)(%s) out of range\", v, v.String())\n\t}\n\treturn encInt64(v.Int64()), nil\n}\n\nfunc EncString(v string) ([]byte, error) {\n\tif v == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tn, err := strconv.ParseInt(v, 10, 16)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to marshal smallint: can not marshal (%T)(%[1]v) %s\", v, err)\n\t}\n\treturn encInt64(n), nil\n}\n\nfunc 
EncStringR(v *string) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncString(*v)\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Type().Kind() {\n\tcase reflect.Int8:\n\t\tval := v.Int()\n\t\tif val < 0 {\n\t\t\treturn []byte{255, byte(val)}, nil\n\t\t}\n\t\treturn []byte{0, byte(val)}, nil\n\tcase reflect.Int16:\n\t\treturn encInt64(v.Int()), nil\n\tcase reflect.Int, reflect.Int64, reflect.Int32:\n\t\tval := v.Int()\n\t\tif val > math.MaxInt16 || val < math.MinInt16 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal smallint: custom type value (%T)(%[1]v) out of range\", v.Interface())\n\t\t}\n\t\treturn encInt64(val), nil\n\tcase reflect.Uint8:\n\t\treturn []byte{0, byte(v.Uint())}, nil\n\tcase reflect.Uint16:\n\t\treturn encUint64(v.Uint()), nil\n\tcase reflect.Uint, reflect.Uint64, reflect.Uint32:\n\t\tval := v.Uint()\n\t\tif val > math.MaxUint16 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal smallint: custom type value (%T)(%[1]v) out of range\", v.Interface())\n\t\t}\n\t\treturn encUint64(val), nil\n\tcase reflect.String:\n\t\tval := v.String()\n\t\tif val == \"\" {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tn, err := strconv.ParseInt(val, 10, 16)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal smallint: can not marshal (%T)(%[1]v), %s\", v.Interface(), err)\n\t\t}\n\t\treturn encInt64(n), nil\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal smallint: unsupported value type (%T)(%[1]v), supported types: %s, unsetColumn\", v.Interface(), supportedTypes)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal smallint: unsupported value type (%T)(%[1]v), supported types: %s, unsetColumn\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encInt16(v int16) 
[]byte {\n\treturn []byte{byte(v >> 8), byte(v)}\n}\n\nfunc encInt64(v int64) []byte {\n\treturn []byte{byte(v >> 8), byte(v)}\n}\n\nfunc encUint64(v uint64) []byte {\n\treturn []byte{byte(v >> 8), byte(v)}\n}\n"
  },
  {
    "path": "serialization/smallint/unmarshal.go",
    "content": "package smallint\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\n\tcase *int8:\n\t\treturn DecInt8(data, v)\n\tcase *int16:\n\t\treturn DecInt16(data, v)\n\tcase *int32:\n\t\treturn DecInt32(data, v)\n\tcase *int64:\n\t\treturn DecInt64(data, v)\n\tcase *int:\n\t\treturn DecInt(data, v)\n\n\tcase *uint8:\n\t\treturn DecUint8(data, v)\n\tcase *uint16:\n\t\treturn DecUint16(data, v)\n\tcase *uint32:\n\t\treturn DecUint32(data, v)\n\tcase *uint64:\n\t\treturn DecUint64(data, v)\n\tcase *uint:\n\t\treturn DecUint(data, v)\n\n\tcase *big.Int:\n\t\treturn DecBigInt(data, v)\n\tcase *string:\n\t\treturn DecString(data, v)\n\n\tcase **int8:\n\t\treturn DecInt8R(data, v)\n\tcase **int16:\n\t\treturn DecInt16R(data, v)\n\tcase **int32:\n\t\treturn DecInt32R(data, v)\n\tcase **int64:\n\t\treturn DecInt64R(data, v)\n\tcase **int:\n\t\treturn DecIntR(data, v)\n\n\tcase **uint8:\n\t\treturn DecUint8R(data, v)\n\tcase **uint16:\n\t\treturn DecUint16R(data, v)\n\tcase **uint32:\n\t\treturn DecUint32R(data, v)\n\tcase **uint64:\n\t\treturn DecUint64R(data, v)\n\tcase **uint:\n\t\treturn DecUintR(data, v)\n\n\tcase **big.Int:\n\t\treturn DecBigIntR(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tdefault:\n\n\t\t// Custom types (type MyInt int) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal smallint: unsupported value type (%T)(%[1]v)\", value)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/smallint/unmarshal_utils.go",
    "content": "package smallint\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\nconst (\n\tnegInt32 = int32(-1) << 16\n\tnegInt64 = int64(-1) << 16\n\tnegInt   = int(-1) << 16\n)\n\nvar errWrongDataLen = fmt.Errorf(\"failed to unmarshal smallint: the length of the data should be 0 or 2\")\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal smallint: can not unmarshal into nil reference (%T)(%[1]v))\", v)\n}\n\nfunc DecInt8(p []byte, v *int8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 2:\n\t\tval := decInt16(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal smallint: to unmarshal into int8, the data should be in the int8 range\")\n\t\t}\n\t\t*v = int8(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt8R(p []byte, v **int8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int8)\n\t\t}\n\tcase 2:\n\t\tval := decInt16(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal smallint: to unmarshal into int8, the data should be in the int8 range\")\n\t\t}\n\t\ttmp := int8(val)\n\t\t*v = &tmp\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt16(p []byte, v *int16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 2:\n\t\t*v = decInt16(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt16R(p []byte, v **int16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int16)\n\t\t}\n\tcase 2:\n\t\tval := decInt16(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc 
DecInt32(p []byte, v *int32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 2:\n\t\t*v = decInt32(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt32R(p []byte, v **int32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int32)\n\t\t}\n\tcase 2:\n\t\tval := decInt32(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt64(p []byte, v *int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 2:\n\t\t*v = decInt64(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt64R(p []byte, v **int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int64)\n\t\t}\n\tcase 2:\n\t\tval := decInt64(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt(p []byte, v *int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 2:\n\t\t*v = decInt(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecIntR(p []byte, v **int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int)\n\t\t}\n\tcase 2:\n\t\tval := decInt(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint8(p []byte, v *uint8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 2:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal smallint: to unmarshal into uint8, the data should be in the uint8 range\")\n\t\t}\n\t\t*v = p[1]\n\tdefault:\n\t\treturn 
errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint8R(p []byte, v **uint8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint8)\n\t\t}\n\tcase 2:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal smallint: to unmarshal into uint8, the data should be in the uint8 range\")\n\t\t}\n\t\tval := p[1]\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint16(p []byte, v *uint16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 2:\n\t\t*v = decUint16(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint16R(p []byte, v **uint16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint16)\n\t\t}\n\tcase 2:\n\t\tval := decUint16(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint32(p []byte, v *uint32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 2:\n\t\t*v = decUint32(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint32R(p []byte, v **uint32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint32)\n\t\t}\n\tcase 2:\n\t\tval := decUint32(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint64(p []byte, v *uint64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 2:\n\t\t*v = decUint64(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint64R(p []byte, v **uint64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 
0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint64)\n\t\t}\n\tcase 2:\n\t\tval := decUint64(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint(p []byte, v *uint) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 2:\n\t\t*v = decUint(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUintR(p []byte, v **uint) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint)\n\t\t}\n\tcase 2:\n\t\tval := decUint(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = \"\"\n\t\t} else {\n\t\t\t*v = \"0\"\n\t\t}\n\tcase 2:\n\t\t*v = strconv.FormatInt(decInt64(p), 10)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\tval := \"0\"\n\t\t\t*v = &val\n\t\t}\n\tcase 2:\n\t\tval := strconv.FormatInt(decInt64(p), 10)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecBigInt(p []byte, v *big.Int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt64(0)\n\tcase 2:\n\t\tv.SetInt64(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecBigIntR(p []byte, v **big.Int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = big.NewInt(0)\n\t\t}\n\tcase 2:\n\t\t*v = big.NewInt(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn 
nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal smallint: can not unmarshal into nil reference (%T)(%[1]v)\", v.Interface())\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Int8:\n\t\treturn decReflectInt8(p, v)\n\tcase reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\treturn decReflectInts(p, v)\n\tcase reflect.Uint8:\n\t\treturn decReflectUint8(p, v)\n\tcase reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\treturn decReflectUints(p, v)\n\tcase reflect.String:\n\t\treturn decReflectString(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal smallint: unsupported value type (%T)(%[1]v), supported types: %s\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal smallint: can not unmarshal into nil reference (%T)(%[1]v)\", v.Interface())\n\t}\n\n\tswitch v.Type().Elem().Elem().Kind() {\n\tcase reflect.Int8:\n\t\treturn decReflectInt8R(p, v)\n\tcase reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\treturn decReflectIntsR(p, v)\n\tcase reflect.Uint8:\n\t\treturn decReflectUint8R(p, v)\n\tcase reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\treturn decReflectUintsR(p, v)\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal smallint: unsupported value type (%T)(%[1]v), supported types: %s\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc decReflectInt8(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 2:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal smallint: to unmarshal into (%T), the data should be in the int8 range\", v.Interface())\n\t\t}\n\t\tv.SetInt(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc 
decReflectInts(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 2:\n\t\tv.SetInt(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint8(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 2:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal smallint: to unmarshal into (%T), the data should be in the uint8 range\", v.Interface())\n\t\t}\n\t\tv.SetUint(uint64(p[1]))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUints(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 2:\n\t\tv.SetUint(decUint64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectString(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p != nil {\n\t\t\tv.SetString(\"0\")\n\t\t} else {\n\t\t\tv.SetString(\"\")\n\t\t}\n\tcase 2:\n\t\tv.SetString(strconv.FormatInt(decInt64(p), 10))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectNullableR(p []byte, v reflect.Value) reflect.Value {\n\tif p == nil {\n\t\treturn reflect.Zero(v.Elem().Type())\n\t}\n\treturn reflect.New(v.Type().Elem().Elem())\n}\n\nfunc decReflectInt8R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 2:\n\t\tval := decInt64(p)\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal smallint: to unmarshal into (%T), the data should be in the int8 range\", v.Interface())\n\t\t}\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(val)\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectIntsR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 2:\n\t\tval := 
reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetInt(decInt64(p))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUint8R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 2:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal smallint: to unmarshal into (%T), the data should be in the uint8 range\", v.Interface())\n\t\t}\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(uint64(p[1]))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUintsR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 2:\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetUint(decUint64(p))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tvar val reflect.Value\n\t\tif p == nil {\n\t\t\tval = reflect.Zero(v.Type().Elem())\n\t\t} else {\n\t\t\tval = reflect.New(v.Type().Elem().Elem())\n\t\t\tval.Elem().SetString(\"0\")\n\t\t}\n\t\tv.Elem().Set(val)\n\tcase 2:\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetString(strconv.FormatInt(decInt64(p), 10))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decInt16(p []byte) int16 {\n\treturn int16(p[0])<<8 | int16(p[1])\n}\n\nfunc decInt32(p []byte) int32 {\n\tif p[0] > math.MaxInt8 {\n\t\treturn negInt32 | int32(p[0])<<8 | int32(p[1])\n\t}\n\treturn int32(p[0])<<8 | int32(p[1])\n}\n\nfunc decInt64(p []byte) int64 {\n\tif p[0] > math.MaxInt8 {\n\t\treturn negInt64 | int64(p[0])<<8 | int64(p[1])\n\t}\n\treturn int64(p[0])<<8 | int64(p[1])\n}\n\nfunc decInt(p []byte) int {\n\tif p[0] > math.MaxInt8 {\n\t\treturn negInt | int(p[0])<<8 | int(p[1])\n\t}\n\treturn 
int(p[0])<<8 | int(p[1])\n}\n\nfunc decUint16(p []byte) uint16 {\n\treturn uint16(p[0])<<8 | uint16(p[1])\n}\n\nfunc decUint32(p []byte) uint32 {\n\treturn uint32(p[0])<<8 | uint32(p[1])\n}\n\nfunc decUint64(p []byte) uint64 {\n\treturn uint64(p[0])<<8 | uint64(p[1])\n}\n\nfunc decUint(p []byte) uint {\n\treturn uint(p[0])<<8 | uint(p[1])\n}\n"
  },
  {
    "path": "serialization/text/marshal.go",
    "content": "package text\n\nimport (\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase string:\n\t\treturn EncString(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tcase []byte:\n\t\treturn EncBytes(v)\n\tcase *[]byte:\n\t\treturn EncBytesR(v)\n\tdefault:\n\t\t// Custom types (type MyString string) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(rv)\n\t\t}\n\t\treturn EncReflectR(rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/text/marshal_utils.go",
    "content": "package text\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc EncString(v string) ([]byte, error) {\n\treturn encString(v), nil\n}\n\nfunc EncStringR(v *string) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encString(*v), nil\n}\n\nfunc EncBytes(v []byte) ([]byte, error) {\n\treturn v, nil\n}\n\nfunc EncBytesR(v *[]byte) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn *v, nil\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.String:\n\t\treturn encString(v.String()), nil\n\tcase reflect.Slice:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal text: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte, unsetColumn\", v.Interface())\n\t\t}\n\t\treturn EncBytes(v.Bytes())\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal text: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal text: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encString(v string) []byte {\n\tif v == \"\" {\n\t\treturn make([]byte, 0)\n\t}\n\treturn []byte(v)\n}\n"
  },
  {
    "path": "serialization/text/unmarshal.go",
    "content": "package text\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\tcase *string:\n\t\treturn DecString(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tcase *[]byte:\n\t\treturn DecBytes(data, v)\n\tcase **[]byte:\n\t\treturn DecBytesR(data, v)\n\tcase *any:\n\t\treturn DecInterface(data, v)\n\tdefault:\n\t\t// Custom types (type MyString string) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal text: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/text/unmarshal_utils.go",
    "content": "package text\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal text: can not unmarshal into nil reference(%T)(%[1]v)\", v)\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decString(p)\n\treturn nil\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decStringR(p)\n\treturn nil\n}\n\nfunc DecBytes(p []byte, v *[]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tif p == nil {\n\t\t*v = nil\n\t\treturn nil\n\t}\n\tif len(p) == 0 {\n\t\t*v = make([]byte, 0)\n\t\treturn nil\n\t}\n\t*v = append((*v)[:0], p...)\n\treturn nil\n}\n\nfunc DecBytesR(p []byte, v **[]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decBytesR(p)\n\treturn nil\n}\n\nfunc DecInterface(p []byte, v *any) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decBytes(p)\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.String:\n\t\tv.SetString(decString(p))\n\tcase reflect.Slice:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn fmt.Errorf(\"failed to marshal text: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t\t}\n\t\tv.SetBytes(decBytes(p))\n\tcase reflect.Interface:\n\t\tv.Set(reflect.ValueOf(decBytes(p)))\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal text: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t}\n\treturn nil\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch ev := v.Type().Elem().Elem(); ev.Kind() {\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, v)\n\tcase reflect.Slice:\n\t\tif ev.Elem().Kind() != 
reflect.Uint8 {\n\t\t\treturn fmt.Errorf(\"failed to marshal text: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t\t}\n\t\treturn decReflectBytesR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal text: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t}\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\tv.Elem().Set(reflect.Zero(v.Type().Elem()))\n\t\t} else {\n\t\t\tv.Elem().Set(reflect.New(v.Type().Elem().Elem()))\n\t\t}\n\t\treturn nil\n\t}\n\tval := reflect.New(v.Type().Elem().Elem())\n\tval.Elem().SetString(string(p))\n\tv.Elem().Set(val)\n\treturn nil\n}\n\nfunc decReflectBytesR(p []byte, v reflect.Value) error {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\tv.Elem().Set(reflect.Zero(v.Elem().Type()))\n\t\t} else {\n\t\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\t\tval.Elem().SetBytes(make([]byte, 0))\n\t\t\tv.Elem().Set(val)\n\t\t}\n\t\treturn nil\n\t}\n\ttmp := make([]byte, len(p))\n\tcopy(tmp, p)\n\n\tval := reflect.New(v.Type().Elem().Elem())\n\tval.Elem().SetBytes(tmp)\n\tv.Elem().Set(val)\n\treturn nil\n}\n\nfunc decString(p []byte) string {\n\tif len(p) == 0 {\n\t\treturn \"\"\n\t}\n\treturn string(p)\n}\n\nfunc decStringR(p []byte) *string {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn new(string)\n\t}\n\ttmp := string(p)\n\treturn &tmp\n}\n\nfunc decBytes(p []byte) []byte {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn make([]byte, 0)\n\t}\n\ttmp := make([]byte, len(p))\n\tcopy(tmp, p)\n\treturn tmp\n}\n\nfunc decBytesR(p []byte) *[]byte {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\treturn nil\n\t\t}\n\t\ttmp := make([]byte, 0)\n\t\treturn &tmp\n\t}\n\ttmp := make([]byte, len(p))\n\tcopy(tmp, p)\n\treturn &tmp\n}\n"
  },
  {
    "path": "serialization/timestamp/marshal.go",
    "content": "package timestamp\n\nimport (\n\t\"reflect\"\n\t\"time\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase int64:\n\t\treturn EncInt64(v)\n\tcase *int64:\n\t\treturn EncInt64R(v)\n\tcase time.Time:\n\t\treturn EncTime(v)\n\tcase *time.Time:\n\t\treturn EncTimeR(v)\n\n\tdefault:\n\t\t// Custom types (type MyTime int64) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/timestamp/marshal_utils.go",
    "content": "package timestamp\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n)\n\nvar (\n\tmaxTimestamp = time.Date(292278994, 8, 17, 7, 12, 55, 807*1000000, time.UTC)\n\tminTimestamp = time.Date(-292275055, 5, 16, 16, 47, 4, 192*1000000, time.UTC)\n)\n\nfunc EncInt64(v int64) ([]byte, error) {\n\treturn encInt64(v), nil\n}\n\nfunc EncInt64R(v *int64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt64(*v)\n}\n\nfunc EncTime(v time.Time) ([]byte, error) {\n\tif v.After(maxTimestamp) || v.Before(minTimestamp) {\n\t\treturn nil, fmt.Errorf(\"failed to marshal timestamp: the (%T)(%s) value should be in the range from -292275055-05-16T16:47:04.192Z to 292278994-08-17T07:12:55.807\", v, v.Format(time.RFC3339Nano))\n\t}\n\t// It supposed to be v.UTC().UnixMilli(), for backward compatibility map `time.Time{}` to nil value\n\tif v.IsZero() {\n\t\treturn make([]byte, 0), nil\n\t}\n\tms := v.UTC().UnixMilli()\n\treturn []byte{byte(ms >> 56), byte(ms >> 48), byte(ms >> 40), byte(ms >> 32), byte(ms >> 24), byte(ms >> 16), byte(ms >> 8), byte(ms)}, nil\n}\n\nfunc EncTimeR(v *time.Time) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncTime(*v)\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.Int64:\n\t\treturn encInt64(v.Int()), nil\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal timestamp: unsupported value type (%T)(%[1]v), supported types: ~int64, time.Time, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal timestamp: unsupported value type (%T)(%[1]v), supported types: ~int64, time.Time, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encInt64(v int64) []byte {\n\treturn []byte{byte(v >> 56), 
byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n"
  },
  {
    "path": "serialization/timestamp/unmarshal.go",
    "content": "package timestamp\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\n\tcase *int64:\n\t\treturn DecInt64(data, v)\n\tcase **int64:\n\t\treturn DecInt64R(data, v)\n\tcase *time.Time:\n\t\treturn DecTime(data, v)\n\tcase **time.Time:\n\t\treturn DecTimeR(data, v)\n\tdefault:\n\n\t\t// Custom types (type MyTime int64) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal timestamp: unsupported value type (%T)(%[1]v), supported types: ~int64, time.Time\", value)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/timestamp/unmarshal_utils.go",
    "content": "package timestamp\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n)\n\nvar (\n\terrWrongDataLen = fmt.Errorf(\"failed to unmarshal timestamp: the length of the data should be 0 or 8\")\n)\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal timestamp: can not unmarshal into nil reference (%T)(%[1]v))\", v)\n}\n\nfunc DecInt64(p []byte, v *int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 8:\n\t\t*v = decInt64(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt64R(p []byte, v **int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int64)\n\t\t}\n\tcase 8:\n\t\tval := decInt64(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecTime(p []byte, v *time.Time) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t// supposed to be zero timestamp `time.UnixMilli(0).UTC()`, but for backward compatibility mapped to zero time\n\t\t*v = time.Time{}\n\tcase 8:\n\t\t*v = decTime(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecTimeR(p []byte, v **time.Time) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t// supposed to be zero timestamp `time.UnixMilli(0).UTC()`, but for backward compatibility mapped to zero time\n\t\t\tval := time.Time{}\n\t\t\t*v = &val\n\t\t}\n\tcase 8:\n\t\tval := decTime(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal timestamp: can not unmarshal into nil reference (%T)(%[1]v))\", v.Interface())\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase 
reflect.Int64:\n\t\treturn decReflectInt64(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal timestamp: unsupported value type (%T)(%[1]v), supported types: ~int64, time.Time\", v.Interface())\n\t}\n}\n\nfunc decReflectInt64(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 8:\n\t\tv.SetInt(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal timestamp: can not unmarshal into nil reference (%T)(%[1]v)\", v.Interface())\n\t}\n\n\tswitch v.Type().Elem().Elem().Kind() {\n\tcase reflect.Int64:\n\t\treturn decReflectIntsR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal timestamp: unsupported value type (%T)(%[1]v), supported types: ~int64, time.Time\", v.Interface())\n\t}\n}\n\nfunc decReflectIntsR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.Elem().Set(reflect.Zero(v.Elem().Type()))\n\t\t} else {\n\t\t\tv.Elem().Set(reflect.New(v.Type().Elem().Elem()))\n\t\t}\n\tcase 8:\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetInt(decInt64(p))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decInt64(p []byte) int64 {\n\treturn int64(p[0])<<56 | int64(p[1])<<48 | int64(p[2])<<40 | int64(p[3])<<32 | int64(p[4])<<24 | int64(p[5])<<16 | int64(p[6])<<8 | int64(p[7])\n}\n\nfunc decTime(p []byte) time.Time {\n\tmsec := decInt64(p)\n\treturn time.Unix(msec/1e3, (msec%1e3)*1e6).UTC()\n}\n"
  },
  {
    "path": "serialization/timeuuid/marshal.go",
    "content": "package timeuuid\n\nimport (\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase [16]byte:\n\t\treturn EncArray(v)\n\tcase *[16]byte:\n\t\treturn EncArrayR(v)\n\tcase []byte:\n\t\treturn EncSlice(v)\n\tcase *[]byte:\n\t\treturn EncSliceR(v)\n\tcase string:\n\t\treturn EncString(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tdefault:\n\t\t// Custom types (type MyUUID [16]byte) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(rv)\n\t\t}\n\t\treturn EncReflectR(rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/timeuuid/marshal_utils.go",
    "content": "package timeuuid\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n)\n\nfunc EncArray(v [16]byte) ([]byte, error) {\n\treturn v[:], nil\n}\n\nfunc EncArrayR(v *[16]byte) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn v[:], nil\n}\n\nfunc EncSlice(v []byte) ([]byte, error) {\n\tswitch len(v) {\n\tcase 0:\n\t\tif v == nil {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn make([]byte, 0), nil\n\tcase 16:\n\t\treturn v, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: the ([]byte) length should be 0 or 16\")\n\t}\n}\n\nfunc EncSliceR(v *[]byte) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncSlice(*v)\n}\n\nfunc EncString(v string) ([]byte, error) {\n\treturn encString(v)\n}\n\nfunc EncStringR(v *string) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encString(*v)\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.Array:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 || v.Len() != 16 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string, unsetColumn\", v.Interface())\n\t\t}\n\t\tnv := reflect.New(v.Type())\n\t\tnv.Elem().Set(v)\n\t\treturn nv.Elem().Bytes(), nil\n\tcase reflect.Slice:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string, unsetColumn\", v.Interface())\n\t\t}\n\t\treturn encReflectBytes(v)\n\tcase reflect.String:\n\t\treturn encReflectString(v)\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: timeuuid value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal 
timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\tswitch ev := v.Elem(); ev.Kind() {\n\tcase reflect.Array:\n\t\tif ev.Type().Elem().Kind() != reflect.Uint8 || ev.Len() != 16 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string, unsetColumn\", v.Interface())\n\t\t}\n\t\treturn v.Elem().Bytes(), nil\n\tcase reflect.Slice:\n\t\tif ev.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string, unsetColumn\", v.Interface())\n\t\t}\n\t\treturn encReflectBytes(ev)\n\tcase reflect.String:\n\t\treturn encReflectString(ev)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc encReflectBytes(rv reflect.Value) ([]byte, error) {\n\tswitch rv.Len() {\n\tcase 0:\n\t\tif rv.IsNil() {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn make([]byte, 0), nil\n\tcase 16:\n\t\treturn rv.Bytes(), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: the (%T) length should be 0 or 16\", rv.Interface())\n\t}\n}\n\n// encReflectString encodes uuid strings via reflect package.\n// The following code was taken from the `Parse` function of the \"github.com/google/uuid\" package.\nfunc encReflectString(v reflect.Value) ([]byte, error) {\n\ts := v.String()\n\tif s == zeroUUID {\n\t\treturn make([]byte, 0), nil\n\t}\n\tswitch len(s) {\n\tcase 45: // urn:timeuuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n\t\tif !strings.EqualFold(s[:9], \"urn:timeuuid:\") {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: the (%T) have invalid urn prefix: %q\", 
v.Interface(), s[:9])\n\t\t}\n\t\ts = s[9:]\n\tcase 38: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}\n\t\ts = s[1:]\n\tcase 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n\tcase 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n\t\tvar ok bool\n\t\tdata := make([]byte, 16)\n\t\tfor i := range data {\n\t\t\tdata[i], ok = xtob(s[i*2], s[i*2+1])\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: the (%T) have invalid UUID format: %q\", v.Interface(), s)\n\t\t\t}\n\t\t}\n\t\treturn data, nil\n\tcase 0:\n\t\treturn nil, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: the (%T) length can be 0,32,36,38,45\", v.Interface())\n\t}\n\n\t// s is now at least 36 bytes long\n\t// it must be of the form  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n\tif s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {\n\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: the (%T) have invalid UUID format: %q\", v.Interface(), s)\n\t}\n\tdata := make([]byte, 16)\n\tfor i, x := range [16]int{\n\t\t0, 2, 4, 6,\n\t\t9, 11,\n\t\t14, 16,\n\t\t19, 21,\n\t\t24, 26, 28, 30, 32, 34,\n\t} {\n\t\tb, ok := xtob(s[x], s[x+1])\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: the (%T) have invalid UUID format: %q\", v.Interface(), b)\n\t\t}\n\t\tdata[i] = b\n\t}\n\treturn data, nil\n}\n\n// encString encodes uuid strings.\n// The following code was taken from the `Parse` function of the \"github.com/google/uuid\" package.\nfunc encString(s string) ([]byte, error) {\n\tif s == zeroUUID {\n\t\treturn make([]byte, 0), nil\n\t}\n\tswitch len(s) {\n\tcase 45: // urn:timeuuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n\t\tif !strings.EqualFold(s[:9], \"urn:timeuuid:\") {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: (string) have invalid urn prefix: %q\", s[:9])\n\t\t}\n\t\ts = s[9:]\n\tcase 38: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}\n\t\ts = s[1:]\n\tcase 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n\tcase 32: // 
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n\t\tvar ok bool\n\t\tdata := make([]byte, 16)\n\t\tfor i := range data {\n\t\t\tdata[i], ok = xtob(s[i*2], s[i*2+1])\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: the (string) have invalid UUID format: %q\", s)\n\t\t\t}\n\t\t}\n\t\treturn data, nil\n\tcase 0:\n\t\treturn nil, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: the (string) length can be 0,32,36,38,45\")\n\t}\n\n\t// s is now at least 36 bytes long\n\t// it must be of the form  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n\tif s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {\n\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: the (string) have invalid UUID format: %q\", s)\n\t}\n\tdata := make([]byte, 16)\n\tfor i, x := range [16]int{\n\t\t0, 2, 4, 6,\n\t\t9, 11,\n\t\t14, 16,\n\t\t19, 21,\n\t\t24, 26, 28, 30, 32, 34,\n\t} {\n\t\tb, ok := xtob(s[x], s[x+1])\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal timeuuid: the (string) have invalid UUID format: %q\", b)\n\t\t}\n\t\tdata[i] = b\n\t}\n\treturn data, nil\n}\n\n// xtob converts hex characters x1 and x2 into a byte.\n// The following code was taken from the \"github.com/google/uuid\" package.\nfunc xtob(x1, x2 byte) (byte, bool) {\n\tb1 := xvalues[x1]\n\tb2 := xvalues[x2]\n\treturn (b1 << 4) | b2, b1 != 255 && b2 != 255\n}\n\n// xvalues returns the value of a byte as a hexadecimal digit or 255.\n// The following code was taken from the \"github.com/google/uuid\" package.\nvar xvalues = [256]byte{\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,\n\t255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255,\n\t255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n}\n"
  },
  {
    "path": "serialization/timeuuid/unmarshal.go",
    "content": "package timeuuid\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\tcase *[16]byte:\n\t\treturn DecArray(data, v)\n\tcase **[16]byte:\n\t\treturn DecArrayR(data, v)\n\tcase *[]byte:\n\t\treturn DecSlice(data, v)\n\tcase **[]byte:\n\t\treturn DecSliceR(data, v)\n\tcase *string:\n\t\treturn DecString(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tcase *time.Time:\n\t\treturn DecTime(data, v)\n\tcase **time.Time:\n\t\treturn DecTimeR(data, v)\n\tdefault:\n\t\t// Custom types (type MyFloat float32) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string\", v)\n\t\t}\n\t\tif rv.Type().Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/timeuuid/unmarshal_utils.go",
    "content": "package timeuuid\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n)\n\nconst (\n\thexString = \"0123456789abcdef\"\n\tzeroUUID  = \"00000000-0000-0000-0000-000000000000\"\n)\n\nvar (\n\toffsets  = [...]int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34}\n\ttimeBase = time.Date(1582, time.October, 15, 0, 0, 0, 0, time.UTC).Unix()\n\n\terrWrongDataLen = fmt.Errorf(\"failed to unmarshal timeuuid: the length of the data should be 0 or 16\")\n)\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal timeuuid: can not unmarshal into nil reference(%T)(%[1]v)\", v)\n}\n\nfunc DecArray(p []byte, v *[16]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = [16]byte{}\n\tcase 16:\n\t\tcopy(v[:], p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecArrayR(p []byte, v **[16]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new([16]byte)\n\t\t}\n\tcase 16:\n\t\t*v = &[16]byte{}\n\t\tcopy((*v)[:], p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecSlice(p []byte, v *[]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = make([]byte, 0)\n\t\t}\n\tcase 16:\n\t\t*v = make([]byte, 16)\n\t\tcopy(*v, p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecSliceR(p []byte, v **[]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\ttmp := make([]byte, 0)\n\t\t\t*v = &tmp\n\t\t}\n\tcase 16:\n\t\t*v = &[]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}\n\t\tcopy(**v, p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn 
errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = \"\"\n\t\t} else {\n\t\t\t*v = zeroUUID\n\t\t}\n\tcase 16:\n\t\t*v = decString(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\ttmp := zeroUUID\n\t\t\t*v = &tmp\n\t\t}\n\tcase 16:\n\t\ttmp := decString(p)\n\t\t*v = &tmp\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecTime(p []byte, v *time.Time) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = time.Time{}\n\tcase 16:\n\t\t*v = decTime(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecTimeR(p []byte, v **time.Time) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(time.Time)\n\t\t}\n\tcase 16:\n\t\tval := decTime(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Array:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 || v.Len() != 16 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string\", v.Interface())\n\t\t}\n\t\treturn decReflectArray(p, v)\n\tcase reflect.Slice:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string\", v.Interface())\n\t\t}\n\t\treturn decReflectBytes(p, v)\n\tcase reflect.String:\n\t\treturn decReflectString(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal timeuuid: unsupported value type 
(%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string\", v.Interface())\n\t}\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tev := v.Elem()\n\tswitch evt := ev.Type().Elem(); evt.Kind() {\n\tcase reflect.Array:\n\t\tif evt.Elem().Kind() != reflect.Uint8 || ev.Len() != 16 {\n\t\t\treturn fmt.Errorf(\"failed to marshal timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string\", v.Interface())\n\t\t}\n\t\treturn decReflectArrayR(p, ev)\n\tcase reflect.Slice:\n\t\tif evt.Elem().Kind() != reflect.Uint8 {\n\t\t\treturn fmt.Errorf(\"failed to marshal timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string\", v.Interface())\n\t\t}\n\t\treturn decReflectBytesR(p, ev)\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, ev)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string\", v.Interface())\n\t}\n}\n\nfunc decReflectArray(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetZero()\n\tcase 16:\n\t\tval := reflect.New(v.Type())\n\t\tcopy((*[16]byte)(val.UnsafePointer())[:], p)\n\t\tv.Set(val.Elem())\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectBytes(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.SetBytes(nil)\n\t\t} else {\n\t\t\tv.SetBytes(make([]byte, 0))\n\t\t}\n\tcase 16:\n\t\ttmp := make([]byte, 16)\n\t\tcopy(tmp, p)\n\t\tv.SetBytes(tmp)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectString(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.SetString(\"\")\n\t\t} else {\n\t\t\tv.SetString(zeroUUID)\n\t\t}\n\tcase 16:\n\t\tv.SetString(decString(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectArrayR(p []byte, v 
reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.Set(reflect.Zero(v.Type()))\n\t\t} else {\n\t\t\tval := reflect.New(v.Type().Elem())\n\t\t\tv.Set(val)\n\t\t}\n\tcase 16:\n\t\tval := reflect.New(v.Type().Elem())\n\t\tcopy((*[16]byte)(val.UnsafePointer())[:], p)\n\t\tv.Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectBytesR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.Set(reflect.Zero(v.Type()))\n\t\t} else {\n\t\t\tval := reflect.New(v.Type().Elem())\n\t\t\tval.Elem().SetBytes(make([]byte, 0))\n\t\t\tv.Set(val)\n\t\t}\n\tcase 16:\n\t\ttmp := make([]byte, 16)\n\t\tcopy(tmp, p)\n\t\tval := reflect.New(v.Type().Elem())\n\t\tval.Elem().SetBytes(tmp)\n\t\tv.Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.Set(reflect.Zero(v.Type()))\n\t\t} else {\n\t\t\tval := reflect.New(v.Type().Elem())\n\t\t\tval.Elem().SetString(zeroUUID)\n\t\t\tv.Set(val)\n\t\t}\n\tcase 16:\n\t\tval := reflect.New(v.Type().Elem())\n\t\tval.Elem().SetString(decString(p))\n\t\tv.Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decString(p []byte) string {\n\tr := make([]byte, 36)\n\tfor i, b := range p {\n\t\tr[offsets[i]] = hexString[b>>4]\n\t\tr[offsets[i]+1] = hexString[b&0xF]\n\t}\n\tr[8] = '-'\n\tr[13] = '-'\n\tr[18] = '-'\n\tr[23] = '-'\n\treturn string(r)\n}\n\nfunc decTime(u []byte) time.Time {\n\tts := decTimestamp(u)\n\tsec := ts / 1e7\n\tnsec := (ts % 1e7) * 100\n\treturn time.Unix(sec+timeBase, nsec).UTC()\n}\n\nfunc decTimestamp(u []byte) int64 {\n\treturn int64(uint64(u[0])<<24|uint64(u[1])<<16|\n\t\tuint64(u[2])<<8|uint64(u[3])) +\n\t\tint64(uint64(u[4])<<40|uint64(u[5])<<32) +\n\t\tint64(uint64(u[6]&0x0F)<<56|uint64(u[7])<<48)\n}\n"
  },
  {
    "path": "serialization/tinyint/marshal.go",
    "content": "package tinyint\n\nimport (\n\t\"math/big\"\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase int8:\n\t\treturn EncInt8(v)\n\tcase int32:\n\t\treturn EncInt32(v)\n\tcase int16:\n\t\treturn EncInt16(v)\n\tcase int64:\n\t\treturn EncInt64(v)\n\tcase int:\n\t\treturn EncInt(v)\n\n\tcase uint8:\n\t\treturn EncUint8(v)\n\tcase uint16:\n\t\treturn EncUint16(v)\n\tcase uint32:\n\t\treturn EncUint32(v)\n\tcase uint64:\n\t\treturn EncUint64(v)\n\tcase uint:\n\t\treturn EncUint(v)\n\n\tcase big.Int:\n\t\treturn EncBigInt(v)\n\tcase string:\n\t\treturn EncString(v)\n\n\tcase *int8:\n\t\treturn EncInt8R(v)\n\tcase *int16:\n\t\treturn EncInt16R(v)\n\tcase *int32:\n\t\treturn EncInt32R(v)\n\tcase *int64:\n\t\treturn EncInt64R(v)\n\tcase *int:\n\t\treturn EncIntR(v)\n\n\tcase *uint8:\n\t\treturn EncUint8R(v)\n\tcase *uint16:\n\t\treturn EncUint16R(v)\n\tcase *uint32:\n\t\treturn EncUint32R(v)\n\tcase *uint64:\n\t\treturn EncUint64R(v)\n\tcase *uint:\n\t\treturn EncUintR(v)\n\n\tcase *big.Int:\n\t\treturn EncBigIntR(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tdefault:\n\t\t// Custom types (type MyInt int) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/tinyint/marshal_utils.go",
    "content": "package tinyint\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\nconst supportedTypes = \"~int8, ~int16, ~int32, ~int64, ~int, ~uint8, ~uint16, ~uint32, ~uint64, ~uint, ~string, big.Int\"\n\nvar (\n\tmaxBigInt = big.NewInt(math.MaxInt8)\n\tminBigInt = big.NewInt(math.MinInt8)\n)\n\nfunc EncInt8(v int8) ([]byte, error) {\n\treturn []byte{byte(v)}, nil\n}\n\nfunc EncInt8R(v *int8) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt8(*v)\n}\n\nfunc EncInt16(v int16) ([]byte, error) {\n\tif v > math.MaxInt8 || v < math.MinInt8 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: value %#v out of range\", v)\n\t}\n\treturn []byte{byte(v)}, nil\n}\n\nfunc EncInt16R(v *int16) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt16(*v)\n}\n\nfunc EncInt32(v int32) ([]byte, error) {\n\tif v > math.MaxInt8 || v < math.MinInt8 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: value %#v out of range\", v)\n\t}\n\treturn []byte{byte(v)}, nil\n}\n\nfunc EncInt32R(v *int32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt32(*v)\n}\n\nfunc EncInt64(v int64) ([]byte, error) {\n\tif v > math.MaxInt8 || v < math.MinInt8 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: value %#v out of range\", v)\n\t}\n\treturn []byte{byte(v)}, nil\n}\n\nfunc EncInt64R(v *int64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt64(*v)\n}\n\nfunc EncInt(v int) ([]byte, error) {\n\tif v > math.MaxInt8 || v < math.MinInt8 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: value %#v out of range\", v)\n\t}\n\treturn []byte{byte(v)}, nil\n}\n\nfunc EncIntR(v *int) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt(*v)\n}\n\nfunc EncUint8(v uint8) ([]byte, error) {\n\treturn []byte{v}, nil\n}\n\nfunc EncUint8R(v *uint8) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn 
EncUint8(*v)\n}\n\nfunc EncUint16(v uint16) ([]byte, error) {\n\tif v > math.MaxUint8 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: value %#v out of range\", v)\n\t}\n\treturn []byte{byte(v)}, nil\n}\n\nfunc EncUint16R(v *uint16) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint16(*v)\n}\n\nfunc EncUint32(v uint32) ([]byte, error) {\n\tif v > math.MaxUint8 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: value %#v out of range\", v)\n\t}\n\treturn []byte{byte(v)}, nil\n}\n\nfunc EncUint32R(v *uint32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint32(*v)\n}\n\nfunc EncUint64(v uint64) ([]byte, error) {\n\tif v > math.MaxUint8 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: value %#v out of range\", v)\n\t}\n\treturn []byte{byte(v)}, nil\n}\n\nfunc EncUint64R(v *uint64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint64(*v)\n}\n\nfunc EncUint(v uint) ([]byte, error) {\n\tif v > math.MaxUint8 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: value %#v out of range\", v)\n\t}\n\treturn []byte{byte(v)}, nil\n}\n\nfunc EncUintR(v *uint) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncUint(*v)\n}\n\nfunc EncBigInt(v big.Int) ([]byte, error) {\n\tif v.Cmp(maxBigInt) == 1 || v.Cmp(minBigInt) == -1 {\n\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: value (%T)(%s) out of range\", v, v.String())\n\t}\n\treturn []byte{byte(v.Int64())}, nil\n}\n\nfunc EncBigIntR(v *big.Int) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncBigInt(*v)\n}\n\nfunc EncString(v string) ([]byte, error) {\n\tif v == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tn, err := strconv.ParseInt(v, 10, 8)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: can not marshal (%T)(%[1]v) %s\", v, err)\n\t}\n\treturn []byte{byte(n)}, nil\n}\n\nfunc EncStringR(v *string) ([]byte, error) {\n\tif v == nil 
{\n\t\treturn nil, nil\n\t}\n\treturn EncString(*v)\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.Int8:\n\t\treturn []byte{byte(v.Int())}, nil\n\tcase reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16:\n\t\tval := v.Int()\n\t\tif val > math.MaxInt8 || val < math.MinInt8 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: value (%T)(%[1]v) out of range\", v.Interface())\n\t\t}\n\t\treturn []byte{byte(val)}, nil\n\tcase reflect.Uint8:\n\t\treturn []byte{byte(v.Uint())}, nil\n\tcase reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16:\n\t\tval := v.Uint()\n\t\tif val > math.MaxUint8 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: value (%T)(%[1]v) out of range\", v.Interface())\n\t\t}\n\t\treturn []byte{byte(val)}, nil\n\tcase reflect.String:\n\t\tval := v.String()\n\t\tif val == \"\" {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tn, err := strconv.ParseInt(val, 10, 8)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: can not marshal (%T)(%[1]v) %s\", v.Interface(), err)\n\t\t}\n\t\treturn []byte{byte(n)}, nil\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: unsupported value type (%T)(%[1]v), supported types: %s, unsetColumn\", v.Interface(), supportedTypes)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal tinyint: unsupported value type (%T)(%[1]v), supported types: %s, unsetColumn\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n"
  },
  {
    "path": "serialization/tinyint/unmarshal.go",
    "content": "package tinyint\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\n\tcase *int8:\n\t\treturn DecInt8(data, v)\n\tcase *int16:\n\t\treturn DecInt16(data, v)\n\tcase *int32:\n\t\treturn DecInt32(data, v)\n\tcase *int64:\n\t\treturn DecInt64(data, v)\n\tcase *int:\n\t\treturn DecInt(data, v)\n\n\tcase *uint8:\n\t\treturn DecUint8(data, v)\n\tcase *uint16:\n\t\treturn DecUint16(data, v)\n\tcase *uint32:\n\t\treturn DecUint32(data, v)\n\tcase *uint64:\n\t\treturn DecUint64(data, v)\n\tcase *uint:\n\t\treturn DecUint(data, v)\n\n\tcase *big.Int:\n\t\treturn DecBigInt(data, v)\n\tcase *string:\n\t\treturn DecString(data, v)\n\n\tcase **int8:\n\t\treturn DecInt8R(data, v)\n\tcase **int16:\n\t\treturn DecInt16R(data, v)\n\tcase **int32:\n\t\treturn DecInt32R(data, v)\n\tcase **int64:\n\t\treturn DecInt64R(data, v)\n\tcase **int:\n\t\treturn DecIntR(data, v)\n\n\tcase **uint8:\n\t\treturn DecUint8R(data, v)\n\tcase **uint16:\n\t\treturn DecUint16R(data, v)\n\tcase **uint32:\n\t\treturn DecUint32R(data, v)\n\tcase **uint64:\n\t\treturn DecUint64R(data, v)\n\tcase **uint:\n\t\treturn DecUintR(data, v)\n\n\tcase **big.Int:\n\t\treturn DecBigIntR(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tdefault:\n\n\t\t// Custom types (type MyInt int) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal tinyint: unsupported value type (%T)(%[1]v), supported types: %s\", v, supportedTypes)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/tinyint/unmarshal_utils.go",
    "content": "package tinyint\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\nconst (\n\tnegInt16 = int16(-1) << 8\n\tnegInt32 = int32(-1) << 8\n\tnegInt64 = int64(-1) << 8\n\tnegInt   = int(-1) << 8\n)\n\nvar errWrongDataLen = fmt.Errorf(\"failed to unmarshal tinyint: the length of the data should less or equal then 1\")\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal tinyint: can not unmarshal into nil reference(%T)(%[1]v)\", v)\n}\n\nfunc DecInt8(p []byte, v *int8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 1:\n\t\t*v = int8(p[0])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt8R(p []byte, v **int8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int8)\n\t\t}\n\tcase 1:\n\t\tval := int8(p[0])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt16(p []byte, v *int16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 1:\n\t\t*v = decInt16(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt16R(p []byte, v **int16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int16)\n\t\t}\n\tcase 1:\n\t\tval := decInt16(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt32(p []byte, v *int32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 1:\n\t\t*v = decInt32(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt32R(p []byte, v **int32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil 
{\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int32)\n\t\t}\n\tcase 1:\n\t\tval := decInt32(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt64(p []byte, v *int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 1:\n\t\t*v = decInt64(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt64R(p []byte, v **int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int64)\n\t\t}\n\tcase 1:\n\t\tval := decInt64(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecInt(p []byte, v *int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 1:\n\t\t*v = decInt(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecIntR(p []byte, v **int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int)\n\t\t}\n\tcase 1:\n\t\tval := decInt(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint8(p []byte, v *uint8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 1:\n\t\t*v = p[0]\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint8R(p []byte, v **uint8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint8)\n\t\t}\n\tcase 1:\n\t\tval := p[0]\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint16(p []byte, v *uint16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 1:\n\t\t*v = 
uint16(p[0])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint16R(p []byte, v **uint16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint16)\n\t\t}\n\tcase 1:\n\t\tval := uint16(p[0])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint32(p []byte, v *uint32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 1:\n\t\t*v = uint32(p[0])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint32R(p []byte, v **uint32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint32)\n\t\t}\n\tcase 1:\n\t\tval := uint32(p[0])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint64(p []byte, v *uint64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 1:\n\t\t*v = uint64(p[0])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint64R(p []byte, v **uint64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint64)\n\t\t}\n\tcase 1:\n\t\tval := uint64(p[0])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUint(p []byte, v *uint) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 1:\n\t\t*v = uint(p[0])\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecUintR(p []byte, v **uint) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint)\n\t\t}\n\tcase 1:\n\t\tval := 
uint(p[0])\n\t\t*v = &val\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = \"\"\n\t\t} else {\n\t\t\t*v = \"0\"\n\t\t}\n\tcase 1:\n\t\t*v = strconv.FormatInt(decInt64(p), 10)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\tval := \"0\"\n\t\t\t*v = &val\n\t\t}\n\tcase 1:\n\t\t*v = new(string)\n\t\t**v = strconv.FormatInt(decInt64(p), 10)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecBigInt(p []byte, v *big.Int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt64(0)\n\tcase 1:\n\t\tv.SetInt64(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecBigIntR(p []byte, v **big.Int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = big.NewInt(0)\n\t\t}\n\tcase 1:\n\t\t*v = big.NewInt(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\treturn decReflectInts(p, v)\n\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\treturn decReflectUints(p, v)\n\tcase reflect.String:\n\t\treturn decReflectString(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal tinyint: unsupported value type (%T)(%[1]v), supported types: %s\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) 
error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v.Type().Elem().Elem().Kind() {\n\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\treturn decReflectIntsR(p, v)\n\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\treturn decReflectUintsR(p, v)\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal tinyint: unsupported value type (%T)(%[1]v), supported types: %s\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc decReflectInts(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 1:\n\t\tv.SetInt(decInt64(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUints(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\tcase 1:\n\t\tv.SetUint(uint64(p[0]))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectString(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.SetString(\"\")\n\t\t} else {\n\t\t\tv.SetString(\"0\")\n\t\t}\n\tcase 1:\n\t\tv.SetString(strconv.FormatInt(decInt64(p), 10))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectIntsR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 1:\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetInt(decInt64(p))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectUintsR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 1:\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetUint(uint64(p[0]))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tswitch len(p) 
{\n\tcase 0:\n\t\tvar val reflect.Value\n\t\tif p == nil {\n\t\t\tval = reflect.Zero(v.Type().Elem())\n\t\t} else {\n\t\t\tval = reflect.New(v.Type().Elem().Elem())\n\t\t\tval.Elem().SetString(\"0\")\n\t\t}\n\t\tv.Elem().Set(val)\n\tcase 1:\n\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\tval.Elem().SetString(strconv.FormatInt(decInt64(p), 10))\n\t\tv.Elem().Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectNullableR(p []byte, v reflect.Value) reflect.Value {\n\tif p == nil {\n\t\treturn reflect.Zero(v.Elem().Type())\n\t}\n\treturn reflect.New(v.Type().Elem().Elem())\n}\n\nfunc decInt16(p []byte) int16 {\n\tif p[0] > math.MaxInt8 {\n\t\treturn negInt16 | int16(p[0])\n\t}\n\treturn int16(p[0])\n}\n\nfunc decInt32(p []byte) int32 {\n\tif p[0] > math.MaxInt8 {\n\t\treturn negInt32 | int32(p[0])\n\t}\n\treturn int32(p[0])\n}\n\nfunc decInt64(p []byte) int64 {\n\tif p[0] > math.MaxInt8 {\n\t\treturn negInt64 | int64(p[0])\n\t}\n\treturn int64(p[0])\n}\n\nfunc decInt(p []byte) int {\n\tif p[0] > math.MaxInt8 {\n\t\treturn negInt | int(p[0])\n\t}\n\treturn int(p[0])\n}\n"
  },
  {
    "path": "serialization/uuid/marshal.go",
    "content": "package uuid\n\nimport (\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase [16]byte:\n\t\treturn EncArray(v)\n\tcase *[16]byte:\n\t\treturn EncArrayR(v)\n\tcase []byte:\n\t\treturn EncSlice(v)\n\tcase *[]byte:\n\t\treturn EncSliceR(v)\n\tcase string:\n\t\treturn EncString(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tdefault:\n\t\t// Custom types (type MyUUID [16]byte) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(rv)\n\t\t}\n\t\treturn EncReflectR(rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/uuid/marshal_utils.go",
    "content": "package uuid\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n)\n\nfunc EncArray(v [16]byte) ([]byte, error) {\n\treturn v[:], nil\n}\n\nfunc EncArrayR(v *[16]byte) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn v[:], nil\n}\n\nfunc EncSlice(v []byte) ([]byte, error) {\n\tswitch len(v) {\n\tcase 0:\n\t\tif v == nil {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn make([]byte, 0), nil\n\tcase 16:\n\t\treturn v, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: the ([]byte) length should be 0 or 16\")\n\t}\n}\n\nfunc EncSliceR(v *[]byte) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncSlice(*v)\n}\n\nfunc EncString(v string) ([]byte, error) {\n\treturn encString(v)\n}\n\nfunc EncStringR(v *string) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encString(*v)\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.Array:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 || v.Len() != 16 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string, unsetColumn\", v.Interface())\n\t\t}\n\t\tnv := reflect.New(v.Type())\n\t\tnv.Elem().Set(v)\n\t\treturn nv.Elem().Bytes(), nil\n\tcase reflect.Slice:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string, unsetColumn\", v.Interface())\n\t\t}\n\t\treturn encReflectBytes(v)\n\tcase reflect.String:\n\t\treturn encReflectString(v)\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: timeuuid value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: unsupported value 
type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\tswitch ev := v.Elem(); ev.Kind() {\n\tcase reflect.Array:\n\t\tif ev.Type().Elem().Kind() != reflect.Uint8 || ev.Len() != 16 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string, unsetColumn\", v.Interface())\n\t\t}\n\t\treturn v.Elem().Bytes(), nil\n\tcase reflect.Slice:\n\t\tif ev.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string, unsetColumn\", v.Interface())\n\t\t}\n\t\treturn encReflectBytes(ev)\n\tcase reflect.String:\n\t\treturn encReflectString(ev)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc encReflectBytes(rv reflect.Value) ([]byte, error) {\n\tswitch rv.Len() {\n\tcase 0:\n\t\tif rv.IsNil() {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn make([]byte, 0), nil\n\tcase 16:\n\t\treturn rv.Bytes(), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: the (%T) length should be 0 or 16\", rv.Interface())\n\t}\n}\n\n// encReflectString encodes uuid strings via reflect package.\n// The following code was taken from the `Parse` function of the \"github.com/google/uuid\" package.\nfunc encReflectString(v reflect.Value) ([]byte, error) {\n\ts := v.String()\n\tswitch len(s) {\n\tcase 45: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n\t\tif !strings.EqualFold(s[:9], \"urn:uuid:\") {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: the (%T) have invalid urn prefix: %q\", v.Interface(), s[:9])\n\t\t}\n\t\ts = s[9:]\n\tcase 38: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}\n\t\ts = s[1:]\n\tcase 
36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n\tcase 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n\t\tvar ok bool\n\t\tdata := make([]byte, 16)\n\t\tfor i := range data {\n\t\t\tdata[i], ok = xtob(s[i*2], s[i*2+1])\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: the (%T) have invalid UUID format: %q\", v.Interface(), s)\n\t\t\t}\n\t\t}\n\t\treturn data, nil\n\tcase 0:\n\t\treturn nil, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: the (%T) length can be 0,32,36,38,45\", v.Interface())\n\t}\n\n\t// s is now at least 36 bytes long\n\t// it must be of the form  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n\tif s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {\n\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: the (%T) have invalid UUID format: %q\", v.Interface(), s)\n\t}\n\tdata := make([]byte, 16)\n\tfor i, x := range [16]int{\n\t\t0, 2, 4, 6,\n\t\t9, 11,\n\t\t14, 16,\n\t\t19, 21,\n\t\t24, 26, 28, 30, 32, 34,\n\t} {\n\t\tb, ok := xtob(s[x], s[x+1])\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: the (%T) have invalid UUID format: %q\", v.Interface(), b)\n\t\t}\n\t\tdata[i] = b\n\t}\n\treturn data, nil\n}\n\n// encString encodes uuid strings.\n// The following code was taken from the `Parse` function of the \"github.com/google/uuid\" package.\nfunc encString(s string) ([]byte, error) {\n\tswitch len(s) {\n\tcase 45: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n\t\tif !strings.EqualFold(s[:9], \"urn:uuid:\") {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: (string) have invalid urn prefix: %q\", s[:9])\n\t\t}\n\t\ts = s[9:]\n\tcase 38: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}\n\t\ts = s[1:]\n\tcase 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n\tcase 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n\t\tvar ok bool\n\t\tdata := make([]byte, 16)\n\t\tfor i := range data {\n\t\t\tdata[i], ok = xtob(s[i*2], s[i*2+1])\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: the 
(string) have invalid UUID format: %q\", s)\n\t\t\t}\n\t\t}\n\t\treturn data, nil\n\tcase 0:\n\t\treturn nil, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: the (string) length can be 0,32,36,38,45\")\n\t}\n\n\t// s is now at least 36 bytes long\n\t// it must be of the form  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n\tif s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {\n\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: the (string) have invalid UUID format: %q\", s)\n\t}\n\tdata := make([]byte, 16)\n\tfor i, x := range [16]int{\n\t\t0, 2, 4, 6,\n\t\t9, 11,\n\t\t14, 16,\n\t\t19, 21,\n\t\t24, 26, 28, 30, 32, 34,\n\t} {\n\t\tb, ok := xtob(s[x], s[x+1])\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal uuid: the (string) have invalid UUID format: %q\", b)\n\t\t}\n\t\tdata[i] = b\n\t}\n\treturn data, nil\n}\n\n// xtob converts hex characters x1 and x2 into a byte.\n// The following code was taken from the \"github.com/google/uuid\" package.\nfunc xtob(x1, x2 byte) (byte, bool) {\n\tb1 := xvalues[x1]\n\tb2 := xvalues[x2]\n\treturn (b1 << 4) | b2, b1 != 255 && b2 != 255\n}\n\n// xvalues returns the value of a byte as a hexadecimal digit or 255.\n// The following code was taken from the \"github.com/google/uuid\" package.\nvar xvalues = [256]byte{\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,\n\t255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n\t255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n}\n"
  },
  {
    "path": "serialization/uuid/unmarshal.go",
    "content": "package uuid\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\tcase *[16]byte:\n\t\treturn DecArray(data, v)\n\tcase **[16]byte:\n\t\treturn DecArrayR(data, v)\n\tcase *[]byte:\n\t\treturn DecSlice(data, v)\n\tcase **[]byte:\n\t\treturn DecSliceR(data, v)\n\tcase *string:\n\t\treturn DecString(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tdefault:\n\t\t// Custom types (type MyFloat float32) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal uuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string\", v)\n\t\t}\n\t\tif rv.Type().Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/uuid/unmarshal_utils.go",
    "content": "package uuid\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nconst hexString = \"0123456789abcdef\"\n\nvar (\n\toffsets         = [...]int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34}\n\terrWrongDataLen = fmt.Errorf(\"failed to unmarshal uuid: the length of the data should be 0 or 16\")\n)\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal uuid: can not unmarshal into nil reference(%T)(%[1]v)\", v)\n}\n\nfunc DecArray(p []byte, v *[16]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = [16]byte{}\n\tcase 16:\n\t\tcopy(v[:], p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecArrayR(p []byte, v **[16]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new([16]byte)\n\t\t}\n\tcase 16:\n\t\t*v = &[16]byte{}\n\t\tcopy((*v)[:], p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecSlice(p []byte, v *[]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = make([]byte, 0)\n\t\t}\n\tcase 16:\n\t\t*v = make([]byte, 16)\n\t\tcopy(*v, p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecSliceR(p []byte, v **[]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\ttmp := make([]byte, 0)\n\t\t\t*v = &tmp\n\t\t}\n\tcase 16:\n\t\t*v = &[]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}\n\t\tcopy(**v, p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = \"\"\n\t\t} else {\n\t\t\t*v = 
\"00000000-0000-0000-0000-000000000000\"\n\t\t}\n\tcase 16:\n\t\t*v = decString(p)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\ttmp := \"00000000-0000-0000-0000-000000000000\"\n\t\t\t*v = &tmp\n\t\t}\n\tcase 16:\n\t\ttmp := decString(p)\n\t\t*v = &tmp\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Array:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 || v.Len() != 16 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal uuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string\", v.Interface())\n\t\t}\n\t\treturn decReflectArray(p, v)\n\tcase reflect.Slice:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal uuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string\", v.Interface())\n\t\t}\n\t\treturn decReflectBytes(p, v)\n\tcase reflect.String:\n\t\treturn decReflectString(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal uuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string\", v.Interface())\n\t}\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tev := v.Elem()\n\tswitch evt := ev.Type().Elem(); evt.Kind() {\n\tcase reflect.Array:\n\t\tif evt.Elem().Kind() != reflect.Uint8 || ev.Len() != 16 {\n\t\t\treturn fmt.Errorf(\"failed to marshal timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string\", v.Interface())\n\t\t}\n\t\treturn decReflectArrayR(p, ev)\n\tcase reflect.Slice:\n\t\tif evt.Elem().Kind() != reflect.Uint8 {\n\t\t\treturn 
fmt.Errorf(\"failed to marshal timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string\", v.Interface())\n\t\t}\n\t\treturn decReflectBytesR(p, ev)\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, ev)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal timeuuid: unsupported value type (%T)(%[1]v), supported types: ~[]byte, ~[16]byte, ~string\", v.Interface())\n\t}\n}\n\nfunc decReflectArray(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetZero()\n\tcase 16:\n\t\tval := reflect.New(v.Type())\n\t\tcopy((*[16]byte)(val.UnsafePointer())[:], p)\n\t\tv.Set(val.Elem())\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectBytes(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.SetBytes(nil)\n\t\t} else {\n\t\t\tv.SetBytes(make([]byte, 0))\n\t\t}\n\tcase 16:\n\t\ttmp := make([]byte, 16)\n\t\tcopy(tmp, p)\n\t\tv.SetBytes(tmp)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectString(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.SetString(\"\")\n\t\t} else {\n\t\t\tv.SetString(\"00000000-0000-0000-0000-000000000000\")\n\t\t}\n\tcase 16:\n\t\tv.SetString(decString(p))\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectArrayR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.Set(reflect.Zero(v.Type()))\n\t\t} else {\n\t\t\tval := reflect.New(v.Type().Elem())\n\t\t\tv.Set(val)\n\t\t}\n\tcase 16:\n\t\tval := reflect.New(v.Type().Elem())\n\t\tcopy((*[16]byte)(val.UnsafePointer())[:], p)\n\t\tv.Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectBytesR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.Set(reflect.Zero(v.Type()))\n\t\t} else {\n\t\t\tval := 
reflect.New(v.Type().Elem())\n\t\t\tval.Elem().SetBytes(make([]byte, 0))\n\t\t\tv.Set(val)\n\t\t}\n\tcase 16:\n\t\ttmp := make([]byte, 16)\n\t\tcopy(tmp, p)\n\t\tval := reflect.New(v.Type().Elem())\n\t\tval.Elem().SetBytes(tmp)\n\t\tv.Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.Set(reflect.Zero(v.Type()))\n\t\t} else {\n\t\t\tval := reflect.New(v.Type().Elem())\n\t\t\tval.Elem().SetString(\"00000000-0000-0000-0000-000000000000\")\n\t\t\tv.Set(val)\n\t\t}\n\tcase 16:\n\t\tval := reflect.New(v.Type().Elem())\n\t\tval.Elem().SetString(decString(p))\n\t\tv.Set(val)\n\tdefault:\n\t\treturn errWrongDataLen\n\t}\n\treturn nil\n}\n\nfunc decString(p []byte) string {\n\tr := make([]byte, 36)\n\tfor i, b := range p {\n\t\tr[offsets[i]] = hexString[b>>4]\n\t\tr[offsets[i]+1] = hexString[b&0xF]\n\t}\n\tr[8] = '-'\n\tr[13] = '-'\n\tr[18] = '-'\n\tr[23] = '-'\n\treturn string(r)\n}\n"
  },
  {
    "path": "serialization/varchar/marshal.go",
    "content": "package varchar\n\nimport (\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase string:\n\t\treturn EncString(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tcase []byte:\n\t\treturn EncBytes(v)\n\tcase *[]byte:\n\t\treturn EncBytesR(v)\n\tdefault:\n\t\t// Custom types (type MyString string) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(rv)\n\t\t}\n\t\treturn EncReflectR(rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/varchar/marshal_utils.go",
    "content": "package varchar\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc EncString(v string) ([]byte, error) {\n\treturn encString(v), nil\n}\n\nfunc EncStringR(v *string) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encString(*v), nil\n}\n\nfunc EncBytes(v []byte) ([]byte, error) {\n\treturn v, nil\n}\n\nfunc EncBytesR(v *[]byte) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn *v, nil\n}\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Kind() {\n\tcase reflect.String:\n\t\treturn encString(v.String()), nil\n\tcase reflect.Slice:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal varchar: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte, unsetColumn\", v.Interface())\n\t\t}\n\t\treturn EncBytes(v.Bytes())\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal varchar: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte, unsetColumn\", v.Interface())\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal varchar: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte, unsetColumn\", v.Interface())\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encString(v string) []byte {\n\tif v == \"\" {\n\t\treturn make([]byte, 0)\n\t}\n\treturn []byte(v)\n}\n"
  },
  {
    "path": "serialization/varchar/unmarshal.go",
    "content": "package varchar\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\tcase *string:\n\t\treturn DecString(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tcase *[]byte:\n\t\treturn DecBytes(data, v)\n\tcase **[]byte:\n\t\treturn DecBytesR(data, v)\n\tcase *any:\n\t\treturn DecInterface(data, v)\n\tdefault:\n\t\t// Custom types (type MyString string) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varchar: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/varchar/unmarshal_utils.go",
    "content": "package varchar\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n)\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal varchar: can not unmarshal into nil reference(%T)(%[1]v)\", v)\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decString(p)\n\treturn nil\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decStringR(p)\n\treturn nil\n}\n\nfunc DecBytes(p []byte, v *[]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tif p == nil {\n\t\t*v = nil\n\t\treturn nil\n\t}\n\tif len(p) == 0 {\n\t\t*v = make([]byte, 0)\n\t\treturn nil\n\t}\n\t*v = append((*v)[:0], p...)\n\treturn nil\n}\n\nfunc DecBytesR(p []byte, v **[]byte) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decBytesR(p)\n\treturn nil\n}\n\nfunc DecInterface(p []byte, v *any) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\t*v = decBytes(p)\n\treturn nil\n}\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.String:\n\t\tv.SetString(decString(p))\n\tcase reflect.Slice:\n\t\tif v.Type().Elem().Kind() != reflect.Uint8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varchar: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t\t}\n\t\tv.SetBytes(decBytes(p))\n\tcase reflect.Interface:\n\t\tv.Set(reflect.ValueOf(decBytes(p)))\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varchar: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t}\n\treturn nil\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn errNilReference(v)\n\t}\n\n\tswitch ev := v.Type().Elem().Elem(); ev.Kind() {\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, v)\n\tcase reflect.Slice:\n\t\tif 
ev.Elem().Kind() != reflect.Uint8 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varchar: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t\t}\n\t\treturn decReflectBytesR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varchar: unsupported value type (%T)(%[1]v), supported types: ~string, ~[]byte\", v.Interface())\n\t}\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\tv.Elem().Set(reflect.Zero(v.Type().Elem()))\n\t\t} else {\n\t\t\tv.Elem().Set(reflect.New(v.Type().Elem().Elem()))\n\t\t}\n\t\treturn nil\n\t}\n\tval := reflect.New(v.Type().Elem().Elem())\n\tval.Elem().SetString(string(p))\n\tv.Elem().Set(val)\n\treturn nil\n}\n\nfunc decReflectBytesR(p []byte, v reflect.Value) error {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\tv.Elem().Set(reflect.Zero(v.Elem().Type()))\n\t\t} else {\n\t\t\tval := reflect.New(v.Type().Elem().Elem())\n\t\t\tval.Elem().SetBytes(make([]byte, 0))\n\t\t\tv.Elem().Set(val)\n\t\t}\n\t\treturn nil\n\t}\n\ttmp := make([]byte, len(p))\n\tcopy(tmp, p)\n\n\tval := reflect.New(v.Type().Elem().Elem())\n\tval.Elem().SetBytes(tmp)\n\tv.Elem().Set(val)\n\treturn nil\n}\n\nfunc decString(p []byte) string {\n\tif len(p) == 0 {\n\t\treturn \"\"\n\t}\n\treturn string(p)\n}\n\nfunc decStringR(p []byte) *string {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn new(string)\n\t}\n\ttmp := string(p)\n\treturn &tmp\n}\n\nfunc decBytes(p []byte) []byte {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn make([]byte, 0)\n\t}\n\ttmp := make([]byte, len(p))\n\tcopy(tmp, p)\n\treturn tmp\n}\n\nfunc decBytesR(p []byte) *[]byte {\n\tif len(p) == 0 {\n\t\tif p == nil {\n\t\t\treturn nil\n\t\t}\n\t\ttmp := make([]byte, 0)\n\t\treturn &tmp\n\t}\n\ttmp := make([]byte, len(p))\n\tcopy(tmp, p)\n\treturn &tmp\n}\n"
  },
  {
    "path": "serialization/varint/marshal.go",
    "content": "package varint\n\nimport (\n\t\"math/big\"\n\t\"reflect\"\n)\n\nfunc Marshal(value any) ([]byte, error) {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil, nil\n\tcase int8:\n\t\treturn EncInt8(v)\n\tcase int32:\n\t\treturn EncInt32(v)\n\tcase int16:\n\t\treturn EncInt16(v)\n\tcase int64:\n\t\treturn EncInt64(v)\n\tcase int:\n\t\treturn EncInt(v)\n\n\tcase uint8:\n\t\treturn EncUint8(v)\n\tcase uint16:\n\t\treturn EncUint16(v)\n\tcase uint32:\n\t\treturn EncUint32(v)\n\tcase uint64:\n\t\treturn EncUint64(v)\n\tcase uint:\n\t\treturn EncUint(v)\n\n\tcase big.Int:\n\t\treturn EncBigInt(v)\n\tcase string:\n\t\treturn EncString(v)\n\n\tcase *int8:\n\t\treturn EncInt8R(v)\n\tcase *int16:\n\t\treturn EncInt16R(v)\n\tcase *int32:\n\t\treturn EncInt32R(v)\n\tcase *int64:\n\t\treturn EncInt64R(v)\n\tcase *int:\n\t\treturn EncIntR(v)\n\n\tcase *uint8:\n\t\treturn EncUint8R(v)\n\tcase *uint16:\n\t\treturn EncUint16R(v)\n\tcase *uint32:\n\t\treturn EncUint32R(v)\n\tcase *uint64:\n\t\treturn EncUint64R(v)\n\tcase *uint:\n\t\treturn EncUintR(v)\n\n\tcase *big.Int:\n\t\treturn EncBigIntR(v)\n\tcase *string:\n\t\treturn EncStringR(v)\n\tdefault:\n\t\t// Custom types (type MyInt int) can be serialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.TypeOf(value)\n\t\tif rv.Kind() != reflect.Ptr {\n\t\t\treturn EncReflect(reflect.ValueOf(v))\n\t\t}\n\t\treturn EncReflectR(reflect.ValueOf(v))\n\t}\n}\n"
  },
  {
    "path": "serialization/varint/marshal_bigint_test.go",
    "content": "package varint\n\nimport (\n\t\"bytes\"\n\t\"math\"\n\t\"math/big\"\n\t\"math/rand\"\n\t\"testing\"\n)\n\nfunc TestEnc2BigInt(t *testing.T) {\n\tt.Parallel()\n\n\tgenData := func(v int64) []byte {\n\t\tdata := []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\t\tout := make([]byte, 0)\n\t\tadd := false\n\n\t\tfor i, b := range data {\n\t\t\tif !add {\n\t\t\t\tif v < 0 {\n\t\t\t\t\tif b != 255 || b == 255 && data[i+1] < 128 {\n\t\t\t\t\t\tadd = true\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif b != 0 || b == 0 && data[i+1] > 127 {\n\t\t\t\t\t\tadd = true\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tout = append(out, b)\n\t\t}\n\n\t\treturn out\n\t}\n\n\tt.Run(\"positive\", func(t *testing.T) {\n\t\trnd := rand.New(rand.NewSource(rand.Int63()))\n\t\tfor i := int64(math.MaxInt16); i < 1<<24; i = i + int64(rnd.Int31n(300)) {\n\t\t\texpected := genData(i)\n\n\t\t\treceived := EncBigIntRS(big.NewInt(i))\n\t\t\tif !bytes.Equal(expected, received) {\n\t\t\t\tt.Fatalf(\"%d\\nexpected:%x\\nreceived:%x\", i, expected, received)\n\t\t\t}\n\n\t\t\treceived = EncInt64Ext(i)\n\t\t\tif !bytes.Equal(expected, received) {\n\t\t\t\tt.Fatalf(\"%d\\nexpected:%x\\nreceived:%x\", i, expected, received)\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"negative\", func(t *testing.T) {\n\t\trnd := rand.New(rand.NewSource(rand.Int63()))\n\t\tfor i := int64(math.MinInt16); i > -1<<24; i = i - int64(rnd.Int31n(300)) {\n\t\t\texpected := genData(i)\n\n\t\t\treceived := EncBigIntRS(big.NewInt(i))\n\t\t\tif !bytes.Equal(expected, received) {\n\t\t\t\tt.Fatalf(\"%d\\nexpected:%x\\nreceived:%x\", i, expected, received)\n\t\t\t}\n\n\t\t\treceived = EncInt64Ext(i)\n\t\t\tif !bytes.Equal(expected, received) {\n\t\t\t\tt.Fatalf(\"%d\\nexpected:%x\\nreceived:%x\", i, expected, received)\n\t\t\t}\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "serialization/varint/marshal_custom.go",
    "content": "package varint\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\nconst supportedTypes = \"~int8, ~int16, ~int32, ~int64, ~int, ~uint8, ~uint16, ~uint32, ~uint64, ~uint, ~string, big.Int\"\n\nfunc EncReflect(v reflect.Value) ([]byte, error) {\n\tswitch v.Type().Kind() {\n\tcase reflect.Int8:\n\t\treturn EncInt8(int8(v.Int()))\n\tcase reflect.Int16:\n\t\treturn EncInt16(int16(v.Int()))\n\tcase reflect.Int32:\n\t\treturn EncInt32(int32(v.Int()))\n\tcase reflect.Int, reflect.Int64:\n\t\treturn EncInt64(v.Int())\n\tcase reflect.Uint8:\n\t\treturn EncUint8(uint8(v.Uint()))\n\tcase reflect.Uint16:\n\t\treturn EncUint16(uint16(v.Uint()))\n\tcase reflect.Uint32:\n\t\treturn EncUint32(uint32(v.Uint()))\n\tcase reflect.Uint, reflect.Uint64:\n\t\treturn EncUint64(v.Uint())\n\tcase reflect.String:\n\t\treturn encReflectString(v)\n\tcase reflect.Struct:\n\t\tif v.Type().String() == \"gocql.unsetColumn\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to marshal varint: unsupported value type (%T)(%[1]v), supported types: %s, unsetColumn\", v.Interface(), supportedTypes)\n\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"failed to marshal varint: unsupported value type (%T)(%[1]v), supported types: %s, unsetColumn\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc EncReflectR(v reflect.Value) ([]byte, error) {\n\tif v.IsNil() {\n\t\treturn nil, nil\n\t}\n\treturn EncReflect(v.Elem())\n}\n\nfunc encReflectString(v reflect.Value) ([]byte, error) {\n\tval := v.String()\n\tswitch {\n\tcase len(val) == 0:\n\t\treturn nil, nil\n\tcase len(val) <= 18:\n\t\tn, err := strconv.ParseInt(val, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal varint: can not marshal (%T)(%[1]v), %s\", v.Interface(), err)\n\t\t}\n\t\treturn EncInt64Ext(n), nil\n\tcase len(val) <= 20:\n\t\tn, err := strconv.ParseInt(val, 10, 64)\n\t\tif err == nil {\n\t\t\treturn EncInt64Ext(n), nil\n\t\t}\n\n\t\tt, ok := 
new(big.Int).SetString(val, 10)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal varint: can not marshal (%T)(%[1]v)\", v.Interface())\n\t\t}\n\t\treturn EncBigIntRS(t), nil\n\tdefault:\n\t\tt, ok := new(big.Int).SetString(val, 10)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal varint: can not marshal (%T)(%[1]v)\", v.Interface())\n\t\t}\n\t\treturn EncBigIntRS(t), nil\n\t}\n}\n"
  },
  {
    "path": "serialization/varint/marshal_ints.go",
    "content": "package varint\n\nfunc EncInt8(v int8) ([]byte, error) {\n\treturn encInt8(v), nil\n}\n\nfunc EncInt8R(v *int8) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encInt8(*v), nil\n}\n\nfunc EncInt16(v int16) ([]byte, error) {\n\treturn encInt16(v), nil\n}\n\nfunc EncInt16R(v *int16) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encInt16(*v), nil\n}\n\nfunc EncInt32(v int32) ([]byte, error) {\n\treturn encInt32(v), nil\n}\n\nfunc EncInt32R(v *int32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encInt32(*v), nil\n}\n\nfunc EncInt64(v int64) ([]byte, error) {\n\treturn EncInt64Ext(v), nil\n}\n\nfunc EncInt64R(v *int64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncInt64Ext(*v), nil\n}\n\nfunc EncInt(v int) ([]byte, error) {\n\treturn encInt(v), nil\n}\n\nfunc EncIntR(v *int) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encInt(*v), nil\n}\n\nfunc encInt8(v int8) []byte {\n\treturn []byte{byte(v)}\n}\n\nfunc encInt16(v int16) []byte {\n\tif v <= maxInt8 && v >= minInt8 {\n\t\treturn []byte{byte(v)}\n\t}\n\treturn []byte{byte(v >> 8), byte(v)}\n}\n\nfunc encInt32(v int32) []byte {\n\tif v <= maxInt8 && v >= minInt8 {\n\t\treturn []byte{byte(v)}\n\t}\n\tif v <= maxInt16 && v >= minInt16 {\n\t\treturn []byte{byte(v >> 8), byte(v)}\n\t}\n\tif v <= maxInt24 && v >= minInt24 {\n\t\treturn []byte{byte(v >> 16), byte(v >> 8), byte(v)}\n\t}\n\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n\nfunc EncInt64Ext(v int64) []byte {\n\tif v <= maxInt8 && v >= minInt8 {\n\t\treturn []byte{byte(v)}\n\t}\n\tif v <= maxInt16 && v >= minInt16 {\n\t\treturn []byte{byte(v >> 8), byte(v)}\n\t}\n\tif v <= maxInt24 && v >= minInt24 {\n\t\treturn []byte{byte(v >> 16), byte(v >> 8), byte(v)}\n\t}\n\tif v <= maxInt32 && v >= minInt32 {\n\t\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\t}\n\tif v <= maxInt40 && v 
>= minInt40 {\n\t\treturn []byte{byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\t}\n\tif v <= maxInt48 && v >= minInt48 {\n\t\treturn []byte{byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\t}\n\tif v <= maxInt56 && v >= minInt56 {\n\t\treturn []byte{byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\t}\n\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n\nfunc encInt(v int) []byte {\n\tif v <= maxInt8 && v >= minInt8 {\n\t\treturn []byte{byte(v)}\n\t}\n\tif v <= maxInt16 && v >= minInt16 {\n\t\treturn []byte{byte(v >> 8), byte(v)}\n\t}\n\tif v <= maxInt24 && v >= minInt24 {\n\t\treturn []byte{byte(v >> 16), byte(v >> 8), byte(v)}\n\t}\n\tif v <= maxInt32 && v >= minInt32 {\n\t\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\t}\n\tif v <= maxInt40 && v >= minInt40 {\n\t\treturn []byte{byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\t}\n\tif v <= maxInt48 && v >= minInt48 {\n\t\treturn []byte{byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\t}\n\tif v <= maxInt56 && v >= minInt56 {\n\t\treturn []byte{byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\t}\n\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n}\n"
  },
  {
    "path": "serialization/varint/marshal_uints.go",
    "content": "package varint\n\nfunc EncUint8(v uint8) ([]byte, error) {\n\treturn encUint8(v), nil\n}\n\nfunc EncUint8R(v *uint8) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encUint8(*v), nil\n}\n\nfunc EncUint16(v uint16) ([]byte, error) {\n\treturn encUint16(v), nil\n}\n\nfunc EncUint16R(v *uint16) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encUint16(*v), nil\n}\n\nfunc EncUint32(v uint32) ([]byte, error) {\n\treturn encUint32(v), nil\n}\n\nfunc EncUint32R(v *uint32) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encUint32(*v), nil\n}\n\nfunc EncUint64(v uint64) ([]byte, error) {\n\treturn encUint64(v), nil\n}\n\nfunc EncUint64R(v *uint64) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encUint64(*v), nil\n}\n\nfunc EncUint(v uint) ([]byte, error) {\n\treturn encUint(v), nil\n}\n\nfunc EncUintR(v *uint) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn encUint(*v), nil\n}\n\nfunc encUint8(v uint8) []byte {\n\tif v > maxInt8 {\n\t\treturn []byte{0, v}\n\t}\n\treturn []byte{v}\n}\n\nfunc encUint16(v uint16) []byte {\n\tswitch {\n\tcase byte(v>>15) != 0:\n\t\treturn []byte{0, byte(v >> 8), byte(v)}\n\tcase byte(v>>7) != 0:\n\t\treturn []byte{byte(v >> 8), byte(v)}\n\tdefault:\n\t\treturn []byte{byte(v)}\n\t}\n}\n\nfunc encUint32(v uint32) []byte {\n\tswitch {\n\tcase byte(v>>31) != 0:\n\t\treturn []byte{0, byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>23) != 0:\n\t\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>15) != 0:\n\t\treturn []byte{byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>7) != 0:\n\t\treturn []byte{byte(v >> 8), byte(v)}\n\tdefault:\n\t\treturn []byte{byte(v)}\n\t}\n}\n\nfunc encUint64(v uint64) []byte {\n\tswitch {\n\tcase byte(v>>63) != 0:\n\t\treturn []byte{0, byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 
8), byte(v)}\n\tcase byte(v>>55) != 0:\n\t\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>47) != 0:\n\t\treturn []byte{byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>39) != 0:\n\t\treturn []byte{byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>31) != 0:\n\t\treturn []byte{byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>23) != 0:\n\t\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>15) != 0:\n\t\treturn []byte{byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>7) != 0:\n\t\treturn []byte{byte(v >> 8), byte(v)}\n\tdefault:\n\t\treturn []byte{byte(v)}\n\t}\n}\n\nfunc encUint(v uint) []byte {\n\tswitch {\n\tcase byte(v>>63) != 0:\n\t\treturn []byte{0, byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>55) != 0:\n\t\treturn []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>47) != 0:\n\t\treturn []byte{byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>39) != 0:\n\t\treturn []byte{byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>31) != 0:\n\t\treturn []byte{byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>23) != 0:\n\t\treturn []byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>15) != 0:\n\t\treturn []byte{byte(v >> 16), byte(v >> 8), byte(v)}\n\tcase byte(v>>7) != 0:\n\t\treturn []byte{byte(v >> 8), byte(v)}\n\tdefault:\n\t\treturn []byte{byte(v)}\n\t}\n}\n"
  },
  {
    "path": "serialization/varint/marshal_utils.go",
    "content": "package varint\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math/big\"\n\t\"strconv\"\n)\n\nconst (\n\tmaxInt8  = 1<<7 - 1\n\tmaxInt16 = 1<<15 - 1\n\tmaxInt24 = 1<<23 - 1\n\tmaxInt32 = 1<<31 - 1\n\tmaxInt40 = 1<<39 - 1\n\tmaxInt48 = 1<<47 - 1\n\tmaxInt56 = 1<<55 - 1\n\tmaxInt64 = 1<<63 - 1\n\n\tminInt8  = -1 << 7\n\tminInt16 = -1 << 15\n\tminInt24 = -1 << 23\n\tminInt32 = -1 << 31\n\tminInt40 = -1 << 39\n\tminInt48 = -1 << 47\n\tminInt56 = -1 << 55\n)\n\nfunc EncBigInt(v big.Int) ([]byte, error) {\n\treturn encBigInt(v), nil\n}\n\nfunc EncBigIntR(v *big.Int) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncBigIntRS(v), nil\n}\n\nfunc EncString(v string) ([]byte, error) {\n\tswitch {\n\tcase len(v) == 0:\n\t\treturn nil, nil\n\tcase len(v) <= 18:\n\t\tn, err := strconv.ParseInt(v, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal varint: can not marshal %#v, %s\", v, err)\n\t\t}\n\t\treturn EncInt64Ext(n), nil\n\tcase len(v) <= 20:\n\t\tn, err := strconv.ParseInt(v, 10, 64)\n\t\tif err == nil {\n\t\t\treturn EncInt64Ext(n), nil\n\t\t}\n\n\t\tt, ok := new(big.Int).SetString(v, 10)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal varint: can not marshal %#v\", v)\n\t\t}\n\t\treturn EncBigIntRS(t), nil\n\tdefault:\n\t\tt, ok := new(big.Int).SetString(v, 10)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal varint: can not marshal %#v\", v)\n\t\t}\n\t\treturn EncBigIntRS(t), nil\n\t}\n}\n\nfunc EncStringR(v *string) ([]byte, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\treturn EncString(*v)\n}\n\nfunc encBigInt(v big.Int) []byte {\n\tswitch v.Sign() {\n\tcase 1:\n\t\tdata := v.Bytes()\n\t\tif data[0] > math.MaxInt8 {\n\t\t\tdata = append([]byte{0}, data...)\n\t\t}\n\t\treturn data\n\tcase -1:\n\t\tdata := v.Bytes()\n\t\tadd := true\n\t\tfor i := len(data) - 1; i >= 0; i-- {\n\t\t\tif !add {\n\t\t\t\tdata[i] = 255 - data[i]\n\t\t\t} else {\n\t\t\t\tdata[i] = 255 - 
data[i] + 1\n\t\t\t\tif data[i] != 0 {\n\t\t\t\t\tadd = false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif data[0] < 128 {\n\t\t\tdata = append([]byte{255}, data...)\n\t\t}\n\t\treturn data\n\tdefault:\n\t\treturn []byte{0}\n\t}\n}\n\n// EncBigIntRS encode big.Int to []byte.\n// This function shared to use in marshal `decimal`.\nfunc EncBigIntRS(v *big.Int) []byte {\n\tswitch v.Sign() {\n\tcase 1:\n\t\tdata := v.Bytes()\n\t\tif data[0] > math.MaxInt8 {\n\t\t\tdata = append([]byte{0}, data...)\n\t\t}\n\t\treturn data\n\tcase -1:\n\t\tdata := v.Bytes()\n\t\tadd := true\n\t\tfor i := len(data) - 1; i >= 0; i-- {\n\t\t\tif !add {\n\t\t\t\tdata[i] = 255 - data[i]\n\t\t\t} else {\n\t\t\t\tdata[i] = 255 - data[i] + 1\n\t\t\t\tif data[i] != 0 {\n\t\t\t\t\tadd = false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif data[0] < 128 {\n\t\t\tdata = append([]byte{255}, data...)\n\t\t}\n\t\treturn data\n\tdefault:\n\t\treturn []byte{0}\n\t}\n}\n"
  },
  {
    "path": "serialization/varint/unmarshal.go",
    "content": "package varint\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n)\n\nfunc Unmarshal(data []byte, value any) error {\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn nil\n\n\tcase *int8:\n\t\treturn DecInt8(data, v)\n\tcase *int16:\n\t\treturn DecInt16(data, v)\n\tcase *int32:\n\t\treturn DecInt32(data, v)\n\tcase *int64:\n\t\treturn DecInt64(data, v)\n\tcase *int:\n\t\treturn DecInt(data, v)\n\n\tcase *uint8:\n\t\treturn DecUint8(data, v)\n\tcase *uint16:\n\t\treturn DecUint16(data, v)\n\tcase *uint32:\n\t\treturn DecUint32(data, v)\n\tcase *uint64:\n\t\treturn DecUint64(data, v)\n\tcase *uint:\n\t\treturn DecUint(data, v)\n\n\tcase *big.Int:\n\t\treturn DecBigInt(data, v)\n\tcase *string:\n\t\treturn DecString(data, v)\n\n\tcase **int8:\n\t\treturn DecInt8R(data, v)\n\tcase **int16:\n\t\treturn DecInt16R(data, v)\n\tcase **int32:\n\t\treturn DecInt32R(data, v)\n\tcase **int64:\n\t\treturn DecInt64R(data, v)\n\tcase **int:\n\t\treturn DecIntR(data, v)\n\n\tcase **uint8:\n\t\treturn DecUint8R(data, v)\n\tcase **uint16:\n\t\treturn DecUint16R(data, v)\n\tcase **uint32:\n\t\treturn DecUint32R(data, v)\n\tcase **uint64:\n\t\treturn DecUint64R(data, v)\n\tcase **uint:\n\t\treturn DecUintR(data, v)\n\n\tcase **big.Int:\n\t\treturn DecBigIntR(data, v)\n\tcase **string:\n\t\treturn DecStringR(data, v)\n\tdefault:\n\n\t\t// Custom types (type MyInt int) can be deserialized only via `reflect` package.\n\t\t// Later, when generic-based serialization is introduced we can do that via generics.\n\t\trv := reflect.ValueOf(value)\n\t\trt := rv.Type()\n\t\tif rt.Kind() != reflect.Ptr {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: unsupported value type (%T)(%#[1]v), supported types: %s\", value, supportedTypes)\n\t\t}\n\t\tif rt.Elem().Kind() != reflect.Ptr {\n\t\t\treturn DecReflect(data, rv)\n\t\t}\n\t\treturn DecReflectR(data, rv)\n\t}\n}\n"
  },
  {
    "path": "serialization/varint/unmarshal_bigint_test.go",
    "content": "package varint\n\nimport (\n\t\"math\"\n\t\"math/big\"\n\t\"math/rand\"\n\t\"testing\"\n)\n\nfunc TestDec2BigInt(t *testing.T) {\n\tt.Parallel()\n\n\tgenData := func(v int64) []byte {\n\t\tdata := []byte{byte(v >> 56), byte(v >> 48), byte(v >> 40), byte(v >> 32), byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)}\n\t\tout := make([]byte, 0)\n\t\tadd := false\n\t\tfor i, b := range data {\n\t\t\tif !add {\n\t\t\t\tif v < 0 {\n\t\t\t\t\tif b != 255 || b == 255 && data[i+1] < 128 {\n\t\t\t\t\t\tadd = true\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif b != 0 || b == 0 && data[i+1] > 127 {\n\t\t\t\t\t\tadd = true\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tout = append(out, b)\n\t\t}\n\n\t\treturn out\n\t}\n\n\tt.Run(\"positive\", func(t *testing.T) {\n\t\trnd := rand.New(rand.NewSource(rand.Int63()))\n\t\tfor i := int64(math.MaxInt16); i < 1<<23; i = i + int64(rnd.Int31n(300)) {\n\t\t\tdata := genData(i)\n\t\t\texpected := big.NewInt(i)\n\n\t\t\treceived := Dec2BigInt(data)\n\t\t\tif expected.Cmp(received) != 0 {\n\t\t\t\tt.Fatalf(\"%d\\nexpected:%s\\nreceived:%s\", i, expected, received)\n\t\t\t}\n\n\t\t\t_ = DecBigInt(data, received)\n\t\t\tif expected.Cmp(received) != 0 {\n\t\t\t\tt.Fatalf(\"%d\\nexpected:%s\\nreceived:%s\", i, expected, received)\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"negative\", func(t *testing.T) {\n\t\trnd := rand.New(rand.NewSource(rand.Int63()))\n\t\tfor i := int64(math.MinInt16); i > -1<<23; i = i - int64(rnd.Int31n(300)) {\n\t\t\tdata := genData(i)\n\t\t\texpected := big.NewInt(i)\n\n\t\t\treceived := Dec2BigInt(data)\n\t\t\tif expected.Cmp(received) != 0 {\n\t\t\t\tt.Fatalf(\"%d\\nexpected:%s\\nreceived:%s\", i, expected, received)\n\t\t\t}\n\n\t\t\t_ = DecBigInt(data, received)\n\t\t\tif expected.Cmp(received) != 0 {\n\t\t\t\tt.Fatalf(\"%d\\nexpected:%s\\nreceived:%s\", i, expected, received)\n\t\t\t}\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "serialization/varint/unmarshal_custom.go",
    "content": "package varint\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n)\n\nfunc DecReflect(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: can not unmarshal into nil reference (%T)(%#[1]v)\", v.Interface())\n\t}\n\n\tswitch v = v.Elem(); v.Kind() {\n\tcase reflect.Int8:\n\t\treturn decReflectInt8(p, v)\n\tcase reflect.Int16:\n\t\treturn decReflectInt16(p, v)\n\tcase reflect.Int32:\n\t\treturn decReflectInt32(p, v)\n\tcase reflect.Int64, reflect.Int:\n\t\treturn decReflectInts(p, v)\n\tcase reflect.Uint8:\n\t\treturn decReflectUint8(p, v)\n\tcase reflect.Uint16:\n\t\treturn decReflectUint16(p, v)\n\tcase reflect.Uint32:\n\t\treturn decReflectUint32(p, v)\n\tcase reflect.Uint64, reflect.Uint:\n\t\treturn decReflectUints(p, v)\n\tcase reflect.String:\n\t\treturn decReflectString(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: unsupported value type (%T)(%#[1]v), supported types: %s\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc decReflectInt8(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\tcase 1:\n\t\tv.SetInt(dec1toInt64(p))\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the int8 range\", v.Interface())\n\t}\n\treturn nil\n}\n\nfunc decReflectInt16(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\t\treturn nil\n\tcase 1:\n\t\tv.SetInt(dec1toInt64(p))\n\t\treturn nil\n\tcase 2:\n\t\tv.SetInt(dec2toInt64(p))\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the int16 range\", v.Interface())\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decReflectInt32(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\t\treturn nil\n\tcase 1:\n\t\tv.SetInt(dec1toInt64(p))\n\t\treturn nil\n\tcase 2:\n\t\tv.SetInt(dec2toInt64(p))\n\tcase 
3:\n\t\tv.SetInt(dec3toInt64(p))\n\tcase 4:\n\t\tv.SetInt(dec4toInt64(p))\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the int32 range\", v.Interface())\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decReflectInts(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt(0)\n\t\treturn nil\n\tcase 1:\n\t\tv.SetInt(dec1toInt64(p))\n\t\treturn nil\n\tcase 2:\n\t\tv.SetInt(dec2toInt64(p))\n\tcase 3:\n\t\tv.SetInt(dec3toInt64(p))\n\tcase 4:\n\t\tv.SetInt(dec4toInt64(p))\n\tcase 5:\n\t\tv.SetInt(dec5toInt64(p))\n\tcase 6:\n\t\tv.SetInt(dec6toInt64(p))\n\tcase 7:\n\t\tv.SetInt(dec7toInt64(p))\n\tcase 8:\n\t\tv.SetInt(dec8toInt64(p))\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the int64 range\", v.Interface())\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decReflectUint8(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\t\treturn nil\n\tcase 1:\n\t\tv.SetUint(dec1toUint64(p))\n\t\treturn nil\n\tcase 2:\n\t\tif p[0] == 0 {\n\t\t\tv.SetUint(dec2toUint64(p))\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint8 range\", v.Interface())\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint8 range\", v.Interface())\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decReflectUint16(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\t\treturn nil\n\tcase 1:\n\t\tv.SetUint(dec1toUint64(p))\n\t\treturn nil\n\tcase 2:\n\t\tv.SetUint(dec2toUint64(p))\n\tcase 3:\n\t\tif p[0] == 0 {\n\t\t\tv.SetUint(dec3toUint64(p))\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint16 range\", v.Interface())\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to 
unmarshal varint: to unmarshal into %T, the data value should be in the uint16 range\", v.Interface())\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decReflectUint32(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\t\treturn nil\n\tcase 1:\n\t\tv.SetUint(dec1toUint64(p))\n\t\treturn nil\n\tcase 2:\n\t\tv.SetUint(dec2toUint64(p))\n\tcase 3:\n\t\tv.SetUint(dec3toUint64(p))\n\tcase 4:\n\t\tv.SetUint(dec4toUint64(p))\n\tcase 5:\n\t\tif p[0] == 0 {\n\t\t\tv.SetUint(dec5toUint64(p))\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint32 range\", v.Interface())\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint32 range\", v.Interface())\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decReflectUints(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetUint(0)\n\t\treturn nil\n\tcase 1:\n\t\tv.SetUint(dec1toUint64(p))\n\t\treturn nil\n\tcase 2:\n\t\tv.SetUint(dec2toUint64(p))\n\tcase 3:\n\t\tv.SetUint(dec3toUint64(p))\n\tcase 4:\n\t\tv.SetUint(dec4toUint64(p))\n\tcase 5:\n\t\tv.SetUint(dec5toUint64(p))\n\tcase 6:\n\t\tv.SetUint(dec6toUint64(p))\n\tcase 7:\n\t\tv.SetUint(dec7toUint64(p))\n\tcase 8:\n\t\tv.SetUint(dec8toUint64(p))\n\tcase 9:\n\t\tif p[0] == 0 {\n\t\t\tv.SetUint(dec9toUint64(p))\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint64 range\", v.Interface())\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint64 range\", v.Interface())\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decReflectString(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\tv.SetString(\"\")\n\t\t} else {\n\t\t\tv.SetString(\"0\")\n\t\t}\n\t\treturn nil\n\tcase 
1:\n\t\tv.SetString(strconv.FormatInt(dec1toInt64(p), 10))\n\t\treturn nil\n\tcase 2:\n\t\tv.SetString(strconv.FormatInt(dec2toInt64(p), 10))\n\tcase 3:\n\t\tv.SetString(strconv.FormatInt(dec3toInt64(p), 10))\n\tcase 4:\n\t\tv.SetString(strconv.FormatInt(dec4toInt64(p), 10))\n\tcase 5:\n\t\tv.SetString(strconv.FormatInt(dec5toInt64(p), 10))\n\tcase 6:\n\t\tv.SetString(strconv.FormatInt(dec6toInt64(p), 10))\n\tcase 7:\n\t\tv.SetString(strconv.FormatInt(dec7toInt64(p), 10))\n\tcase 8:\n\t\tv.SetString(strconv.FormatInt(dec8toInt64(p), 10))\n\tdefault:\n\t\tv.SetString(Dec2BigInt(p).String())\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecReflectR(p []byte, v reflect.Value) error {\n\tif v.IsNil() {\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: can not unmarshal into nil reference (%T)(%[1]v)\", v.Interface())\n\t}\n\n\tswitch v.Type().Elem().Elem().Kind() {\n\tcase reflect.Int8:\n\t\treturn decReflectInt8R(p, v)\n\tcase reflect.Int16:\n\t\treturn decReflectInt16R(p, v)\n\tcase reflect.Int32:\n\t\treturn decReflectInt32R(p, v)\n\tcase reflect.Int64, reflect.Int:\n\t\treturn decReflectIntsR(p, v)\n\tcase reflect.Uint8:\n\t\treturn decReflectUint8R(p, v)\n\tcase reflect.Uint16:\n\t\treturn decReflectUint16R(p, v)\n\tcase reflect.Uint32:\n\t\treturn decReflectUint32R(p, v)\n\tcase reflect.Uint64, reflect.Uint:\n\t\treturn decReflectUintsR(p, v)\n\tcase reflect.String:\n\t\treturn decReflectStringR(p, v)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: unsupported value type (%T)(%[1]v), supported types: %s\", v.Interface(), supportedTypes)\n\t}\n}\n\nfunc decReflectNullableR(p []byte, v reflect.Value) reflect.Value {\n\tif p == nil {\n\t\treturn reflect.Zero(v.Elem().Type())\n\t}\n\treturn reflect.New(v.Type().Elem().Elem())\n}\n\nfunc decReflectInt8R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\tcase 1:\n\t\tnewVal := 
reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec1toInt64(p))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the int8 range\", v.Interface())\n\t}\n\treturn nil\n}\n\nfunc decReflectInt16R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\t\treturn nil\n\tcase 1:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec1toInt64(p))\n\t\tv.Elem().Set(newVal)\n\t\treturn nil\n\tcase 2:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec2toInt64(p))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the int16 range\", v.Interface())\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decReflectInt32R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\t\treturn nil\n\tcase 1:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec1toInt64(p))\n\t\tv.Elem().Set(newVal)\n\t\treturn nil\n\tcase 2:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec2toInt64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 3:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec3toInt64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 4:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec4toInt64(p))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the int32 range\", v.Interface())\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decReflectIntsR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\t\treturn nil\n\tcase 1:\n\t\tnewVal := 
reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec1toInt64(p))\n\t\tv.Elem().Set(newVal)\n\t\treturn nil\n\tcase 2:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec2toInt64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 3:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec3toInt64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 4:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec4toInt64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 5:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec5toInt64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 6:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec6toInt64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 7:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec7toInt64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 8:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetInt(dec8toInt64(p))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the int64 range\", v.Interface())\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decReflectUint8R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\t\treturn nil\n\tcase 1:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec1toUint64(p))\n\t\tv.Elem().Set(newVal)\n\t\treturn nil\n\tcase 2:\n\t\tif p[0] == 0 {\n\t\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\t\tnewVal.Elem().SetUint(dec2toUint64(p))\n\t\t\tv.Elem().Set(newVal)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint8 range\", v.Interface())\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint8 range\", v.Interface())\n\t}\n\treturn 
errBrokenData(p)\n}\n\nfunc decReflectUint16R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\t\treturn nil\n\tcase 1:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec1toUint64(p))\n\t\tv.Elem().Set(newVal)\n\t\treturn nil\n\tcase 2:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec2toUint64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 3:\n\t\tif p[0] == 0 {\n\t\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\t\tnewVal.Elem().SetUint(dec3toUint64(p))\n\t\t\tv.Elem().Set(newVal)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint16 range\", v.Interface())\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint16 range\", v.Interface())\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decReflectUint32R(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\t\treturn nil\n\tcase 1:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec1toUint64(p))\n\t\tv.Elem().Set(newVal)\n\t\treturn nil\n\tcase 2:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec2toUint64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 3:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec3toUint64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 4:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec4toUint64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 5:\n\t\tif p[0] == 0 {\n\t\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\t\tnewVal.Elem().SetUint(dec5toUint64(p))\n\t\t\tv.Elem().Set(newVal)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint32 range\", v.Interface())\n\t\t}\n\tdefault:\n\t\treturn 
fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint32 range\", v.Interface())\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decReflectUintsR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.Elem().Set(decReflectNullableR(p, v))\n\t\treturn nil\n\tcase 1:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec1toUint64(p))\n\t\tv.Elem().Set(newVal)\n\t\treturn nil\n\tcase 2:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec2toUint64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 3:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec3toUint64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 4:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec4toUint64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 5:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec5toUint64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 6:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec6toUint64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 7:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec7toUint64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 8:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetUint(dec8toUint64(p))\n\t\tv.Elem().Set(newVal)\n\tcase 9:\n\t\tif p[0] == 0 {\n\t\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\t\tnewVal.Elem().SetUint(dec9toUint64(p))\n\t\t\tv.Elem().Set(newVal)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint64 range\", v.Interface())\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into %T, the data value should be in the uint64 range\", v.Interface())\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc decReflectStringR(p []byte, v reflect.Value) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tvar val 
reflect.Value\n\t\tif p == nil {\n\t\t\tval = reflect.Zero(v.Type().Elem())\n\t\t} else {\n\t\t\tval = reflect.New(v.Type().Elem().Elem())\n\t\t\tval.Elem().SetString(\"0\")\n\t\t}\n\t\tv.Elem().Set(val)\n\t\treturn nil\n\tcase 1:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(strconv.FormatInt(dec1toInt64(p), 10))\n\t\tv.Elem().Set(newVal)\n\t\treturn nil\n\tcase 2:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(strconv.FormatInt(dec2toInt64(p), 10))\n\t\tv.Elem().Set(newVal)\n\tcase 3:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(strconv.FormatInt(dec3toInt64(p), 10))\n\t\tv.Elem().Set(newVal)\n\tcase 4:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(strconv.FormatInt(dec4toInt64(p), 10))\n\t\tv.Elem().Set(newVal)\n\tcase 5:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(strconv.FormatInt(dec5toInt64(p), 10))\n\t\tv.Elem().Set(newVal)\n\tcase 6:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(strconv.FormatInt(dec6toInt64(p), 10))\n\t\tv.Elem().Set(newVal)\n\tcase 7:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(strconv.FormatInt(dec7toInt64(p), 10))\n\t\tv.Elem().Set(newVal)\n\tcase 8:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(strconv.FormatInt(dec8toInt64(p), 10))\n\t\tv.Elem().Set(newVal)\n\tdefault:\n\t\tnewVal := reflect.New(v.Type().Elem().Elem())\n\t\tnewVal.Elem().SetString(Dec2BigInt(p).String())\n\t\tv.Elem().Set(newVal)\n\t}\n\treturn errBrokenData(p)\n}\n"
  },
  {
    "path": "serialization/varint/unmarshal_ints.go",
    "content": "package varint\n\nimport (\n\t\"fmt\"\n)\n\nconst (\n\tnegInt16s8 = int16(-1) << 8\n\n\tnegInt32s8  = int32(-1) << 8\n\tnegInt32s16 = int32(-1) << 16\n\tnegInt32s24 = int32(-1) << 24\n\n\tnegInt64s8  = int64(-1) << 8\n\tnegInt64s16 = int64(-1) << 16\n\tnegInt64s24 = int64(-1) << 24\n\tnegInt64s32 = int64(-1) << 32\n\tnegInt64s40 = int64(-1) << 40\n\tnegInt64s48 = int64(-1) << 48\n\tnegInt64s56 = int64(-1) << 56\n\n\tnegIntS8  = int(-1) << 8\n\tnegIntS16 = int(-1) << 16\n\tnegIntS24 = int(-1) << 24\n\tnegIntS32 = int(-1) << 32\n\tnegIntS40 = int(-1) << 40\n\tnegIntS48 = int(-1) << 48\n\tnegIntS56 = int(-1) << 56\n)\n\nfunc DecInt8(p []byte, v *int8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\tcase 1:\n\t\t*v = dec1toInt8(p)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into int8, the data value should be in the int8 range\")\n\t}\n\treturn nil\n}\n\nfunc DecInt8R(p []byte, v **int8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int8)\n\t\t}\n\tcase 1:\n\t\tval := dec1toInt8(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into int8, the data value should be in the int8 range\")\n\t}\n\treturn nil\n}\n\nfunc DecInt16(p []byte, v *int16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\t\treturn nil\n\tcase 1:\n\t\t*v = dec1toInt16(p)\n\t\treturn nil\n\tcase 2:\n\t\t*v = dec2toInt16(p)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into int16, the data value should be in the int16 range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecInt16R(p []byte, v **int16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = 
new(int16)\n\t\t}\n\t\treturn nil\n\tcase 1:\n\t\tval := dec1toInt16(p)\n\t\t*v = &val\n\t\treturn nil\n\tcase 2:\n\t\tval := dec2toInt16(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into int16, the data value should be in the int16 range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecInt32(p []byte, v *int32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\t\treturn nil\n\tcase 1:\n\t\t*v = dec1toInt32(p)\n\t\treturn nil\n\tcase 2:\n\t\t*v = dec2toInt32(p)\n\tcase 3:\n\t\t*v = dec3toInt32(p)\n\tcase 4:\n\t\t*v = dec4toInt32(p)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into int32, the data value should be in the int32 range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecInt32R(p []byte, v **int32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int32)\n\t\t}\n\t\treturn nil\n\tcase 1:\n\t\tval := dec1toInt32(p)\n\t\t*v = &val\n\t\treturn nil\n\tcase 2:\n\t\tval := dec2toInt32(p)\n\t\t*v = &val\n\tcase 3:\n\t\tval := dec3toInt32(p)\n\t\t*v = &val\n\tcase 4:\n\t\tval := dec4toInt32(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into int32, the data value should be in the int32 range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecInt64(p []byte, v *int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\t\treturn nil\n\tcase 1:\n\t\t*v = dec1toInt64(p)\n\t\treturn nil\n\tcase 2:\n\t\t*v = dec2toInt64(p)\n\tcase 3:\n\t\t*v = dec3toInt64(p)\n\tcase 4:\n\t\t*v = dec4toInt64(p)\n\tcase 5:\n\t\t*v = dec5toInt64(p)\n\tcase 6:\n\t\t*v = dec6toInt64(p)\n\tcase 7:\n\t\t*v = dec7toInt64(p)\n\tcase 8:\n\t\t*v = dec8toInt64(p)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into int64, the data 
value should be in the int64 range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecInt64R(p []byte, v **int64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int64)\n\t\t}\n\t\treturn nil\n\tcase 1:\n\t\tval := dec1toInt64(p)\n\t\t*v = &val\n\t\treturn nil\n\tcase 2:\n\t\tval := dec2toInt64(p)\n\t\t*v = &val\n\tcase 3:\n\t\tval := dec3toInt64(p)\n\t\t*v = &val\n\tcase 4:\n\t\tval := dec4toInt64(p)\n\t\t*v = &val\n\tcase 5:\n\t\tval := dec5toInt64(p)\n\t\t*v = &val\n\tcase 6:\n\t\tval := dec6toInt64(p)\n\t\t*v = &val\n\tcase 7:\n\t\tval := dec7toInt64(p)\n\t\t*v = &val\n\tcase 8:\n\t\tval := dec8toInt64(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into int64, the data value should be in the int64 range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecInt(p []byte, v *int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\t\treturn nil\n\tcase 1:\n\t\t*v = dec1toInt(p)\n\t\treturn nil\n\tcase 2:\n\t\t*v = dec2toInt(p)\n\tcase 3:\n\t\t*v = dec3toInt(p)\n\tcase 4:\n\t\t*v = dec4toInt(p)\n\tcase 5:\n\t\t*v = dec5toInt(p)\n\tcase 6:\n\t\t*v = dec6toInt(p)\n\tcase 7:\n\t\t*v = dec7toInt(p)\n\tcase 8:\n\t\t*v = dec8toInt(p)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into int, the data value should be in the int range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecIntR(p []byte, v **int) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(int)\n\t\t}\n\t\treturn nil\n\tcase 1:\n\t\tval := dec1toInt(p)\n\t\t*v = &val\n\t\treturn nil\n\tcase 2:\n\t\tval := dec2toInt(p)\n\t\t*v = &val\n\tcase 3:\n\t\tval := dec3toInt(p)\n\t\t*v = &val\n\tcase 4:\n\t\tval := dec4toInt(p)\n\t\t*v = &val\n\tcase 5:\n\t\tval := 
dec5toInt(p)\n\t\t*v = &val\n\tcase 6:\n\t\tval := dec6toInt(p)\n\t\t*v = &val\n\tcase 7:\n\t\tval := dec7toInt(p)\n\t\t*v = &val\n\tcase 8:\n\t\tval := dec8toInt(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into int, the data value should be in the int range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc dec1toInt8(p []byte) int8 {\n\treturn int8(p[0])\n}\n\nfunc dec1toInt16(p []byte) int16 {\n\tif p[0] > 127 {\n\t\treturn negInt16s8 | int16(p[0])\n\t}\n\treturn int16(p[0])\n}\n\nfunc dec1toInt32(p []byte) int32 {\n\tif p[0] > 127 {\n\t\treturn negInt32s8 | int32(p[0])\n\t}\n\treturn int32(p[0])\n}\n\nfunc dec1toInt64(p []byte) int64 {\n\tif p[0] > 127 {\n\t\treturn negInt64s8 | int64(p[0])\n\t}\n\treturn int64(p[0])\n}\n\nfunc dec1toInt(p []byte) int {\n\tif p[0] > 127 {\n\t\treturn negIntS8 | int(p[0])\n\t}\n\treturn int(p[0])\n}\n\nfunc dec2toInt16(p []byte) int16 {\n\treturn int16(p[0])<<8 | int16(p[1])\n}\n\nfunc dec2toInt32(p []byte) int32 {\n\tif p[0] > 127 {\n\t\treturn negInt32s16 | int32(p[0])<<8 | int32(p[1])\n\t}\n\treturn int32(p[0])<<8 | int32(p[1])\n}\n\nfunc dec2toInt64(p []byte) int64 {\n\tif p[0] > 127 {\n\t\treturn negInt64s16 | int64(p[0])<<8 | int64(p[1])\n\t}\n\treturn int64(p[0])<<8 | int64(p[1])\n}\n\nfunc dec2toInt(p []byte) int {\n\tif p[0] > 127 {\n\t\treturn negIntS16 | int(p[0])<<8 | int(p[1])\n\t}\n\treturn int(p[0])<<8 | int(p[1])\n}\n\nfunc dec3toInt32(p []byte) int32 {\n\tif p[0] > 127 {\n\t\treturn negInt32s24 | int32(p[0])<<16 | int32(p[1])<<8 | int32(p[2])\n\t}\n\treturn int32(p[0])<<16 | int32(p[1])<<8 | int32(p[2])\n}\n\nfunc dec3toInt64(p []byte) int64 {\n\tif p[0] > 127 {\n\t\treturn negInt64s24 | int64(p[0])<<16 | int64(p[1])<<8 | int64(p[2])\n\t}\n\treturn int64(p[0])<<16 | int64(p[1])<<8 | int64(p[2])\n}\n\nfunc dec3toInt(p []byte) int {\n\tif p[0] > 127 {\n\t\treturn negIntS24 | int(p[0])<<16 | int(p[1])<<8 | int(p[2])\n\t}\n\treturn int(p[0])<<16 | int(p[1])<<8 | 
int(p[2])\n}\n\nfunc dec4toInt32(p []byte) int32 {\n\treturn int32(p[0])<<24 | int32(p[1])<<16 | int32(p[2])<<8 | int32(p[3])\n}\n\nfunc dec4toInt64(p []byte) int64 {\n\tif p[0] > 127 {\n\t\treturn negInt64s32 | int64(p[0])<<24 | int64(p[1])<<16 | int64(p[2])<<8 | int64(p[3])\n\t}\n\treturn int64(p[0])<<24 | int64(p[1])<<16 | int64(p[2])<<8 | int64(p[3])\n}\n\nfunc dec4toInt(p []byte) int {\n\tif p[0] > 127 {\n\t\treturn negIntS32 | int(p[0])<<24 | int(p[1])<<16 | int(p[2])<<8 | int(p[3])\n\t}\n\treturn int(p[0])<<24 | int(p[1])<<16 | int(p[2])<<8 | int(p[3])\n}\n\nfunc dec5toInt64(p []byte) int64 {\n\tif p[0] > 127 {\n\t\treturn negInt64s40 | int64(p[0])<<32 | int64(p[1])<<24 | int64(p[2])<<16 | int64(p[3])<<8 | int64(p[4])\n\t}\n\treturn int64(p[0])<<32 | int64(p[1])<<24 | int64(p[2])<<16 | int64(p[3])<<8 | int64(p[4])\n}\n\nfunc dec5toInt(p []byte) int {\n\tif p[0] > 127 {\n\t\treturn negIntS40 | int(p[0])<<32 | int(p[1])<<24 | int(p[2])<<16 | int(p[3])<<8 | int(p[4])\n\t}\n\treturn int(p[0])<<32 | int(p[1])<<24 | int(p[2])<<16 | int(p[3])<<8 | int(p[4])\n}\n\nfunc dec6toInt64(p []byte) int64 {\n\tif p[0] > 127 {\n\t\treturn negInt64s48 | int64(p[0])<<40 | int64(p[1])<<32 | int64(p[2])<<24 | int64(p[3])<<16 | int64(p[4])<<8 | int64(p[5])\n\t}\n\treturn int64(p[0])<<40 | int64(p[1])<<32 | int64(p[2])<<24 | int64(p[3])<<16 | int64(p[4])<<8 | int64(p[5])\n}\n\nfunc dec6toInt(p []byte) int {\n\tif p[0] > 127 {\n\t\treturn negIntS48 | int(p[0])<<40 | int(p[1])<<32 | int(p[2])<<24 | int(p[3])<<16 | int(p[4])<<8 | int(p[5])\n\t}\n\treturn int(p[0])<<40 | int(p[1])<<32 | int(p[2])<<24 | int(p[3])<<16 | int(p[4])<<8 | int(p[5])\n}\n\nfunc dec7toInt64(p []byte) int64 {\n\tif p[0] > 127 {\n\t\treturn negInt64s56 | int64(p[0])<<48 | int64(p[1])<<40 | int64(p[2])<<32 | int64(p[3])<<24 | int64(p[4])<<16 | int64(p[5])<<8 | int64(p[6])\n\t}\n\treturn int64(p[0])<<48 | int64(p[1])<<40 | int64(p[2])<<32 | int64(p[3])<<24 | int64(p[4])<<16 | int64(p[5])<<8 | int64(p[6])\n}\n\nfunc 
dec7toInt(p []byte) int {\n\tif p[0] > 127 {\n\t\treturn negIntS56 | int(p[0])<<48 | int(p[1])<<40 | int(p[2])<<32 | int(p[3])<<24 | int(p[4])<<16 | int(p[5])<<8 | int(p[6])\n\t}\n\treturn int(p[0])<<48 | int(p[1])<<40 | int(p[2])<<32 | int(p[3])<<24 | int(p[4])<<16 | int(p[5])<<8 | int(p[6])\n}\n\nfunc dec8toInt64(p []byte) int64 {\n\treturn int64(p[0])<<56 | int64(p[1])<<48 | int64(p[2])<<40 | int64(p[3])<<32 | int64(p[4])<<24 | int64(p[5])<<16 | int64(p[6])<<8 | int64(p[7])\n}\n\nfunc dec8toInt(p []byte) int {\n\treturn int(p[0])<<56 | int(p[1])<<48 | int(p[2])<<40 | int(p[3])<<32 | int(p[4])<<24 | int(p[5])<<16 | int(p[6])<<8 | int(p[7])\n}\n"
  },
  {
    "path": "serialization/varint/unmarshal_uints.go",
    "content": "package varint\n\nimport (\n\t\"fmt\"\n)\n\nfunc DecUint8(p []byte, v *uint8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\t\treturn nil\n\tcase 1:\n\t\t*v = dec1toUint8(p)\n\t\treturn nil\n\tcase 2:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint8, the data value should be in the uint8 range\")\n\t\t}\n\t\t*v = dec2toUint8(p)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint8, the data value should be in the uint8 range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecUint8R(p []byte, v **uint8) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint8)\n\t\t}\n\t\treturn nil\n\tcase 1:\n\t\tval := dec1toUint8(p)\n\t\t*v = &val\n\t\treturn nil\n\tcase 2:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint8, the data value should be in the uint8 range\")\n\t\t}\n\t\tval := dec2toUint8(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint8, the data value should be in the uint8 range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecUint16(p []byte, v *uint16) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\t\treturn nil\n\tcase 1:\n\t\t*v = dec1toUint16(p)\n\t\treturn nil\n\tcase 2:\n\t\t*v = dec2toUint16(p)\n\tcase 3:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint16, the data value should be in the uint16 range\")\n\t\t}\n\t\t*v = dec3toUint16(p)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint16, the data value should be in the uint16 range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecUint16R(p []byte, v **uint16) error {\n\tif v == nil {\n\t\treturn 
errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint16)\n\t\t}\n\t\treturn nil\n\tcase 1:\n\t\tval := dec1toUint16(p)\n\t\t*v = &val\n\t\treturn nil\n\tcase 2:\n\t\tval := dec2toUint16(p)\n\t\t*v = &val\n\tcase 3:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint16, the data value should be in the uint16 range\")\n\t\t}\n\t\tval := dec3toUint16(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint16, the data value should be in the uint16 range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecUint32(p []byte, v *uint32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\t\treturn nil\n\tcase 1:\n\t\t*v = dec1toUint32(p)\n\t\treturn nil\n\tcase 2:\n\t\t*v = dec2toUint32(p)\n\tcase 3:\n\t\t*v = dec3toUint32(p)\n\tcase 4:\n\t\t*v = dec4toUint32(p)\n\tcase 5:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint32, the data value should be in the uint32 range\")\n\t\t}\n\t\t*v = dec5toUint32(p)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint32, the data value should be in the uint32 range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecUint32R(p []byte, v **uint32) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint32)\n\t\t}\n\t\treturn nil\n\tcase 1:\n\t\tval := dec1toUint32(p)\n\t\t*v = &val\n\t\treturn nil\n\tcase 2:\n\t\tval := dec2toUint32(p)\n\t\t*v = &val\n\tcase 3:\n\t\tval := dec3toUint32(p)\n\t\t*v = &val\n\tcase 4:\n\t\tval := dec4toUint32(p)\n\t\t*v = &val\n\tcase 5:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint32, the data value should be in the uint32 range\")\n\t\t}\n\t\tval := 
dec5toUint32(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint32, the data value should be in the uint32 range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecUint64(p []byte, v *uint64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\t\treturn nil\n\tcase 1:\n\t\t*v = dec1toUint64(p)\n\t\treturn nil\n\tcase 2:\n\t\t*v = dec2toUint64(p)\n\tcase 3:\n\t\t*v = dec3toUint64(p)\n\tcase 4:\n\t\t*v = dec4toUint64(p)\n\tcase 5:\n\t\t*v = dec5toUint64(p)\n\tcase 6:\n\t\t*v = dec6toUint64(p)\n\tcase 7:\n\t\t*v = dec7toUint64(p)\n\tcase 8:\n\t\t*v = dec8toUint64(p)\n\tcase 9:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint64, the data value should be in the uint64 range\")\n\t\t}\n\t\t*v = dec9toUint64(p)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint64, the data value should be in the uint64 range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecUint64R(p []byte, v **uint64) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint64)\n\t\t}\n\t\treturn nil\n\tcase 1:\n\t\tval := dec1toUint64(p)\n\t\t*v = &val\n\t\treturn nil\n\tcase 2:\n\t\tval := dec2toUint64(p)\n\t\t*v = &val\n\tcase 3:\n\t\tval := dec3toUint64(p)\n\t\t*v = &val\n\tcase 4:\n\t\tval := dec4toUint64(p)\n\t\t*v = &val\n\tcase 5:\n\t\tval := dec5toUint64(p)\n\t\t*v = &val\n\tcase 6:\n\t\tval := dec6toUint64(p)\n\t\t*v = &val\n\tcase 7:\n\t\tval := dec7toUint64(p)\n\t\t*v = &val\n\tcase 8:\n\t\tval := dec8toUint64(p)\n\t\t*v = &val\n\tcase 9:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint64, the data value should be in the uint64 range\")\n\t\t}\n\t\tval := dec9toUint64(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to 
unmarshal into uint64, the data value should be in the uint64 range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecUint(p []byte, v *uint) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\t*v = 0\n\t\treturn nil\n\tcase 1:\n\t\t*v = dec1toUint(p)\n\t\treturn nil\n\tcase 2:\n\t\t*v = dec2toUint(p)\n\tcase 3:\n\t\t*v = dec3toUint(p)\n\tcase 4:\n\t\t*v = dec4toUint(p)\n\tcase 5:\n\t\t*v = dec5toUint(p)\n\tcase 6:\n\t\t*v = dec6toUint(p)\n\tcase 7:\n\t\t*v = dec7toUint(p)\n\tcase 8:\n\t\t*v = dec8toUint(p)\n\tcase 9:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint, the data value should be in the uint range\")\n\t\t}\n\t\t*v = dec9toUint(p)\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint, the data value should be in the uint range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecUintR(p []byte, v **uint) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\t*v = new(uint)\n\t\t}\n\t\treturn nil\n\tcase 1:\n\t\tval := dec1toUint(p)\n\t\t*v = &val\n\t\treturn nil\n\tcase 2:\n\t\tval := dec2toUint(p)\n\t\t*v = &val\n\tcase 3:\n\t\tval := dec3toUint(p)\n\t\t*v = &val\n\tcase 4:\n\t\tval := dec4toUint(p)\n\t\t*v = &val\n\tcase 5:\n\t\tval := dec5toUint(p)\n\t\t*v = &val\n\tcase 6:\n\t\tval := dec6toUint(p)\n\t\t*v = &val\n\tcase 7:\n\t\tval := dec7toUint(p)\n\t\t*v = &val\n\tcase 8:\n\t\tval := dec8toUint(p)\n\t\t*v = &val\n\tcase 9:\n\t\tif p[0] != 0 {\n\t\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint, the data value should be in the uint range\")\n\t\t}\n\t\tval := dec9toUint(p)\n\t\t*v = &val\n\tdefault:\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: to unmarshal into uint, the data value should be in the uint range\")\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc dec1toUint8(p []byte) uint8 {\n\treturn 
p[0]\n}\n\nfunc dec1toUint16(p []byte) uint16 {\n\treturn uint16(p[0])\n}\n\nfunc dec1toUint32(p []byte) uint32 {\n\treturn uint32(p[0])\n}\n\nfunc dec1toUint64(p []byte) uint64 {\n\treturn uint64(p[0])\n}\n\nfunc dec1toUint(p []byte) uint {\n\treturn uint(p[0])\n}\n\nfunc dec2toUint8(p []byte) uint8 {\n\treturn p[1]\n}\n\nfunc dec2toUint16(p []byte) uint16 {\n\treturn uint16(p[0])<<8 | uint16(p[1])\n}\n\nfunc dec2toUint32(p []byte) uint32 {\n\treturn uint32(p[0])<<8 | uint32(p[1])\n}\n\nfunc dec2toUint64(p []byte) uint64 {\n\treturn uint64(p[0])<<8 | uint64(p[1])\n}\n\nfunc dec2toUint(p []byte) uint {\n\treturn uint(p[0])<<8 | uint(p[1])\n}\n\nfunc dec3toUint16(p []byte) uint16 {\n\treturn uint16(p[1])<<8 | uint16(p[2])\n}\n\nfunc dec3toUint32(p []byte) uint32 {\n\treturn uint32(p[0])<<16 | uint32(p[1])<<8 | uint32(p[2])\n}\n\nfunc dec3toUint64(p []byte) uint64 {\n\treturn uint64(p[0])<<16 | uint64(p[1])<<8 | uint64(p[2])\n}\n\nfunc dec3toUint(p []byte) uint {\n\treturn uint(p[0])<<16 | uint(p[1])<<8 | uint(p[2])\n}\n\nfunc dec4toUint32(p []byte) uint32 {\n\treturn uint32(p[0])<<24 | uint32(p[1])<<16 | uint32(p[2])<<8 | uint32(p[3])\n}\n\nfunc dec4toUint64(p []byte) uint64 {\n\treturn uint64(p[0])<<24 | uint64(p[1])<<16 | uint64(p[2])<<8 | uint64(p[3])\n}\n\nfunc dec4toUint(p []byte) uint {\n\treturn uint(p[0])<<24 | uint(p[1])<<16 | uint(p[2])<<8 | uint(p[3])\n}\n\nfunc dec5toUint32(p []byte) uint32 {\n\treturn uint32(p[1])<<24 | uint32(p[2])<<16 | uint32(p[3])<<8 | uint32(p[4])\n}\n\nfunc dec5toUint64(p []byte) uint64 {\n\treturn uint64(p[0])<<32 | uint64(p[1])<<24 | uint64(p[2])<<16 | uint64(p[3])<<8 | uint64(p[4])\n}\n\nfunc dec5toUint(p []byte) uint {\n\treturn uint(p[0])<<32 | uint(p[1])<<24 | uint(p[2])<<16 | uint(p[3])<<8 | uint(p[4])\n}\n\nfunc dec6toUint64(p []byte) uint64 {\n\treturn uint64(p[0])<<40 | uint64(p[1])<<32 | uint64(p[2])<<24 | uint64(p[3])<<16 | uint64(p[4])<<8 | uint64(p[5])\n}\n\nfunc dec6toUint(p []byte) uint {\n\treturn uint(p[0])<<40 | 
uint(p[1])<<32 | uint(p[2])<<24 | uint(p[3])<<16 | uint(p[4])<<8 | uint(p[5])\n}\n\nfunc dec7toUint64(p []byte) uint64 {\n\treturn uint64(p[0])<<48 | uint64(p[1])<<40 | uint64(p[2])<<32 | uint64(p[3])<<24 | uint64(p[4])<<16 | uint64(p[5])<<8 | uint64(p[6])\n}\n\nfunc dec7toUint(p []byte) uint {\n\treturn uint(p[0])<<48 | uint(p[1])<<40 | uint(p[2])<<32 | uint(p[3])<<24 | uint(p[4])<<16 | uint(p[5])<<8 | uint(p[6])\n}\n\nfunc dec8toUint64(p []byte) uint64 {\n\treturn uint64(p[0])<<56 | uint64(p[1])<<48 | uint64(p[2])<<40 | uint64(p[3])<<32 | uint64(p[4])<<24 | uint64(p[5])<<16 | uint64(p[6])<<8 | uint64(p[7])\n}\n\nfunc dec8toUint(p []byte) uint {\n\treturn uint(p[0])<<56 | uint(p[1])<<48 | uint(p[2])<<40 | uint(p[3])<<32 | uint(p[4])<<24 | uint(p[5])<<16 | uint(p[6])<<8 | uint(p[7])\n}\n\nfunc dec9toUint64(p []byte) uint64 {\n\treturn uint64(p[1])<<56 | uint64(p[2])<<48 | uint64(p[3])<<40 | uint64(p[4])<<32 | uint64(p[5])<<24 | uint64(p[6])<<16 | uint64(p[7])<<8 | uint64(p[8])\n}\n\nfunc dec9toUint(p []byte) uint {\n\treturn uint(p[1])<<56 | uint(p[2])<<48 | uint(p[3])<<40 | uint(p[4])<<32 | uint(p[5])<<24 | uint(p[6])<<16 | uint(p[7])<<8 | uint(p[8])\n}\n"
  },
  {
    "path": "serialization/varint/unmarshal_utils.go",
    "content": "package varint\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"strconv\"\n)\n\nfunc errBrokenData(p []byte) error {\n\tif p[0] == 0 && p[1] <= 127 || p[0] == 255 && p[1] > 127 {\n\t\treturn fmt.Errorf(\"failed to unmarshal varint: the data is broken\")\n\t}\n\treturn nil\n}\n\nfunc errNilReference(v any) error {\n\treturn fmt.Errorf(\"failed to unmarshal varint: can not unmarshal into nil reference (%#v)\", v)\n}\n\nfunc DecString(p []byte, v *string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = \"\"\n\t\t} else {\n\t\t\t*v = \"0\"\n\t\t}\n\t\treturn nil\n\tcase 1:\n\t\t*v = strconv.FormatInt(dec1toInt64(p), 10)\n\t\treturn nil\n\tcase 2:\n\t\t*v = strconv.FormatInt(dec2toInt64(p), 10)\n\tcase 3:\n\t\t*v = strconv.FormatInt(dec3toInt64(p), 10)\n\tcase 4:\n\t\t*v = strconv.FormatInt(dec4toInt64(p), 10)\n\tcase 5:\n\t\t*v = strconv.FormatInt(dec5toInt64(p), 10)\n\tcase 6:\n\t\t*v = strconv.FormatInt(dec6toInt64(p), 10)\n\tcase 7:\n\t\t*v = strconv.FormatInt(dec7toInt64(p), 10)\n\tcase 8:\n\t\t*v = strconv.FormatInt(dec8toInt64(p), 10)\n\tdefault:\n\t\t*v = Dec2BigInt(p).String()\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecStringR(p []byte, v **string) error {\n\tif v == nil {\n\t\treturn errNilReference(v)\n\t}\n\tswitch len(p) {\n\tcase 0:\n\t\tif p == nil {\n\t\t\t*v = nil\n\t\t} else {\n\t\t\tval := \"0\"\n\t\t\t*v = &val\n\t\t}\n\t\treturn nil\n\tcase 1:\n\t\tval := strconv.FormatInt(dec1toInt64(p), 10)\n\t\t*v = &val\n\t\treturn nil\n\tcase 2:\n\t\tval := strconv.FormatInt(dec2toInt64(p), 10)\n\t\t*v = &val\n\tcase 3:\n\t\tval := strconv.FormatInt(dec3toInt64(p), 10)\n\t\t*v = &val\n\tcase 4:\n\t\tval := strconv.FormatInt(dec4toInt64(p), 10)\n\t\t*v = &val\n\tcase 5:\n\t\tval := strconv.FormatInt(dec5toInt64(p), 10)\n\t\t*v = &val\n\tcase 6:\n\t\tval := strconv.FormatInt(dec6toInt64(p), 10)\n\t\t*v = &val\n\tcase 7:\n\t\tval := strconv.FormatInt(dec7toInt64(p), 10)\n\t\t*v 
= &val\n\tcase 8:\n\t\tval := strconv.FormatInt(dec8toInt64(p), 10)\n\t\t*v = &val\n\tdefault:\n\t\tval := Dec2BigInt(p).String()\n\t\t*v = &val\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecBigInt(p []byte, v *big.Int) error {\n\tswitch len(p) {\n\tcase 0:\n\t\tv.SetInt64(0)\n\t\treturn nil\n\tcase 1:\n\t\tv.SetInt64(dec1toInt64(p))\n\t\treturn nil\n\tcase 2:\n\t\tv.SetInt64(dec2toInt64(p))\n\tcase 3:\n\t\tv.SetInt64(dec3toInt64(p))\n\tcase 4:\n\t\tv.SetInt64(dec4toInt64(p))\n\tcase 5:\n\t\tv.SetInt64(dec5toInt64(p))\n\tcase 6:\n\t\tv.SetInt64(dec6toInt64(p))\n\tcase 7:\n\t\tv.SetInt64(dec7toInt64(p))\n\tcase 8:\n\t\tv.SetInt64(dec8toInt64(p))\n\tdefault:\n\t\tdec2ToBigInt(p, v)\n\t}\n\treturn errBrokenData(p)\n}\n\nfunc DecBigIntR(p []byte, v **big.Int) error {\n\tif p != nil {\n\t\t*v = big.NewInt(0)\n\t\treturn DecBigInt(p, *v)\n\t}\n\t*v = nil\n\treturn nil\n}\n\n// Dec2BigInt decode p to big.Int. Use for cases with len(p)>=2.\n// This function shared to use in unmarshal `decimal`.\nfunc Dec2BigInt(p []byte) *big.Int {\n\t// Positive range processing\n\tif p[0] <= 127 {\n\t\treturn new(big.Int).SetBytes(p)\n\t}\n\t// negative range processing\n\tdata := make([]byte, len(p))\n\tcopy(data, p)\n\n\tadd := true\n\tfor i := len(data) - 1; i >= 0; i-- {\n\t\tif !add {\n\t\t\tdata[i] = 255 - data[i]\n\t\t} else {\n\t\t\tdata[i] = 255 - data[i] + 1\n\t\t\tif data[i] != 0 {\n\t\t\t\tadd = false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn new(big.Int).Neg(new(big.Int).SetBytes(data))\n}\n\nfunc dec2ToBigInt(p []byte, v *big.Int) {\n\tif p[0] <= 127 {\n\t\t// Positive range processing\n\t\tv.SetBytes(p)\n\t} else {\n\t\t// negative range processing\n\t\tdata := make([]byte, len(p))\n\t\tcopy(data, p)\n\n\t\tadd := true\n\t\tfor i := len(data) - 1; i >= 0; i-- {\n\t\t\tif !add {\n\t\t\t\tdata[i] = 255 - data[i]\n\t\t\t} else {\n\t\t\t\tdata[i] = 255 - data[i] + 1\n\t\t\t\tif data[i] != 0 {\n\t\t\t\t\tadd = 
false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tv.Set(new(big.Int).Neg(new(big.Int).SetBytes(data)))\n\t}\n}\n"
  },
  {
    "path": "session.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2012, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"maps\"\n\t\"net\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n\n\t\"github.com/gocql/gocql/debounce\"\n\t\"github.com/gocql/gocql/events\"\n\t\"github.com/gocql/gocql/internal/debug\"\n\t\"github.com/gocql/gocql/internal/eventbus\"\n\t\"github.com/gocql/gocql/internal/lru\"\n\t\"github.com/gocql/gocql/tablets\"\n)\n\n// Session is the interface used by users to interact with the database.\n//\n// It's safe for concurrent use by multiple goroutines and a typical usage\n// scenario is to have one global session object to interact with the\n// whole Cassandra cluster.\n//\n// This type extends the Node interface by adding a convenient query builder\n// and automatically sets a default consistency level on all operations\n// that do not have a consistency 
level set.\ntype Session struct {\n\twarningHandler            WarningHandler\n\tqueryObserver             QueryObserver\n\tcontrol                   controlConnection\n\tctx                       context.Context\n\tlogger                    StdLogger\n\ttrace                     Tracer\n\tpolicy                    HostSelectionPolicy\n\tbatchObserver             BatchObserver\n\tconnectObserver           ConnectObserver\n\tframeObserver             FrameHeaderObserver\n\tstreamObserver            StreamObserver\n\tinitErr                   error\n\tnodeEvents                *eventDebouncer\n\tstmtsLRU                  *preparedLRU\n\thostSource                *ringDescriber\n\tpool                      *policyConnPool\n\tringRefresher             *debounce.RefreshDebouncer\n\treadyCh                   chan struct{}\n\texecutor                  *queryExecutor\n\tcancel                    context.CancelFunc\n\tschemaEvents              *eventDebouncer\n\tmetadataDescriber         *metadataDescriber\n\teventBus                  *eventbus.EventBus[events.Event]\n\tconnCfg                   *ConnConfig\n\tclientRoutesHandler       *ClientRoutesHandler\n\troutingKeyInfoCache       routingKeyInfoLRU\n\taddressTranslator         AddressTranslator\n\tcfg                       ClusterConfig\n\tprefetch                  float64\n\tpageSize                  int\n\tmu                        sync.RWMutex\n\tsessionStateMu            sync.RWMutex\n\tcons                      Consistency\n\tisClosing                 bool\n\thasAggregatesAndFunctions bool\n\tuseSystemSchema           bool\n\ttabletsRoutingV1          bool\n\tisInitialized             bool\n\tisClosed                  bool\n}\n\nvar queryPool = &sync.Pool{\n\tNew: func() any {\n\t\treturn &Query{\n\t\t\troutingInfo: &queryRoutingInfo{},\n\t\t\tmetrics:     &queryMetrics{m: make(map[UUID]*hostMetrics)},\n\t\t\trefCount:    1,\n\t\t}\n\t},\n}\n\nfunc resolveInitialEndpoints(resolver DNSResolver, addrs []string, 
defaultPort int, logger StdLogger) ([]*HostInfo, error) {\n\tvar hosts []*HostInfo\n\tvar errs []error\n\tfor _, hostaddr := range addrs {\n\t\tresolvedHosts, err := resolveInitialEndpoint(resolver, hostaddr, defaultPort)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"failed to resolve endpoint %q: %w\", hostaddr, err)\n\t\t\terrs = append(errs, err)\n\t\t\tlogger.Println(err.Error())\n\t\t\tcontinue\n\t\t}\n\t\thosts = append(hosts, resolvedHosts...)\n\t}\n\tif len(hosts) == 0 {\n\t\treturn nil, fmt.Errorf(\"failed to resolve any of the provided hostnames: %w\", errors.Join(errs...))\n\t}\n\treturn hosts, nil\n}\n\nfunc newSessionCommon(cfg ClusterConfig) (*Session, error) {\n\tif err := cfg.Validate(); err != nil {\n\t\treturn nil, fmt.Errorf(\"gocql: unable to create session: cluster config validation failed: %v\", err)\n\t}\n\n\t// TODO: we should take a context in here at some point\n\tctx, cancel := context.WithCancel(context.TODO())\n\n\ts := &Session{\n\t\tcons:              cfg.Consistency,\n\t\tprefetch:          0.25,\n\t\tcfg:               cfg,\n\t\tpageSize:          cfg.PageSize,\n\t\tstmtsLRU:          &preparedLRU{lru: lru.New[stmtCacheKey](cfg.MaxPreparedStmts)},\n\t\tconnectObserver:   cfg.ConnectObserver,\n\t\tctx:               ctx,\n\t\tcancel:            cancel,\n\t\tlogger:            cfg.logger(),\n\t\taddressTranslator: cfg.AddressTranslator,\n\t\treadyCh:           make(chan struct{}, 1),\n\t}\n\n\tif cfg.ClientRoutesConfig != nil {\n\t\ts.clientRoutesHandler = NewClientRoutesAddressTranslator(*cfg.ClientRoutesConfig, s.cfg.DNSResolver, s.cfg.SslOpts != nil, s.logger)\n\t\ts.addressTranslator = s.clientRoutesHandler\n\t}\n\n\t// Close created resources on error otherwise they'll leak\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ts.Close()\n\t\t}\n\t}()\n\n\ts.metadataDescriber = newMetadataDescriber(s)\n\n\ts.eventBus = eventbus.New[events.Event](cfg.EventBusConfig, cfg.Logger)\n\tif err = s.eventBus.Start(); err != nil 
{\n\t\treturn nil, fmt.Errorf(\"gocql: unable to create session: %v\", err)\n\t}\n\n\ts.nodeEvents = newEventDebouncer(\"NodeEvents\", s.handleNodeEvent, s.logger)\n\ts.schemaEvents = newEventDebouncer(\"SchemaEvents\", s.handleSchemaEvent, s.logger)\n\n\ts.routingKeyInfoCache.lru = lru.New[string](cfg.MaxRoutingKeyInfo)\n\n\ts.hostSource = &ringDescriber{cfg: &s.cfg, logger: s.logger}\n\ts.ringRefresher = debounce.NewRefreshDebouncer(debounce.RingRefreshDebounceTime, func() error {\n\t\treturn s.refreshRing()\n\t})\n\n\tif cfg.PoolConfig.HostSelectionPolicy == nil {\n\t\tcfg.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())\n\t}\n\ts.pool = cfg.PoolConfig.buildPool(s)\n\n\ts.policy = cfg.PoolConfig.HostSelectionPolicy\n\ts.policy.Init(s)\n\n\ts.executor = &queryExecutor{\n\t\tpool:   s.pool,\n\t\tpolicy: cfg.PoolConfig.HostSelectionPolicy,\n\t}\n\n\ts.queryObserver = cfg.QueryObserver\n\ts.batchObserver = cfg.BatchObserver\n\ts.connectObserver = cfg.ConnectObserver\n\ts.frameObserver = cfg.FrameHeaderObserver\n\ts.streamObserver = cfg.StreamObserver\n\n\t//Check the TLS Config before trying to connect to anything external\n\tconnCfg, err := connConfig(&s.cfg)\n\tif err != nil {\n\t\t//TODO: Return a typed error\n\t\treturn nil, fmt.Errorf(\"gocql: unable to create session: %v\", err)\n\t}\n\ts.connCfg = connCfg\n\tif cfg.WarningsHandlerBuilder != nil {\n\t\ts.warningHandler = cfg.WarningsHandlerBuilder(s)\n\t}\n\treturn s, nil\n}\n\n// NewSession wraps an existing Node.\nfunc NewSession(cfg ClusterConfig) (*Session, error) {\n\ts, err := newSessionCommon(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = s.init(); err != nil {\n\t\tif err == ErrNoConnectionsStarted {\n\t\t\t//This error used to be generated inside NewSession & returned directly\n\t\t\t//Forward it on up to be backwards compatible\n\t\t\treturn nil, ErrNoConnectionsStarted\n\t\t} else {\n\t\t\t// TODO(zariel): dont wrap this error in fmt.Errorf, return a 
typed error\n\t\t\treturn nil, fmt.Errorf(\"gocql: unable to create session: %v\", err)\n\t\t}\n\t}\n\n\ts.readyCh <- struct{}{}\n\tclose(s.readyCh)\n\n\treturn s, nil\n}\n\nfunc NewSessionNonBlocking(cfg ClusterConfig) (*Session, error) {\n\ts, err := newSessionCommon(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo func() {\n\t\tif initErr := s.init(); initErr != nil {\n\t\t\ts.sessionStateMu.Lock()\n\t\t\ts.initErr = fmt.Errorf(\"gocql: unable to create session: %v\", initErr)\n\t\t\ts.sessionStateMu.Unlock()\n\t\t}\n\n\t\ts.readyCh <- struct{}{}\n\t\tclose(s.readyCh)\n\t}()\n\n\treturn s, nil\n}\n\n// SubscribeToEvents adds a new subscriber to the event bus.\n// name: subscriber name\n// queueSize: buffer size for the subscriber events, when buffer is overflowed events are dropped\n// filter: optional filter function (can be nil to receive all events)\n//\n// Returns a Subscriber instance that provides access to events and a Stop method.\nfunc (s *Session) SubscribeToEvents(name string, queueSize int, filter eventbus.FilterFunc[events.Event]) *eventbus.Subscriber[events.Event] {\n\treturn s.eventBus.Subscribe(name, queueSize, filter)\n}\n\nfunc (s *Session) init() error {\n\tif s.cfg.disableInit {\n\t\treturn nil\n\t}\n\n\thosts, err := resolveInitialEndpoints(s.cfg.DNSResolver, s.cfg.Hosts, s.cfg.Port, s.logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar partitioner string\n\n\tif !s.cfg.disableControlConn {\n\t\ts.control = createControlConn(s)\n\t\treconnectionPolicy := s.cfg.InitialReconnectionPolicy\n\t\tfor i := 0; i < reconnectionPolicy.GetMaxRetries(); i++ {\n\t\t\tif i != 0 {\n\t\t\t\ttime.Sleep(reconnectionPolicy.GetInterval(i))\n\t\t\t}\n\n\t\t\tif s.cfg.ProtoVersion == 0 {\n\t\t\t\tvar proto int\n\t\t\t\tproto, err = s.control.discoverProtocol(hosts)\n\t\t\t\tif err != nil {\n\t\t\t\t\terr = fmt.Errorf(\"unable to discover protocol version: %w\\n\", err)\n\t\t\t\t\tif debug.Enabled 
{\n\t\t\t\t\t\ts.logger.Println(err.Error())\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t} else if proto == 0 {\n\t\t\t\t\treturn errors.New(\"unable to discovery protocol version\")\n\t\t\t\t}\n\n\t\t\t\t// TODO(zariel): we really only need this in 1 place\n\t\t\t\ts.cfg.ProtoVersion = proto\n\t\t\t\ts.connCfg.ProtoVersion = proto\n\t\t\t}\n\n\t\t\tif err = s.control.connect(hosts); err != nil {\n\t\t\t\terr = fmt.Errorf(\"unable to create control connection: %w\\n\", err)\n\t\t\t\tif debug.Enabled {\n\t\t\t\t\ts.logger.Println(err.Error())\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to connect to the cluster, last error: %w\", err)\n\t\t}\n\n\t\tconn := s.control.getConn().conn.(*Conn)\n\t\tconn.mu.Lock()\n\t\ts.tabletsRoutingV1 = conn.isTabletSupported()\n\t\tconn.mu.Unlock()\n\n\t\ts.hostSource.setControlConn(s.control)\n\n\t\tif s.clientRoutesHandler != nil {\n\t\t\tif err = s.clientRoutesHandler.Initialize(s); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to initialize client routes handler: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\tif !s.cfg.DisableInitialHostLookup {\n\t\t\tvar newHosts []*HostInfo\n\t\t\tnewHosts, partitioner, err = s.hostSource.GetHostsFromSystem()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfilteredHosts := make([]*HostInfo, 0, len(newHosts))\n\t\t\tfor _, host := range newHosts {\n\t\t\t\tif !s.cfg.filterHost(host) {\n\t\t\t\t\tfilteredHosts = append(filteredHosts, host)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\thosts = filteredHosts\n\t\t}\n\n\t\tnewer, _ := checkSystemSchema(s.control)\n\t\ts.useSystemSchema = newer\n\t\tdefer conn.finalizeConnection()\n\t} else {\n\t\t// For testing purposes we populate host ids\n\t\tfor _, host := range hosts {\n\t\t\tif host.hostId.IsEmpty() {\n\t\t\t\thost.hostId = MustRandomUUID()\n\t\t\t}\n\t\t}\n\t}\n\n\tif partitioner != \"\" {\n\t\ts.policy.SetPartitioner(partitioner)\n\t}\n\n\thostMap := make(map[string]*HostInfo, 
len(hosts))\n\tfor _, host := range hosts {\n\t\thostMap[host.HostID()] = host\n\t}\n\n\thosts = hosts[:0]\n\t// each host will increment left and decrement it after connecting and once\n\t// there's none left, we'll close hostCh\n\tvar left int64\n\t// we will receive up to len(hostMap) of messages so create a buffer so we\n\t// don't end up stuck in a goroutine if we stopped listening\n\tconnectedCh := make(chan struct{}, len(hostMap))\n\t// we add one here because we don't want to end up closing hostCh until we're\n\t// done looping and the decerement code might be reached before we've looped\n\t// again\n\tatomic.AddInt64(&left, 1)\n\tfor _, host := range hostMap {\n\t\thost := s.hostSource.addOrUpdate(host)\n\t\tif s.cfg.filterHost(host) {\n\t\t\tcontinue\n\t\t}\n\n\t\tatomic.AddInt64(&left, 1)\n\t\tgo func() {\n\t\t\ts.pool.addHost(host)\n\t\t\tconnectedCh <- struct{}{}\n\n\t\t\t// if there are no hosts left, then close the hostCh to unblock the loop\n\t\t\t// below if its still waiting\n\t\t\tif atomic.AddInt64(&left, -1) == 0 {\n\t\t\t\tclose(connectedCh)\n\t\t\t}\n\t\t}()\n\n\t\thosts = append(hosts, host)\n\t}\n\t// once we're done looping we subtract the one we initially added and check\n\t// to see if we should close\n\tif atomic.AddInt64(&left, -1) == 0 {\n\t\tclose(connectedCh)\n\t}\n\n\tif s.cfg.disableControlConn {\n\t\tversion := s.hostSource.getHostsList()[0].Version()\n\t\ts.useSystemSchema = version.AtLeast(3, 0, 0)\n\t\ts.hasAggregatesAndFunctions = version.AtLeast(2, 2, 0)\n\t}\n\n\t// before waiting for them to connect, add them all to the policy so we can\n\t// utilize efficiencies by calling AddHosts if the policy supports it\n\ttype bulkAddHosts interface {\n\t\tAddHosts([]*HostInfo)\n\t}\n\tif v, ok := s.policy.(bulkAddHosts); ok {\n\t\tv.AddHosts(hosts)\n\t} else {\n\t\tfor _, host := range hosts {\n\t\t\ts.policy.AddHost(host)\n\t\t}\n\t}\n\n\treadyPolicy, _ := s.policy.(ReadyPolicy)\n\t// now loop over connectedCh until it's closed 
(meaning we've connected to all)\n\t// or until the policy says we're ready\n\tfor range connectedCh {\n\t\tif readyPolicy != nil && readyPolicy.Ready() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// TODO(zariel): we probably dont need this any more as we verify that we\n\t// can connect to one of the endpoints supplied by using the control conn.\n\t// See if there are any connections in the pool\n\tif s.cfg.ReconnectInterval > 0 {\n\t\tgo s.reconnectDownedHosts(s.cfg.ReconnectInterval)\n\t}\n\n\tif s.pool.Size() == 0 {\n\t\treturn ErrNoConnectionsStarted\n\t}\n\n\t// Invoke KeyspaceChanged to let the policy cache the session keyspace\n\t// parameters. This is used by tokenAwareHostPolicy to discover replicas.\n\tif !s.cfg.disableControlConn && s.cfg.Keyspace != \"\" {\n\t\ts.policy.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: s.cfg.Keyspace})\n\t}\n\n\tif err = s.policy.IsOperational(s); err != nil {\n\t\treturn fmt.Errorf(\"gocql: unable to create session: %v\", err)\n\t}\n\n\ts.sessionStateMu.Lock()\n\ts.isInitialized = true\n\ts.sessionStateMu.Unlock()\n\n\treturn nil\n}\n\n// AwaitSchemaAgreement will wait until schema versions across all nodes in the\n// cluster are the same (as seen from the point of view of the control connection).\n// The maximum amount of time this takes is governed\n// by the MaxWaitSchemaAgreement setting in the configuration (default: 60s).\n// AwaitSchemaAgreement returns an error in case schema versions are not the same\n// after the timeout specified in MaxWaitSchemaAgreement elapses.\nfunc (s *Session) AwaitSchemaAgreement(ctx context.Context) error {\n\tif s.cfg.disableControlConn {\n\t\treturn errNoControl\n\t}\n\tif err := s.Ready(); err != nil {\n\t\treturn err\n\t}\n\tch := s.control.getConn()\n\treturn (&Iter{err: ch.conn.awaitSchemaAgreement(ctx)}).err\n}\n\nfunc (s *Session) reconnectDownedHosts(intv time.Duration) {\n\treconnectTicker := time.NewTicker(intv)\n\tdefer reconnectTicker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase 
<-reconnectTicker.C:\n\t\t\thosts := s.hostSource.getHostsList()\n\n\t\t\t// Print session.hostSource for debug.\n\t\t\tif debug.Enabled {\n\t\t\t\tbuf := bytes.NewBufferString(\"Session.hostSource:\")\n\t\t\t\tfor _, h := range hosts {\n\t\t\t\t\tbuf.WriteString(\"[\" + h.ConnectAddress().String() + \":\" + h.State().String() + \"]\")\n\t\t\t\t}\n\t\t\t\ts.logger.Println(buf.String())\n\t\t\t}\n\n\t\t\tfor _, h := range hosts {\n\t\t\t\tif h.IsUp() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// we let the pool call handleNodeConnected to change the host state\n\t\t\t\ts.pool.addHost(h)\n\t\t\t}\n\t\tcase <-s.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// SetConsistency sets the default consistency level for this session. This\n// setting can also be changed on a per-query basis and the default value\n// is Quorum.\nfunc (s *Session) SetConsistency(cons Consistency) {\n\ts.mu.Lock()\n\ts.cons = cons\n\ts.mu.Unlock()\n}\n\n// SetPageSize sets the default page size for this session. A value <= 0 will\n// disable paging. This setting can also be changed on a per-query basis.\nfunc (s *Session) SetPageSize(n int) {\n\ts.mu.Lock()\n\ts.pageSize = n\n\ts.mu.Unlock()\n}\n\n// SetPrefetch sets the default threshold for pre-fetching new pages. If\n// there are only p*pageSize rows remaining, the next page will be requested\n// automatically. This value can also be changed on a per-query basis and\n// the default value is 0.25.\nfunc (s *Session) SetPrefetch(p float64) {\n\ts.mu.Lock()\n\ts.prefetch = p\n\ts.mu.Unlock()\n}\n\n// SetTrace sets the default tracer for this session. 
This setting can also\n// be changed on a per-query basis.\nfunc (s *Session) SetTrace(trace Tracer) {\n\ts.mu.Lock()\n\ts.trace = trace\n\ts.mu.Unlock()\n}\n\n// QueryWithContext same as Query, but adds context to it.\nfunc (s *Session) QueryWithContext(ctx context.Context, stmt string, values ...any) *Query {\n\tq := s.Query(stmt, values...)\n\tq.context = ctx\n\treturn q\n}\n\n// Query generates a new query object for interacting with the database.\n// Further details of the query may be tweaked using the resulting query\n// value before the query is executed. Query is automatically prepared\n// if it has not previously been executed.\nfunc (s *Session) Query(stmt string, values ...any) *Query {\n\tqry := queryPool.Get().(*Query)\n\tqry.session = s\n\tqry.stmt = stmt\n\tqry.values = values\n\tqry.defaultsFromSession()\n\tqry.SetRequestTimeout(s.cfg.Timeout)\n\treturn qry\n}\n\ntype QueryInfo struct {\n\tId          []byte\n\tArgs        []ColumnInfo\n\tRval        []ColumnInfo\n\tPKeyColumns []int\n}\n\n// Bind generates a new query object based on the query statement passed in.\n// The query is automatically prepared if it has not previously been executed.\n// The binding callback allows the application to define which query argument\n// values will be marshalled as part of the query execution.\n// During execution, the meta data of the prepared query will be routed to the\n// binding callback, which is responsible for producing the query argument values.\nfunc (s *Session) Bind(stmt string, b func(q *QueryInfo) ([]any, error)) *Query {\n\tqry := queryPool.Get().(*Query)\n\tqry.session = s\n\tqry.stmt = stmt\n\tqry.binding = b\n\tqry.defaultsFromSession()\n\tqry.SetRequestTimeout(s.cfg.Timeout)\n\treturn qry\n}\n\n// Close closes all connections. 
The session is unusable after this\n// operation.\nfunc (s *Session) Close() {\n\n\ts.sessionStateMu.Lock()\n\tif s.isClosing {\n\t\ts.sessionStateMu.Unlock()\n\t\treturn\n\t}\n\ts.isClosing = true\n\ts.sessionStateMu.Unlock()\n\n\tif s.pool != nil {\n\t\ts.pool.Close()\n\t}\n\n\tif s.control != nil {\n\t\ts.control.close()\n\t}\n\n\tif s.nodeEvents != nil {\n\t\ts.nodeEvents.stop()\n\t}\n\n\tif s.schemaEvents != nil {\n\t\ts.schemaEvents.stop()\n\t}\n\n\tif s.eventBus != nil {\n\t\t_ = s.eventBus.Stop()\n\t}\n\n\tif s.ringRefresher != nil {\n\t\ts.ringRefresher.Stop()\n\t}\n\n\tif s.cancel != nil {\n\t\ts.cancel()\n\t}\n\n\tif s.clientRoutesHandler != nil {\n\t\ts.clientRoutesHandler.Stop()\n\t}\n\n\ts.sessionStateMu.Lock()\n\ts.isClosed = true\n\ts.sessionStateMu.Unlock()\n\n\tif s.metadataDescriber != nil && s.metadataDescriber.metadata != nil {\n\t\ts.metadataDescriber.metadata.tabletsMetadata.Close()\n\t}\n}\n\nfunc (s *Session) Closed() bool {\n\ts.sessionStateMu.RLock()\n\tclosed := s.isClosed\n\ts.sessionStateMu.RUnlock()\n\treturn closed\n}\n\nfunc (s *Session) initialized() bool {\n\ts.sessionStateMu.RLock()\n\tinitialized := s.isInitialized\n\ts.sessionStateMu.RUnlock()\n\treturn initialized\n}\n\nfunc (s *Session) Ready() error {\n\ts.sessionStateMu.RLock()\n\terr := ErrSessionNotReady\n\tif s.isInitialized || s.initErr != nil {\n\t\terr = s.initErr\n\t}\n\ts.sessionStateMu.RUnlock()\n\treturn err\n}\n\nfunc (s *Session) WaitUntilReady() error {\n\t<-s.readyCh\n\treturn s.initErr\n}\n\nfunc (s *Session) executeQuery(qry *Query) (it *Iter) {\n\tif s.Closed() {\n\t\treturn &Iter{err: ErrSessionClosed}\n\t}\n\tif err := s.Ready(); err != nil {\n\t\treturn &Iter{err: err}\n\t}\n\n\titer, err := s.executor.executeQuery(qry)\n\tif err != nil {\n\t\treturn &Iter{err: err}\n\t}\n\tif iter == nil {\n\t\tpanic(\"nil iter\")\n\t}\n\n\treturn iter\n}\n\nfunc (s *Session) removeHost(h *HostInfo) {\n\ts.policy.RemoveHost(h)\n\thostID := 
h.HostID()\n\ts.pool.removeHost(hostID)\n\ts.hostSource.removeHost(hostID)\n}\n\n// KeyspaceMetadata returns the schema metadata for the keyspace specified. Returns an error if the keyspace does not exist.\nfunc (s *Session) KeyspaceMetadata(keyspace string) (*KeyspaceMetadata, error) {\n\tif s.Closed() {\n\t\treturn nil, ErrSessionClosed\n\t} else if err := s.Ready(); err != nil {\n\t\treturn nil, err\n\t} else if keyspace == \"\" {\n\t\treturn nil, ErrNoKeyspace\n\t}\n\n\treturn s.metadataDescriber.GetKeyspace(keyspace)\n}\n\n// TableMetadata returns the schema metadata for the specified table. Returns an error if the keyspace or table does not exist.\nfunc (s *Session) TableMetadata(keyspace, table string) (*TableMetadata, error) {\n\tif s.Closed() {\n\t\treturn nil, ErrSessionClosed\n\t} else if err := s.Ready(); err != nil {\n\t\treturn nil, err\n\t} else if keyspace == \"\" {\n\t\treturn nil, ErrNoKeyspace\n\t} else if table == \"\" {\n\t\treturn nil, ErrNoTable\n\t}\n\n\treturn s.metadataDescriber.GetTable(keyspace, table)\n}\n\n// TabletsMetadata returns the metadata about all tablets across all keyspaces and tables.\n//\n// Deprecated: Use [Session.TableTabletsMetadata] for per-table lookups or\n// [Session.ForEachTablet] to iterate without aggregating into a flat list.\nfunc (s *Session) TabletsMetadata() (tablets.TabletInfoList, error) {\n\tif s.Closed() {\n\t\treturn nil, ErrSessionClosed\n\t} else if err := s.Ready(); err != nil {\n\t\treturn nil, err\n\t} else if !s.tabletsRoutingV1 {\n\t\treturn nil, ErrTabletsNotUsed\n\t}\n\n\treturn s.metadataDescriber.getTablets(), nil\n}\n\n// TableTabletsMetadata returns the tablet metadata for the specified keyspace and table.\n// Returns (nil, nil) when no tablets exist for the given keyspace/table.\nfunc (s *Session) TableTabletsMetadata(keyspace, table string) (tablets.TabletEntryList, error) {\n\t// fail fast\n\tif s.Closed() {\n\t\treturn nil, ErrSessionClosed\n\t} else if err := s.Ready(); err != nil 
{\n\t\treturn nil, err\n\t} else if !s.tabletsRoutingV1 {\n\t\treturn nil, ErrTabletsNotUsed\n\t} else if keyspace == \"\" {\n\t\treturn nil, ErrNoKeyspace\n\t} else if table == \"\" {\n\t\treturn nil, ErrNoTable\n\t}\n\n\treturn s.metadataDescriber.getTableTablets(keyspace, table), nil\n}\n\n// ForEachTablet iterates over all keyspace/table pairs and their tablet entries,\n// calling fn for each one. If fn returns false, iteration stops early.\n// The entries slice is a shallow copy; do not mutate individual entries.\nfunc (s *Session) ForEachTablet(fn func(keyspace, table string, entries tablets.TabletEntryList) bool) error {\n\t// fail fast\n\tif s.Closed() {\n\t\treturn ErrSessionClosed\n\t} else if err := s.Ready(); err != nil {\n\t\treturn err\n\t} else if !s.tabletsRoutingV1 {\n\t\treturn ErrTabletsNotUsed\n\t}\n\tif fn == nil {\n\t\treturn nil\n\t}\n\n\ts.metadataDescriber.forEachTablet(fn)\n\treturn nil\n}\n\nfunc (s *Session) getConn() *Conn {\n\thosts := s.hostSource.getHostsList()\n\tfor _, host := range hosts {\n\t\tif !host.IsUp() {\n\t\t\tcontinue\n\t\t}\n\n\t\tpool, ok := s.pool.getPool(host)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t} else if conn := pool.Pick(nil, nil); conn != nil {\n\t\t\treturn conn\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// findTabletReplicasUnsafeForToken returns the raw replica slice for the tablet\n// owning the given token. 
The returned slice must not be modified by callers.\nfunc (s *Session) findTabletReplicasUnsafeForToken(keyspace, table string, token int64) []tablets.ReplicaInfo {\n\tif s.Closed() {\n\t\treturn nil\n\t}\n\tmd := s.metadataDescriber\n\tif md == nil || md.metadata == nil {\n\t\treturn nil\n\t}\n\treturn md.metadata.tabletsMetadata.FindReplicasUnsafeForToken(keyspace, table, token)\n}\n\n// returns routing key indexes and type info\nfunc (s *Session) routingKeyInfo(ctx context.Context, stmt string, requestTimeout time.Duration) (*routingKeyInfo, error) {\n\ts.routingKeyInfoCache.mu.Lock()\n\n\tentry, cached := s.routingKeyInfoCache.lru.Get(stmt)\n\tif cached {\n\t\t// done accessing the cache\n\t\ts.routingKeyInfoCache.mu.Unlock()\n\t\t// the entry is an inflight struct similar to that used by\n\t\t// Conn to prepare statements\n\t\tinflight := entry.(*inflightCachedEntry)\n\n\t\t// wait for any inflight work\n\t\tinflight.wg.Wait()\n\n\t\tif inflight.err != nil {\n\t\t\treturn nil, inflight.err\n\t\t}\n\n\t\tkey, _ := inflight.value.(*routingKeyInfo)\n\n\t\treturn key, nil\n\t}\n\n\t// create a new inflight entry while the data is created\n\tinflight := new(inflightCachedEntry)\n\tinflight.wg.Add(1)\n\tdefer inflight.wg.Done()\n\ts.routingKeyInfoCache.lru.Add(stmt, inflight)\n\ts.routingKeyInfoCache.mu.Unlock()\n\n\tvar (\n\t\tinfo         *preparedStatment\n\t\tpartitionKey []*ColumnMetadata\n\t)\n\n\tconn := s.getConn()\n\tif conn == nil {\n\t\t// TODO: better error?\n\t\tinflight.err = errors.New(\"gocql: unable to fetch prepared info: no connection available\")\n\t\treturn nil, inflight.err\n\t}\n\n\t// get the query info for the statement\n\tinfo, inflight.err = conn.prepareStatement(ctx, stmt, nil, requestTimeout)\n\tif inflight.err != nil {\n\t\t// don't cache this error\n\t\ts.routingKeyInfoCache.Remove(stmt)\n\t\treturn nil, inflight.err\n\t}\n\n\t// TODO: it would be nice to mark hosts here but as we are not using the policies\n\t// to fetch hosts we 
cant\n\n\tif info.request.colCount == 0 {\n\t\t// no arguments, no routing key, and no error\n\t\treturn nil, nil\n\t}\n\n\ttable := info.request.table\n\tkeyspace := info.request.keyspace\n\n\t// Fall back to per-column metadata when FlagGlobalTableSpec is not set.\n\tif keyspace == \"\" && len(info.request.columns) > 0 {\n\t\tkeyspace = info.request.columns[0].Keyspace\n\t}\n\tif table == \"\" && len(info.request.columns) > 0 {\n\t\ttable = info.request.columns[0].Table\n\t}\n\n\tpartitioner, err := scyllaGetTablePartitioner(s, keyspace, table)\n\tif err != nil {\n\t\t// don't cache this error, but make sure all waiters see the same failure.\n\t\tinflight.err = err\n\t\ts.routingKeyInfoCache.Remove(stmt)\n\t\treturn nil, err\n\t}\n\n\tif len(info.request.pkeyColumns) > 0 {\n\t\t// proto v4 dont need to calculate primary key columns\n\t\ttypes := make([]TypeInfo, len(info.request.pkeyColumns))\n\t\tfor i, col := range info.request.pkeyColumns {\n\t\t\ttypes[i] = info.request.columns[col].TypeInfo\n\t\t}\n\n\t\troutingKeyInfo := &routingKeyInfo{\n\t\t\tindexes:     info.request.pkeyColumns,\n\t\t\ttypes:       types,\n\t\t\tlwt:         info.request.lwt,\n\t\t\tpartitioner: partitioner,\n\t\t\tkeyspace:    keyspace,\n\t\t\ttable:       table,\n\t\t}\n\n\t\tinflight.value = routingKeyInfo\n\t\treturn routingKeyInfo, nil\n\t}\n\n\t// get the table metadata (uses TableMetadata to handle cache invalidation)\n\tvar tableMetadata *TableMetadata\n\ttableMetadata, inflight.err = s.TableMetadata(keyspace, table)\n\tif inflight.err != nil {\n\t\t// don't cache this error\n\t\ts.routingKeyInfoCache.Remove(stmt)\n\t\treturn nil, inflight.err\n\t}\n\n\tpartitionKey = tableMetadata.PartitionKey\n\n\t// Build a name→index map so that partition key lookup is O(1) per\n\t// column instead of a nested O(partitionKey × columns) scan.\n\t// The map records the first occurrence of each column name.\n\tcolIndex := make(map[string]int, len(info.request.columns))\n\tfor i, c := range 
info.request.columns {\n\t\tif _, exists := colIndex[c.Name]; !exists {\n\t\t\tcolIndex[c.Name] = i\n\t\t}\n\t}\n\n\tsize := len(partitionKey)\n\troutingKeyInfo := &routingKeyInfo{\n\t\tindexes:     make([]int, size),\n\t\ttypes:       make([]TypeInfo, size),\n\t\tlwt:         info.request.lwt,\n\t\tpartitioner: partitioner,\n\t\tkeyspace:    keyspace,\n\t\ttable:       table,\n\t}\n\n\tfor keyIndex, keyColumn := range partitionKey {\n\t\targIndex, found := colIndex[keyColumn.Name]\n\t\tif !found {\n\t\t\t// missing a routing key column mapping\n\t\t\t// no routing key, and no error\n\t\t\treturn nil, nil\n\t\t}\n\t\troutingKeyInfo.indexes[keyIndex] = argIndex\n\t\troutingKeyInfo.types[keyIndex] = info.request.columns[argIndex].TypeInfo\n\t}\n\n\t// cache this result\n\tinflight.value = routingKeyInfo\n\n\treturn routingKeyInfo, nil\n}\n\nfunc (b *Batch) execute(ctx context.Context, conn *Conn) *Iter {\n\treturn conn.executeBatch(ctx, b)\n}\n\n// Exec executes a batch operation and returns nil if successful\n// otherwise an error is returned describing the failure.\nfunc (b *Batch) Exec() error {\n\titer := b.session.executeBatch(b)\n\treturn iter.Close()\n}\n\nfunc (s *Session) executeBatch(batch *Batch) *Iter {\n\t// fail fast\n\tif s.Closed() {\n\t\treturn &Iter{err: ErrSessionClosed}\n\t}\n\tif err := s.Ready(); err != nil {\n\t\treturn &Iter{err: err}\n\t}\n\n\t// Drop metrics from prior query executions\n\tbatch.metrics.reset()\n\n\t// Prevent the execution of the batch if greater than the limit\n\t// Currently batches have a limit of 65536 queries.\n\t// https://datastax-oss.atlassian.net/browse/JAVA-229\n\tif batch.Size() > BatchSizeMaximum {\n\t\treturn &Iter{err: ErrTooManyStmts}\n\t}\n\n\titer, err := s.executor.executeQuery(batch)\n\tif err != nil {\n\t\treturn &Iter{err: err}\n\t}\n\n\treturn iter\n}\n\n// ExecuteBatch executes a batch operation and returns nil if successful\n// otherwise an error is returned describing the failure.\nfunc (s *Session) 
ExecuteBatch(batch *Batch) error {\n\titer := s.executeBatch(batch)\n\treturn iter.Close()\n}\n\n// ExecuteBatchCAS executes a batch operation and returns true if successful and\n// an iterator (to scan additional rows if more than one conditional statement)\n// was sent.\n// Further scans on the interator must also remember to include\n// the applied boolean as the first argument to *Iter.Scan\nfunc (s *Session) ExecuteBatchCAS(batch *Batch, dest ...any) (applied bool, iter *Iter, err error) {\n\titer = s.executeBatch(batch)\n\tif err := iter.checkErrAndNotFound(); err != nil {\n\t\titer.Close()\n\t\treturn false, nil, err\n\t}\n\n\tif len(iter.Columns()) > 1 {\n\t\tdest = append([]any{&applied}, dest...)\n\t\titer.Scan(dest...)\n\t} else {\n\t\titer.Scan(&applied)\n\t}\n\n\treturn applied, iter, iter.err\n}\n\n// MapExecuteBatchCAS executes a batch operation much like ExecuteBatchCAS,\n// however it accepts a map rather than a list of arguments for the initial\n// scan.\nfunc (s *Session) MapExecuteBatchCAS(batch *Batch, dest map[string]any) (applied bool, iter *Iter, err error) {\n\titer = s.executeBatch(batch)\n\tif err := iter.checkErrAndNotFound(); err != nil {\n\t\titer.Close()\n\t\treturn false, nil, err\n\t}\n\titer.MapScan(dest)\n\tif iter.err != nil {\n\t\treturn false, iter, iter.err\n\t}\n\t// check if [applied] was returned, otherwise it might not be CAS\n\tif appliedRaw, ok := dest[\"[applied]\"]; ok {\n\t\tapplied, ok = appliedRaw.(bool)\n\t\tif !ok {\n\t\t\ts.logger.Println(\"encountered non-bool \\\"[applied]\\\" key\")\n\t\t}\n\t\tdelete(dest, \"[applied]\")\n\t}\n\n\t// we usually close here, but instead of closing, just returin an error\n\t// if MapScan failed. 
Although Close just returns err, using Close\n\t// here might be confusing as we are not actually closing the iter\n\treturn applied, iter, iter.err\n}\n\n// translateAddressPort is a helper method that will use the given AddressTranslator\n// if defined, to translate the given address and port into a possibly new address\n// and port, If no AddressTranslator or if an error occurs, the given address and\n// port will be returned.\nfunc translateAddressPort(addressTranslator AddressTranslator, host *HostInfo, addr AddressPort, logger StdLogger) (AddressPort, error) {\n\tif addressTranslator == nil || !addr.IsValid() {\n\t\treturn addr, nil\n\t}\n\ttranslatorV2, ok := addressTranslator.(AddressTranslatorV2)\n\tif !ok {\n\t\tnewAddr, newPort := addressTranslator.Translate(addr.Address, int(addr.Port))\n\t\tif debug.Enabled {\n\t\t\tlogger.Printf(\"gocql: translated address %q to '%v:%d'\", addr, newAddr, newPort)\n\t\t}\n\t\treturn AddressPort{\n\t\t\tAddress: newAddr,\n\t\t\tPort:    uint16(newPort),\n\t\t}, nil\n\t}\n\tnewAddr, err := translatorV2.TranslateHost(host, addr)\n\tif err != nil {\n\t\tif debug.Enabled {\n\t\t\tlogger.Printf(\"gocql: failed to translate address %q: %s\", addr, err.Error())\n\t\t}\n\t\treturn addr, err\n\t}\n\tif debug.Enabled {\n\t\tlogger.Printf(\"gocql: translated address %q to %q\", addr, newAddr)\n\t}\n\treturn newAddr, nil\n}\n\ntype hostMetrics struct {\n\t// Attempts is count of how many times this query has been attempted for this host.\n\t// An attempt is either a retry or fetching next page of results.\n\tAttempts int\n\n\t// TotalLatency is the sum of attempt latencies for this host in nanoseconds.\n\tTotalLatency int64\n}\n\ntype queryMetrics struct {\n\tm map[UUID]*hostMetrics\n\t// totalAttempts is total number of attempts.\n\t// Equal to sum of all hostMetrics' Attempts\n\ttotalAttempts int\n\tl             sync.RWMutex\n}\n\n// preFilledQueryMetrics initializes new queryMetrics based on per-host supplied data.\nfunc 
preFilledQueryMetrics(m map[UUID]*hostMetrics) *queryMetrics {\n\tqm := &queryMetrics{m: m}\n\tfor _, hm := range qm.m {\n\t\tqm.totalAttempts += hm.Attempts\n\t}\n\treturn qm\n}\n\n// hostMetrics returns a snapshot of metrics for given host.\n// If the metrics for host don't exist, they are created.\nfunc (qm *queryMetrics) hostMetrics(host *HostInfo) *hostMetrics {\n\tqm.l.Lock()\n\tmetrics := qm.hostMetricsLocked(host)\n\tcopied := new(hostMetrics)\n\t*copied = *metrics\n\tqm.l.Unlock()\n\treturn copied\n}\n\n// hostMetricsLocked gets or creates host metrics for given host.\n// It must be called only while holding qm.l lock.\nfunc (qm *queryMetrics) hostMetricsLocked(host *HostInfo) *hostMetrics {\n\tid := host.hostUUID()\n\tmetrics, exists := qm.m[id]\n\tif !exists {\n\t\t// if the host is not in the map, it means it's been accessed for the first time\n\t\tmetrics = &hostMetrics{}\n\t\tqm.m[id] = metrics\n\t}\n\n\treturn metrics\n}\n\n// attempts returns the number of times the query was executed.\nfunc (qm *queryMetrics) attempts() int {\n\tqm.l.Lock()\n\tattempts := qm.totalAttempts\n\tqm.l.Unlock()\n\treturn attempts\n}\n\nfunc (qm *queryMetrics) latency() int64 {\n\tqm.l.Lock()\n\tvar (\n\t\tattempts int\n\t\tlatency  int64\n\t)\n\tfor _, metric := range qm.m {\n\t\tattempts += metric.Attempts\n\t\tlatency += metric.TotalLatency\n\t}\n\tqm.l.Unlock()\n\tif attempts > 0 {\n\t\treturn latency / int64(attempts)\n\t}\n\treturn 0\n}\n\n// reset resets metrics, to forget about prior query executions.\n// Uses clear() instead of make() to preserve the map's backing array,\n// avoiding a heap allocation on each re-execution.\nfunc (qm *queryMetrics) reset() {\n\tqm.l.Lock()\n\tclear(qm.m)\n\tqm.totalAttempts = 0\n\tqm.l.Unlock()\n}\n\n// attempt adds given number of attempts and latency for given host.\n// It returns previous total attempts.\n// If needsHostMetrics is true, a copy of updated hostMetrics is returned.\nfunc (qm *queryMetrics) attempt(addAttempts int, 
addLatency time.Duration,\n\thost *HostInfo, needsHostMetrics bool) (int, *hostMetrics) {\n\tqm.l.Lock()\n\n\ttotalAttempts := qm.totalAttempts\n\tqm.totalAttempts += addAttempts\n\n\tupdateHostMetrics := qm.hostMetricsLocked(host)\n\tupdateHostMetrics.Attempts += addAttempts\n\tupdateHostMetrics.TotalLatency += addLatency.Nanoseconds()\n\n\tvar hostMetricsCopy *hostMetrics\n\tif needsHostMetrics {\n\t\thostMetricsCopy = new(hostMetrics)\n\t\t*hostMetricsCopy = *updateHostMetrics\n\t}\n\n\tqm.l.Unlock()\n\treturn totalAttempts, hostMetricsCopy\n}\n\n// Query represents a CQL statement that can be executed.\ntype Query struct {\n\ttrace   Tracer\n\tcontext context.Context\n\t// pageContextParent keeps paging fetch contexts anchored to the original\n\t// query context instead of chaining each page under the previous page fetch.\n\tpageContextParent context.Context\n\tspec              SpeculativeExecutionPolicy\n\trt                RetryPolicy\n\tconn              ConnInterface\n\tobserver          QueryObserver\n\tmetrics           *queryMetrics\n\tsession           *Session\n\t// Timeout on waiting for response from server\n\tcustomPayload map[string][]byte\n\t// getKeyspace is field so that it can be overriden in tests\n\tgetKeyspace func() string\n\t// routingInfo is a pointer because Query can be copied and copyable struct can't hold a mutex.\n\troutingInfo *queryRoutingInfo\n\tbinding     func(q *QueryInfo) ([]any, error)\n\t// hostID specifies the host on which the query should be executed.\n\t// If it is empty, then the host is picked by HostSelectionPolicy\n\thostID     string\n\tstmt       string\n\troutingKey []byte\n\tvalues     []any\n\tpageState  []byte\n\t// requestTimeout is a timeout on waiting for response from server\n\trequestTimeout             time.Duration\n\tdefaultTimestampValue      int64\n\tprefetch                   float64\n\tpageSize                   int\n\trefCount                   uint32\n\tcons                       
Consistency\n\tserialCons                 Consistency\n\tdisableAutoPage            bool\n\tdeferReleasedErrorFinalize bool\n\tidempotent                 bool\n\tskipPrepare                bool\n\tdisableSkipMetadata        bool\n\tdefaultTimestamp           bool\n\t// prepareCache caches whether shouldPrepare has been computed.\n\t// Since q.stmt is immutable after construction, the result never\n\t// changes. Accessed atomically because speculative execution may\n\t// call shouldPrepare from multiple goroutines concurrently.\n\t// Values: 0 = unknown, 1 = should prepare (DML), 2 = should not prepare.\n\tprepareCache uint32\n}\n\ntype queryRoutingInfo struct {\n\t// partitioner is a reference to a Partitioner instance\n\t// If nil default partitioner will be used.\n\tpartitioner Partitioner\n\tkeyspace    string\n\ttable       string\n\t// mu protects contents of queryRoutingInfo.\n\tmu sync.RWMutex\n\t// \"lwt\" denotes the query being an LWT operation\n\t// In effect if the query is of the form \"INSERT/UPDATE/DELETE ... 
IF ..."
	// For more details see https://docs.scylladb.com/using-scylla/lwt/
	lwt bool
}

// isLWT reports whether the statement was detected to be a lightweight
// transaction. Guarded by qri.mu.
func (qri *queryRoutingInfo) isLWT() bool {
	qri.mu.RLock()
	defer qri.mu.RUnlock()
	return qri.lwt
}

// getPartitioner returns the custom partitioner for this statement, or nil
// when the default partitioner should be used. Guarded by qri.mu.
func (qri *queryRoutingInfo) getPartitioner() Partitioner {
	qri.mu.RLock()
	defer qri.mu.RUnlock()
	return qri.partitioner
}

// defaultsFromSession seeds the query with session-wide defaults
// (consistency, paging, tracing, policies). The read lock is held for the
// whole copy; defer keeps it panic-safe.
func (q *Query) defaultsFromSession() {
	s := q.session

	s.mu.RLock()
	defer s.mu.RUnlock()

	q.cons = s.cons
	q.pageSize = s.pageSize
	q.trace = s.trace
	q.observer = s.queryObserver
	q.prefetch = s.prefetch
	q.rt = s.cfg.RetryPolicy
	q.serialCons = s.cfg.SerialConsistency
	q.defaultTimestamp = s.cfg.DefaultTimestamp
	q.idempotent = s.cfg.DefaultIdempotence
	if q.metrics == nil {
		q.metrics = &queryMetrics{m: make(map[UUID]*hostMetrics)}
	}

	q.spec = defaultNonSpecExec
}

// Statement returns the statement that was used to generate this query.
func (q Query) Statement() string {
	return q.stmt
}

// Values returns the values passed in via Bind.
// This can be used by a wrapper type that needs to access the bound values.
func (q Query) Values() []any {
	return q.values
}

// String implements the stringer interface.
func (q Query) String() string {
	return fmt.Sprintf("[query statement=%q values=%+v consistency=%s]", q.stmt, q.values, q.cons)
}

// Attempts returns the number of times the query was executed.
func (q *Query) Attempts() int {
	return q.metrics.attempts()
}

// AddAttempts records i additional attempts against the given host.
func (q *Query) AddAttempts(i int, host *HostInfo) {
	q.metrics.attempt(i, 0, host, false)
}

// Latency returns the average amount of nanoseconds per attempt of the query.
func (q *Query) Latency() int64 {
	return q.metrics.latency()
}

// AddLatency records l nanoseconds of latency against the given host.
func (q *Query) AddLatency(l int64, host *HostInfo) {
	q.metrics.attempt(0, time.Duration(l)*time.Nanosecond, host, false)
}

// Consistency sets the consistency level for this query. If no consistency
// level has been set, the default consistency level of the cluster
// is used.
func (q *Query) Consistency(c Consistency) *Query {
	q.cons = c
	return q
}

// GetConsistency returns the currently configured consistency level for
// the query.
func (q *Query) GetConsistency() Consistency {
	return q.cons
}

// SetConsistency is the same as Consistency but without a return value.
func (q *Query) SetConsistency(c Consistency) {
	q.cons = c
}

// CustomPayload sets the custom payload level for this query.
func (q *Query) CustomPayload(customPayload map[string][]byte) *Query {
	q.customPayload = customPayload
	return q
}

// Context returns the context attached via WithContext, or
// context.Background() when none was set.
func (q *Query) Context() context.Context {
	if q.context == nil {
		return context.Background()
	}
	return q.context
}

// Trace enables tracing of this query. Look at the documentation of the
// Tracer interface to learn more about tracing.
func (q *Query) Trace(trace Tracer) *Query {
	q.trace = trace
	return q
}

// Observer enables query-level observer on this query.
// The provided observer will be called every time this query is executed.
func (q *Query) Observer(observer QueryObserver) *Query {
	q.observer = observer
	return q
}

// PageSize will tell the iterator to fetch the result in pages of size n.
// This is useful for iterating over large result sets, but setting the
// page size too low might decrease the performance. This feature is only
// available in Cassandra 2 and onwards.
func (q *Query) PageSize(n int) *Query {
	q.pageSize = n
	return q
}

// DefaultTimestamp will enable the with default timestamp flag on the query.
// If enabled, this will replace the server side assigned
// timestamp as default timestamp. Note that a timestamp in the query itself
// will still override this timestamp. 
This is entirely optional.\n//\n// Only available on protocol >= 3\nfunc (q *Query) DefaultTimestamp(enable bool) *Query {\n\tq.defaultTimestamp = enable\n\treturn q\n}\n\n// WithTimestamp will enable the with default timestamp flag on the query\n// like DefaultTimestamp does. But also allows to define value for timestamp.\n// It works the same way as USING TIMESTAMP in the query itself, but\n// should not break prepared query optimization.\n//\n// Only available on protocol >= 3\nfunc (q *Query) WithTimestamp(timestamp int64) *Query {\n\tq.DefaultTimestamp(true)\n\tq.defaultTimestampValue = timestamp\n\treturn q\n}\n\n// RoutingKey sets the routing key to use when a token aware connection\n// pool is used to optimize the routing of this query.\nfunc (q *Query) RoutingKey(routingKey []byte) *Query {\n\tq.routingKey = routingKey\n\treturn q\n}\n\nfunc (q *Query) withContext(ctx context.Context) ExecutableQuery {\n\t// I really wish go had covariant types\n\treturn q.WithContext(ctx)\n}\n\n// WithContext returns a shallow copy of q with its context\n// set to ctx.\n//\n// The provided context controls the entire lifetime of executing a\n// query, queries will be canceled and return once the context is\n// canceled.\nfunc (q *Query) WithContext(ctx context.Context) *Query {\n\tq2 := *q\n\tq2.context = ctx\n\treturn &q2\n}\n\n// Deprecate: does nothing, cancel the context passed to WithContext\nfunc (q *Query) Cancel() {\n\t// TODO: delete\n}\n\nfunc (q *Query) execute(ctx context.Context, conn *Conn) *Iter {\n\treturn conn.executeQuery(ctx, q)\n}\n\nfunc (q *Query) attempt(keyspace string, end, start time.Time, iter *Iter, host *HostInfo) {\n\tlatency := end.Sub(start)\n\tattempt, metricsForHost := q.metrics.attempt(1, latency, host, q.observer != nil)\n\n\tif q.observer != nil {\n\t\tq.observer.ObserveQuery(q.Context(), ObservedQuery{\n\t\t\tKeyspace:  keyspace,\n\t\t\tStatement: q.stmt,\n\t\t\tValues:    q.values,\n\t\t\tStart:     start,\n\t\t\tEnd:       
end,\n\t\t\tRows:      iter.numRows,\n\t\t\tHost:      host,\n\t\t\tMetrics:   metricsForHost,\n\t\t\tErr:       iter.err,\n\t\t\tAttempt:   attempt,\n\t\t})\n\t}\n}\n\nfunc (q *Query) retryPolicy() RetryPolicy {\n\treturn q.rt\n}\n\n// Keyspace returns the keyspace the query will be executed against.\nfunc (q *Query) Keyspace() string {\n\tif q.getKeyspace != nil {\n\t\treturn q.getKeyspace()\n\t}\n\tif q.routingInfo.keyspace != \"\" {\n\t\treturn q.routingInfo.keyspace\n\t}\n\n\tif q.session == nil {\n\t\treturn \"\"\n\t}\n\t// TODO(chbannis): this should be parsed from the query or we should let\n\t// this be set by users.\n\treturn q.session.cfg.Keyspace\n}\n\n// Table returns name of the table the query will be executed against.\nfunc (q *Query) Table() string {\n\treturn q.routingInfo.table\n}\n\nfunc (q *Query) GetSession() *Session {\n\treturn q.session\n}\n\n// GetRoutingKey gets the routing key to use for routing this query. If\n// a routing key has not been explicitly set, then the routing key will\n// be constructed if possible using the keyspace's schema and the query\n// info for this query statement. If the routing key cannot be determined\n// then nil will be returned with no error. On any error condition,\n// an error description will be returned.\nfunc (q *Query) GetRoutingKey() ([]byte, error) {\n\tif q.routingKey != nil {\n\t\treturn q.routingKey, nil\n\t} else if q.binding != nil && len(q.values) == 0 {\n\t\t// If this query was created using session.Bind we wont have the query\n\t\t// values yet, so we have to pass down to the next policy.\n\t\t// TODO: Remove this and handle this case\n\t\treturn nil, nil\n\t}\n\n\t// Non-DML statements (DDL, USE, GRANT, etc.) do not need preparation\n\t// and have no routing key. 
Skip the routingKeyInfo call which would\n\t// otherwise send a wasteful PREPARE frame to the server.\n\tif !q.shouldPrepare() {\n\t\treturn nil, nil\n\t}\n\n\t// try to determine the routing key\n\troutingKeyInfo, err := q.session.routingKeyInfo(q.Context(), q.stmt, q.requestTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif routingKeyInfo != nil {\n\t\tq.routingInfo.mu.Lock()\n\t\tq.routingInfo.lwt = routingKeyInfo.lwt\n\t\tq.routingInfo.partitioner = routingKeyInfo.partitioner\n\t\tq.routingInfo.keyspace = routingKeyInfo.keyspace\n\t\tq.routingInfo.table = routingKeyInfo.table\n\t\tq.routingInfo.mu.Unlock()\n\t}\n\treturn createRoutingKey(routingKeyInfo, q.values)\n}\n\nfunc (q *Query) shouldPrepare() bool {\n\tif v := atomic.LoadUint32(&q.prepareCache); v != 0 {\n\t\treturn v == 1\n\t}\n\tresult := stmtIsDML(q.stmt)\n\tif result {\n\t\tatomic.StoreUint32(&q.prepareCache, 1)\n\t} else {\n\t\tatomic.StoreUint32(&q.prepareCache, 2)\n\t}\n\treturn result\n}\n\n// stmtKeyword returns the first whitespace-delimited keyword of a CQL\n// statement, skipping leading whitespace. For \"BEGIN …\" statements it\n// returns the last word instead (e.g. \"BATCH\"). The returned substring\n// shares the backing array of stmt (zero allocations). The result is\n// not lowercased; callers should use strings.EqualFold for comparison.\nfunc stmtKeyword(stmt string) string {\n\t// Skip leading whitespace using unicode-aware scanning (no allocation).\n\ti := 0\n\tfor i < len(stmt) {\n\t\tr, size := utf8.DecodeRuneInString(stmt[i:])\n\t\tif !unicode.IsSpace(r) {\n\t\t\tbreak\n\t\t}\n\t\ti += size\n\t}\n\n\t// Find the end of the first word.\n\tj := i\n\tfor j < len(stmt) {\n\t\tr, size := utf8.DecodeRuneInString(stmt[j:])\n\t\tif unicode.IsSpace(r) {\n\t\t\tbreak\n\t\t}\n\t\tj += size\n\t}\n\n\tword := stmt[i:j]\n\tif strings.EqualFold(word, \"begin\") {\n\t\t// Handle \"BEGIN BATCH ... 
APPLY BATCH\" — extract the last word.\n\t\tend := len(stmt)\n\t\tfor end > j {\n\t\t\tr, size := utf8.DecodeLastRuneInString(stmt[:end])\n\t\t\tif !unicode.IsSpace(r) && r != ';' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tend -= size\n\t\t}\n\t\tstart := end\n\t\tfor start > j {\n\t\t\tr, size := utf8.DecodeLastRuneInString(stmt[:start])\n\t\t\tif unicode.IsSpace(r) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tstart -= size\n\t\t}\n\t\tword = stmt[start:end]\n\t}\n\treturn word\n}\n\n// stmtIsDML reports whether stmt is a DML statement that should be prepared.\nfunc stmtIsDML(stmt string) bool {\n\tkw := stmtKeyword(stmt)\n\tswitch len(kw) {\n\tcase 5: // \"batch\"\n\t\treturn strings.EqualFold(kw, \"batch\")\n\tcase 6: // \"select\", \"insert\", \"update\", \"delete\"\n\t\treturn strings.EqualFold(kw, \"select\") ||\n\t\t\tstrings.EqualFold(kw, \"insert\") ||\n\t\t\tstrings.EqualFold(kw, \"update\") ||\n\t\t\tstrings.EqualFold(kw, \"delete\")\n\t}\n\treturn false\n}\n\n// SetPrefetch sets the default threshold for pre-fetching new pages. 
If\n// there are only p*pageSize rows remaining, the next page will be requested\n// automatically.\nfunc (q *Query) Prefetch(p float64) *Query {\n\tq.prefetch = p\n\treturn q\n}\n\n// RetryPolicy sets the policy to use when retrying the query.\nfunc (q *Query) RetryPolicy(r RetryPolicy) *Query {\n\tq.rt = r\n\treturn q\n}\n\n// SetSpeculativeExecutionPolicy sets the execution policy\nfunc (q *Query) SetSpeculativeExecutionPolicy(sp SpeculativeExecutionPolicy) *Query {\n\tq.spec = sp\n\treturn q\n}\n\n// speculativeExecutionPolicy fetches the policy\nfunc (q *Query) speculativeExecutionPolicy() SpeculativeExecutionPolicy {\n\treturn q.spec\n}\n\n// IsIdempotent returns whether the query is marked as idempotent.\n// Non-idempotent query won't be retried.\n// See \"Retries and speculative execution\" in package docs for more details.\nfunc (q *Query) IsIdempotent() bool {\n\treturn q.idempotent\n}\n\nfunc (q *Query) IsLWT() bool {\n\treturn q.routingInfo.isLWT()\n}\n\nfunc (q *Query) GetCustomPartitioner() Partitioner {\n\treturn q.routingInfo.getPartitioner()\n}\n\n// Idempotent marks the query as being idempotent or not depending on\n// the value.\n// Non-idempotent query won't be retried.\n// See \"Retries and speculative execution\" in package docs for more details.\nfunc (q *Query) Idempotent(value bool) *Query {\n\tq.idempotent = value\n\treturn q\n}\n\n// Bind sets query arguments of query. This can also be used to rebind new query arguments\n// to an existing query instance.\nfunc (q *Query) Bind(v ...any) *Query {\n\tq.values = v\n\tq.pageState = nil\n\treturn q\n}\n\n// SerialConsistency sets the consistency level for the\n// serial phase of conditional updates. That consistency can only be\n// either SERIAL or LOCAL_SERIAL and if not present, it defaults to\n// SERIAL. 
This option will be ignored for anything else that a\n// conditional update/insert.\nfunc (q *Query) SerialConsistency(cons Consistency) *Query {\n\tif !cons.IsSerial() {\n\t\tpanic(\"Serial consistency can only be SERIAL or LOCAL_SERIAL got \" + cons.String())\n\t}\n\tq.serialCons = cons\n\treturn q\n}\n\n// PageState sets the paging state for the query to resume paging from a specific\n// point in time. Setting this will disable to query paging for this query, and\n// must be used for all subsequent pages.\nfunc (q *Query) PageState(state []byte) *Query {\n\tq.pageState = state\n\tq.disableAutoPage = true\n\treturn q\n}\n\n// NoSkipMetadata will override the internal result metadata cache so that the driver does not\n// send skip_metadata for queries, this means that the result will always contain\n// the metadata to parse the rows and will not reuse the metadata from the prepared\n// statement. This should only be used to work around cassandra bugs, such as when using\n// CAS operations which do not end in Cas.\n//\n// See https://issues.apache.org/jira/browse/CASSANDRA-11099\n// https://github.com/apache/cassandra-gocql-driver/issues/612\nfunc (q *Query) NoSkipMetadata() *Query {\n\tq.disableSkipMetadata = true\n\treturn q\n}\n\n// Exec executes the query without returning any rows.\nfunc (q *Query) Exec() error {\n\treturn q.Iter().Close()\n}\n\n// GetRequestTimeout returns time driver waits for single server response\n// This timeout is applied to preparing statement request and for query execution requests\nfunc (b *Query) GetRequestTimeout() time.Duration {\n\treturn b.requestTimeout\n}\n\n// SetRequestTimeout sets time driver waits for server to respond\n// This timeout is applied to preparing statement request and for query execution requests\nfunc (b *Query) SetRequestTimeout(timeout time.Duration) *Query {\n\tb.requestTimeout = timeout\n\treturn b\n}\n\nfunc isUseStatement(stmt string) bool {\n\tif len(stmt) < 3 {\n\t\treturn false\n\t}\n\n\treturn 
strings.EqualFold(stmt[0:3], \"use\")\n}\n\n// Iter executes the query and returns an iterator capable of iterating\n// over all results.\nfunc (q *Query) Iter() *Iter {\n\tif isUseStatement(q.stmt) {\n\t\treturn &Iter{err: ErrUseStmt}\n\t}\n\n\tif !q.disableAutoPage {\n\t\treturn q.executeQuery()\n\t}\n\n\t// Retry on empty page if pagination is manual\n\titer := q.executeQueryForIterPostProcessing()\n\tvar hiddenWarnings []string\n\tfor iter.err == nil && iter.numRows == 0 && !iter.LastPage() {\n\t\tif warnings := iter.Warnings(); len(warnings) > 0 {\n\t\t\thiddenWarnings = append(hiddenWarnings, warnings...)\n\t\t}\n\t\tps := iter.PageState()\n\t\titer.discard()\n\t\tq.PageState(ps)\n\t\titer = q.executeQueryForIterPostProcessing()\n\t}\n\tif len(hiddenWarnings) > 0 {\n\t\titer.allWarnings = append(hiddenWarnings, iter.allWarnings...)\n\t}\n\tif iter.err != nil && iter.framer == nil && iter.next == nil {\n\t\titer.finalize(true)\n\t}\n\treturn iter\n}\n\nfunc (q *Query) executeQueryForIterPostProcessing() (iter *Iter) {\n\tq.deferReleasedErrorFinalize = true\n\tdefer func() {\n\t\tq.deferReleasedErrorFinalize = false\n\t}()\n\treturn q.executeQuery()\n}\n\nfunc (q *Query) executeQuery() *Iter {\n\t// Drop metrics from prior query executions\n\tq.metrics.reset()\n\n\tif q.conn != nil {\n\t\t// if the query was specifically run on a connection then re-use that\n\t\t// connection when fetching the next results\n\t\treturn q.conn.executeQuery(q.Context(), q)\n\t}\n\treturn q.session.executeQuery(q)\n}\n\n// MapScan executes the query, copies the columns of the first selected\n// row into the map pointed at by m and discards the rest. 
If no rows\n// were selected, ErrNotFound is returned.\nfunc (q *Query) MapScan(m map[string]any) error {\n\titer := q.Iter()\n\tif err := iter.checkErrAndNotFound(); err != nil {\n\t\titer.Close()\n\t\treturn err\n\t}\n\titer.MapScan(m)\n\treturn iter.Close()\n}\n\n// Scan executes the query, copies the columns of the first selected\n// row into the values pointed at by dest and discards the rest. If no rows\n// were selected, ErrNotFound is returned.\nfunc (q *Query) Scan(dest ...any) error {\n\titer := q.Iter()\n\tif err := iter.checkErrAndNotFound(); err != nil {\n\t\titer.Close()\n\t\treturn err\n\t}\n\titer.Scan(dest...)\n\treturn iter.Close()\n}\n\n// ScanCAS executes a lightweight transaction (i.e. an UPDATE or INSERT\n// statement containing an IF clause). If the transaction fails because\n// the existing values did not match, the previous values will be stored\n// in dest.\n//\n// As for INSERT .. IF NOT EXISTS, previous values will be returned as if\n// SELECT * FROM. So using ScanCAS with INSERT is inherently prone to\n// column mismatching. Use MapScanCAS to capture them safely.\nfunc (q *Query) ScanCAS(dest ...any) (applied bool, err error) {\n\tq.disableSkipMetadata = true\n\titer := q.Iter()\n\tif err := iter.checkErrAndNotFound(); err != nil {\n\t\titer.Close()\n\t\treturn false, err\n\t}\n\tif len(iter.Columns()) > 1 {\n\t\tdest = append([]any{&applied}, dest...)\n\t\titer.Scan(dest...)\n\t} else {\n\t\titer.Scan(&applied)\n\t}\n\treturn applied, iter.Close()\n}\n\n// MapScanCAS executes a lightweight transaction (i.e. an UPDATE or INSERT\n// statement containing an IF clause). If the transaction fails because\n// the existing values did not match, the previous values will be stored\n// in dest map.\n//\n// As for INSERT .. IF NOT EXISTS, previous values will be returned as if\n// SELECT * FROM. So using ScanCAS with INSERT is inherently prone to\n// column mismatching. 
MapScanCAS is added to capture them safely.\nfunc (q *Query) MapScanCAS(dest map[string]any) (applied bool, err error) {\n\tq.disableSkipMetadata = true\n\titer := q.Iter()\n\tif err := iter.checkErrAndNotFound(); err != nil {\n\t\titer.Close()\n\t\treturn false, err\n\t}\n\titer.MapScan(dest)\n\tif iter.err != nil {\n\t\treturn false, iter.Close()\n\t}\n\t// check if [applied] was returned, otherwise it might not be CAS\n\tif appliedRaw, ok := dest[\"[applied]\"]; ok {\n\t\tapplied, ok = appliedRaw.(bool)\n\t\tif !ok {\n\t\t\tq.session.logger.Println(\"encountered non-bool \\\"[applied]\\\" key\")\n\t\t}\n\t\tdelete(dest, \"[applied]\")\n\t}\n\n\treturn applied, iter.Close()\n}\n\n// Release releases a query back into a pool of queries. Released Queries\n// cannot be reused.\n//\n// Example:\n//\n//\tqry := session.Query(\"SELECT * FROM my_table\")\n//\tqry.Exec()\n//\tqry.Release()\nfunc (q *Query) Release() {\n\tq.decRefCount()\n}\n\n// reset zeroes out all fields of a query so that it can be safely pooled.\n// It preserves the metrics allocation for reuse. routingInfo is always freshly\n// allocated because paging copies share the pointer (see conn.go executeQuery).\nfunc (q *Query) reset() {\n\tm := q.metrics\n\tif m != nil {\n\t\tclear(m.m)\n\t\tm.totalAttempts = 0\n\t}\n\n\t*q = Query{routingInfo: &queryRoutingInfo{}, metrics: m, refCount: 1}\n}\n\nfunc (q *Query) incRefCount() {\n\tatomic.AddUint32(&q.refCount, 1)\n}\n\nfunc (q *Query) decRefCount() {\n\tif res := atomic.AddUint32(&q.refCount, ^uint32(0)); res == 0 {\n\t\t// do release\n\t\tq.reset()\n\t\tqueryPool.Put(q)\n\t}\n}\n\nfunc (q *Query) borrowForExecution() {\n\tq.incRefCount()\n}\n\nfunc (q *Query) releaseAfterExecution() {\n\tq.decRefCount()\n}\n\n// SetHostID allows to define the host the query should be executed against. If the\n// host was filtered or otherwise unavailable, then the query will error. 
If an empty\n// string is sent, the default behavior, using the configured HostSelectionPolicy will\n// be used. A hostID can be obtained from HostInfo.HostID() after calling GetHosts().\nfunc (q *Query) SetHostID(hostID string) *Query {\n\tq.hostID = hostID\n\treturn q\n}\n\n// GetHostID returns id of the host on which query should be executed.\nfunc (q *Query) GetHostID() string {\n\treturn q.hostID\n}\n\n// Iter represents an iterator that can be used to iterate over all rows that\n// were returned by a query. The iterator might send additional queries to the\n// database during the iteration if paging was enabled.\n//\n// IMPORTANT: Close should still be called whenever iteration may stop early.\n// Iterators that run to exhaustion through Scan/Scanner.Next auto-finalize when\n// they become terminal, but Close remains the safest pattern and is still needed\n// to surface errors after manual early termination. Use defer immediately after\n// obtaining an Iter when in doubt:\n//\n//\titer := session.Query(\"...\").Iter()\n//\tdefer iter.Close()\n//\n// Failure to call Close() after early termination may leak resources and prevent\n// buffer reuse.\n//\n// CONCURRENCY: Iter is NOT safe for concurrent use. An Iter instance should only\n// be used from a single goroutine at a time. 
While Close() is safe to call multiple\n// times (idempotent), calling Scan(), Next(), or other methods concurrently with\n// Close() or each other will result in undefined behavior.\ntype Iter struct {\n\twarningQuery          ExecutableQuery\n\tframer                framerInterface\n\terr                   error\n\twarningHandler        WarningHandler\n\treleasedCustomPayload map[string][]byte\n\tnext                  *nextIter\n\thost                  *HostInfo\n\t// allWarnings accumulates warnings across page boundaries.\n\t// When a page's framer is released during fetchNextPage(), its warnings\n\t// are appended here so they are not lost.\n\tallWarnings []string\n\n\t// scanColumns caches the column names computed by RowData() so that\n\t// MapScan does not recompute them on every row. Populated lazily on\n\t// the first call to getScanColumns().\n\tscanColumns       []string\n\tmeta              resultMetadata\n\tpos               int\n\tnumRows           int\n\tclosed            int32\n\twarningsHandled   int32\n\twarningQueryOwned bool\n}\n\n// Host returns the host which the query was sent to.\nfunc (iter *Iter) Host() *HostInfo {\n\treturn iter.host\n}\n\n// Columns returns the name and type of the selected columns.\nfunc (iter *Iter) Columns() []ColumnInfo {\n\treturn iter.meta.columns\n}\n\n// copyPageData copies page-related fields from src to iter, excluding the closed flag.\n// This is used when fetching the next page to avoid races with concurrent Close() calls.\n//\n// After this call, src must not be used because its framer ownership has been\n// transferred to iter (src.framer is set to nil to prevent double-release).\nfunc (iter *Iter) copyPageData(src *Iter) {\n\titer.err = src.err\n\titer.framer = src.framer\n\titer.next = src.next\n\titer.host = src.host\n\titer.meta = src.meta\n\titer.allWarnings = append(iter.allWarnings, src.allWarnings...)\n\titer.releasedCustomPayload = src.releasedCustomPayload\n\titer.pos = src.pos\n\titer.numRows = 
src.numRows\n\tif iter.warningQuery == nil {\n\t\titer.warningHandler = src.warningHandler\n\t\titer.warningQuery = src.warningQuery\n\t\titer.warningQueryOwned = src.warningQueryOwned\n\t} else {\n\t\tsrc.releaseWarningQuery()\n\t}\n\n\t// Clear source framer to prevent double-release: ownership is now with iter.\n\tsrc.framer = nil\n\tsrc.allWarnings = nil\n\tsrc.releasedCustomPayload = nil\n\tsrc.next = nil\n\tsrc.warningHandler = nil\n\tsrc.warningQuery = nil\n\tsrc.warningQueryOwned = false\n\t// Intentionally don't copy iter.closed - it's managed with atomic operations\n}\n\nfunc (iter *Iter) bindWarningHandler(qry ExecutableQuery, handler WarningHandler) *Iter {\n\tif iter == nil || handler == nil {\n\t\treturn iter\n\t}\n\titer.warningQuery = qry\n\titer.warningHandler = handler\n\tif pooledQuery, ok := qry.(*Query); ok {\n\t\tpooledQuery.incRefCount()\n\t\titer.warningQueryOwned = true\n\t\tif iter.err != nil && iter.framer == nil && iter.next == nil && !pooledQuery.deferReleasedErrorFinalize {\n\t\t\titer.finalize(true)\n\t\t}\n\t\treturn iter\n\t}\n\tif iter.err != nil && iter.framer == nil && iter.next == nil {\n\t\titer.finalize(true)\n\t}\n\treturn iter\n}\n\nfunc (iter *Iter) releaseWarningQuery() {\n\tqry := iter.warningQuery\n\towned := iter.warningQueryOwned\n\titer.warningQueryOwned = false\n\titer.warningQuery = nil\n\n\tif !owned {\n\t\treturn\n\t}\n\tpooledQuery, ok := qry.(*Query)\n\tif !ok {\n\t\treturn\n\t}\n\tpooledQuery.decRefCount()\n}\n\nfunc (iter *Iter) collectReleasedFramerMetadata(f framerInterface) {\n\tif f == nil {\n\t\treturn\n\t}\n\tif warnings := f.GetHeaderWarnings(); len(warnings) > 0 {\n\t\titer.allWarnings = append(iter.allWarnings, warnings...)\n\t}\n\tif payload := f.GetCustomPayload(); len(payload) > 0 {\n\t\titer.releasedCustomPayload = maps.Clone(payload)\n\t}\n}\n\nfunc (iter *Iter) handleWarningsOnce() {\n\tif iter.warningHandler == nil {\n\t\treturn\n\t}\n\tif !atomic.CompareAndSwapInt32(&iter.warningsHandled, 0, 
1) {\n\t\treturn\n\t}\n\tif warnings := iter.Warnings(); len(warnings) > 0 {\n\t\titer.warningHandler.HandleWarnings(iter.warningQuery, iter.host, warnings)\n\t}\n}\n\nfunc (iter *Iter) finalize(dispatchWarnings bool) {\n\tif !atomic.CompareAndSwapInt32(&iter.closed, 0, 1) {\n\t\treturn\n\t}\n\tif iter.framer != nil {\n\t\titer.collectReleasedFramerMetadata(iter.framer)\n\t\titer.framer.Release()\n\t\titer.framer = nil\n\t}\n\tif iter.next != nil {\n\t\titer.next.close()\n\t\titer.next = nil\n\t}\n\tif dispatchWarnings {\n\t\titer.handleWarningsOnce()\n\t}\n\titer.releaseWarningQuery()\n\titer.warningHandler = nil\n}\n\nfunc (iter *Iter) discard() {\n\titer.finalize(false)\n}\n\nfunc newErrorIterWithReleasedFramer(err error, framer framerInterface) *Iter {\n\titer := &Iter{err: err}\n\tif framer != nil {\n\t\titer.collectReleasedFramerMetadata(framer)\n\t\tframer.Release()\n\t}\n\treturn iter\n}\n\n// fetchNextPage releases the current page's framer and loads the next page\n// into iter. Returns true if a new page was successfully loaded,\n// false if no more pages or if the fetch produced an error.\nfunc (iter *Iter) fetchNextPage() bool {\n\tif iter.pos < iter.numRows || iter.next == nil {\n\t\treturn false\n\t}\n\tcurrentNext := iter.next\n\tif iter.framer != nil {\n\t\t// Accumulate warnings from the current page before releasing its framer,\n\t\t// so they are not lost across page boundaries.\n\t\tif w := iter.framer.GetHeaderWarnings(); len(w) > 0 {\n\t\t\titer.allWarnings = append(iter.allWarnings, w...)\n\t\t}\n\t\titer.framer.Release()\n\t\titer.framer = nil // prevent accidental use of released framer\n\t}\n\tnext := currentNext.fetch()\n\tcurrentNext.consume()\n\titer.copyPageData(next)\n\treturn iter.err == nil\n}\n\ntype Scanner interface {\n\t// Next advances the row pointer to point at the next row, the row is valid until\n\t// the next call of Next. 
It returns true if there is a row which is available to be\n\t// scanned into with Scan.\n\t// Next must be called before every call to Scan.\n\tNext() bool\n\n\t// Scan copies the current row's columns into dest. If the length of dest does not equal\n\t// the number of columns returned in the row an error is returned. If an error is encountered\n\t// when unmarshalling a column into the value in dest an error is returned and the row is invalidated\n\t// until the next call to Next.\n\t// Next must be called before calling Scan, if it is not an error is returned.\n\tScan(...any) error\n\n\t// Err returns the if there was one during iteration that resulted in iteration being unable to complete.\n\t// Err will also release resources held by the iterator, the Scanner should not used after being called.\n\tErr() error\n}\n\ntype iterScanner struct {\n\titer  *Iter\n\tcols  [][]byte\n\tvalid bool\n}\n\nfunc (is *iterScanner) Next() bool {\n\titer := is.iter\n\tif iter.err != nil {\n\t\titer.finalize(true)\n\t\treturn false\n\t}\n\n\tfor iter.pos >= iter.numRows {\n\t\tif !iter.fetchNextPage() {\n\t\t\titer.finalize(true)\n\t\t\treturn false\n\t\t}\n\t}\n\n\tfor i := 0; i < len(is.cols); i++ {\n\t\tcol, err := iter.readColumn()\n\t\tif err != nil {\n\t\t\titer.err = err\n\t\t\titer.finalize(true)\n\t\t\treturn false\n\t\t}\n\t\tis.cols[i] = col\n\t}\n\titer.pos++\n\tis.valid = true\n\n\treturn true\n}\n\nfunc scanColumn(p []byte, col ColumnInfo, dest []any) (int, error) {\n\tif dest[0] == nil {\n\t\treturn 1, nil\n\t}\n\n\tif col.TypeInfo.Type() == TypeTuple {\n\t\t// this will panic, actually a bug, please report\n\t\ttuple := col.TypeInfo.(TupleTypeInfo)\n\n\t\tcount := len(tuple.Elems)\n\t\t// here we pass in a slice of the struct which has the number number of\n\t\t// values as elements in the tuple\n\t\tif err := Unmarshal(col.TypeInfo, p, dest[:count]); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn count, nil\n\t} else {\n\t\tif err := 
Unmarshal(col.TypeInfo, p, dest[0]); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn 1, nil\n\t}\n}\n\nfunc (is *iterScanner) Scan(dest ...any) error {\n\tif !is.valid {\n\t\treturn errors.New(\"gocql: Scan called without calling Next\")\n\t}\n\n\titer := is.iter\n\t// currently only support scanning into an expand tuple, such that its the same\n\t// as scanning in more values from a single column\n\tif len(dest) != iter.meta.actualColCount {\n\t\treturn fmt.Errorf(\"gocql: not enough columns to scan into: have %d want %d\", len(dest), iter.meta.actualColCount)\n\t}\n\n\t// i is the current position in dest, could posible replace it and just use\n\t// slices of dest\n\ti := 0\n\tvar err error\n\tfor _, col := range iter.meta.columns {\n\t\tvar n int\n\t\tn, err = scanColumn(is.cols[i], col, dest[i:])\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\ti += n\n\t}\n\n\tis.valid = false\n\treturn err\n}\n\nfunc (is *iterScanner) Err() error {\n\titer := is.iter\n\tis.iter = nil\n\tis.cols = nil\n\tis.valid = false\n\treturn iter.Close()\n}\n\n// Scanner returns a row Scanner which provides an interface to scan rows in a manner which is\n// similar to database/sql. The iter should NOT be used again after calling this method.\nfunc (iter *Iter) Scanner() Scanner {\n\tif iter == nil {\n\t\treturn nil\n\t}\n\n\treturn &iterScanner{iter: iter, cols: make([][]byte, len(iter.meta.columns))}\n}\n\nfunc (iter *Iter) readColumn() ([]byte, error) {\n\tif atomic.LoadInt32(&iter.closed) != 0 {\n\t\treturn nil, errors.New(\"iterator closed\")\n\t}\n\tif iter.framer == nil {\n\t\treturn nil, errors.New(\"no framer available\")\n\t}\n\treturn iter.framer.ReadBytesInternal()\n}\n\n// Scan consumes the next row of the iterator and copies the columns of the\n// current row into the values pointed at by dest. Use nil as a dest value\n// to skip the corresponding column. 
Scan might send additional queries\n// to the database to retrieve the next set of rows if paging was enabled.\n//\n// Scan returns true if the row was successfully unmarshaled or false if the\n// end of the result set was reached or if an error occurred. Close should\n// be called afterwards to retrieve any potential errors.\nfunc (iter *Iter) Scan(dest ...any) bool {\n\tif iter.err != nil {\n\t\titer.finalize(true)\n\t\treturn false\n\t}\n\n\tfor iter.pos >= iter.numRows {\n\t\tif !iter.fetchNextPage() {\n\t\t\titer.finalize(true)\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif iter.next != nil && iter.pos >= iter.next.pos {\n\t\titer.next.fetchAsync()\n\t}\n\n\t// currently only support scanning into an expand tuple, such that its the same\n\t// as scanning in more values from a single column\n\tif len(dest) != iter.meta.actualColCount {\n\t\titer.err = fmt.Errorf(\"gocql: not enough columns to scan into: have %d want %d\", len(dest), iter.meta.actualColCount)\n\t\titer.finalize(true)\n\t\treturn false\n\t}\n\n\t// i is the current position in dest, could posible replace it and just use\n\t// slices of dest\n\ti := 0\n\tfor _, col := range iter.meta.columns {\n\t\tcolBytes, err := iter.readColumn()\n\t\tif err != nil {\n\t\t\titer.err = err\n\t\t\titer.finalize(true)\n\t\t\treturn false\n\t\t}\n\n\t\tn, err := scanColumn(colBytes, col, dest[i:])\n\t\tif err != nil {\n\t\t\titer.err = err\n\t\t\titer.finalize(true)\n\t\t\treturn false\n\t\t}\n\t\ti += n\n\t}\n\n\titer.pos++\n\treturn true\n}\n\n// GetCustomPayload returns any parsed custom payload results if given in the\n// response from Cassandra. The returned map is a shallow copy and is safe to\n// retain after the Iter advances or is closed.\n//\n// When paging is enabled, this returns the custom payload from the most recently\n// loaded page only. 
Custom payloads from previously consumed pages are not retained.\n// If you need the payload, retrieve it before advancing to the next page.\n//\n// This additional feature of CQL Protocol v4\n// allows additional results and query information to be returned by\n// custom QueryHandlers running in your C* cluster.\n// See https://datastax.github.io/java-driver/manual/custom_payloads/\nfunc (iter *Iter) GetCustomPayload() map[string][]byte {\n\tif iter.framer != nil {\n\t\treturn maps.Clone(iter.framer.GetCustomPayload())\n\t}\n\treturn maps.Clone(iter.releasedCustomPayload)\n}\n\n// Warnings returns any warnings generated if given in the response from Cassandra.\n// When paging is enabled, warnings are accumulated across all pages that have been\n// consumed so far plus the warnings from the current (not yet released) page.\n//\n// This is only available starting with CQL Protocol v4.\nfunc (iter *Iter) Warnings() []string {\n\tvar current []string\n\tif iter.framer != nil {\n\t\tcurrent = iter.framer.GetHeaderWarnings()\n\t}\n\tif len(iter.allWarnings) == 0 {\n\t\treturn slices.Clone(current) // always return a caller-owned slice\n\t}\n\tif len(current) == 0 {\n\t\treturn slices.Clone(iter.allWarnings)\n\t}\n\treturn slices.Concat(iter.allWarnings, current)\n}\n\n// Close closes the iterator and returns any errors that happened during\n// the query or the iteration.\nfunc (iter *Iter) Close() error {\n\titer.finalize(true)\n\treturn iter.err\n}\n\n// WillSwitchPage detects if iterator reached end of current page\n// and the next page is available.\nfunc (iter *Iter) WillSwitchPage() bool {\n\treturn iter.pos >= iter.numRows && iter.next != nil\n}\n\n// checkErrAndNotFound handle error and NotFound in one method.\nfunc (iter *Iter) checkErrAndNotFound() error {\n\tif iter.err != nil {\n\t\treturn iter.err\n\t} else if iter.numRows == 0 {\n\t\treturn ErrNotFound\n\t}\n\treturn nil\n}\n\n// PageState return the current paging state for a query which can be used 
for\n// subsequent queries to resume paging this point.\nfunc (iter *Iter) PageState() []byte {\n\treturn iter.meta.pagingState\n}\n\n// LastPage returns true if there are no more pages to fetch.\nfunc (iter *Iter) LastPage() bool {\n\treturn len(iter.meta.pagingState) == 0\n}\n\n// NumRows returns the number of rows in this pagination, it will update when new\n// pages are fetched, it is not the value of the total number of rows this iter\n// will return unless there is only a single page returned.\nfunc (iter *Iter) NumRows() int {\n\treturn iter.numRows\n}\n\n// nextIter holds state for fetching a single page in an iterator.\n// single page might be attempted multiple times due to retries.\ntype nextIter struct {\n\tqry    *Query\n\tnext   *Iter\n\tcancel context.CancelFunc\n\toncea  sync.Once\n\tonce   sync.Once\n\tmu     sync.Mutex\n\tpos    int\n\tclosed bool\n}\n\nfunc newNextIter(qry *Query, pos int) *nextIter {\n\tparentCtx := qry.pageContextParent\n\tif parentCtx == nil {\n\t\tparentCtx = qry.Context()\n\t}\n\tctx, cancel := context.WithCancel(parentCtx)\n\tnextQry := qry.WithContext(ctx)\n\tnextQry.pageContextParent = parentCtx\n\treturn &nextIter{\n\t\tqry:    nextQry,\n\t\tpos:    pos,\n\t\tcancel: cancel,\n\t}\n}\n\nfunc (n *nextIter) fetchAsync() {\n\tn.oncea.Do(func() {\n\t\tgo n.fetch()\n\t})\n}\n\nfunc (n *nextIter) storeFetched(next *Iter) {\n\tif next == nil {\n\t\treturn\n\t}\n\n\tn.mu.Lock()\n\tif n.closed {\n\t\tn.mu.Unlock()\n\t\tnext.discard()\n\t\treturn\n\t}\n\tn.next = next\n\tn.mu.Unlock()\n}\n\nfunc (n *nextIter) close() {\n\tif n.cancel != nil {\n\t\tn.cancel()\n\t}\n\n\tn.mu.Lock()\n\tn.closed = true\n\tnext := n.next\n\tn.next = nil\n\tn.mu.Unlock()\n\n\tif next != nil {\n\t\tnext.discard()\n\t}\n}\n\n// consume retires the next-page fetch context after the fetched page has been\n// handed off to the caller. 
Unlike close(), it keeps the fetched Iter alive so\n// its page data can become the current iterator state.\nfunc (n *nextIter) consume() {\n\tif n.cancel != nil {\n\t\tn.cancel()\n\t}\n\n\tn.mu.Lock()\n\tn.closed = true\n\tn.next = nil\n\tn.mu.Unlock()\n}\n\nfunc (n *nextIter) fetch() *Iter {\n\tn.once.Do(func() {\n\t\t// if the query was specifically run on a connection then re-use that\n\t\t// connection when fetching the next results\n\t\tvar next *Iter\n\t\tif n.qry.conn != nil {\n\t\t\tnext = n.qry.conn.executeQuery(n.qry.Context(), n.qry)\n\t\t} else {\n\t\t\tnext = n.qry.session.executeQuery(n.qry)\n\t\t}\n\t\tn.storeFetched(next)\n\t})\n\n\tn.mu.Lock()\n\tnext := n.next\n\tn.mu.Unlock()\n\treturn next\n}\n\ntype Batch struct {\n\tcontext  context.Context\n\trt       RetryPolicy\n\tspec     SpeculativeExecutionPolicy\n\ttrace    Tracer\n\tobserver BatchObserver\n\t// routingInfo is a pointer because Query can be copied and copyable struct can't hold a mutex.\n\troutingInfo   *queryRoutingInfo\n\tmetrics       *queryMetrics\n\tcancelBatch   func()\n\tCustomPayload map[string][]byte\n\tsession       *Session\n\tkeyspace      string\n\t// hostID specifies the host on which the query should be executed.\n\t// If it is empty, then the host is picked by HostSelectionPolicy\n\thostID                string\n\troutingKey            []byte\n\tEntries               []BatchEntry\n\tdefaultTimestampValue int64\n\t// requestTimeout is a timeout on waiting for response from serve\n\trequestTimeout   time.Duration\n\tserialCons       Consistency\n\tCons             Consistency\n\tdefaultTimestamp bool\n\tType             BatchType\n}\n\n// NewBatch creates a new batch operation using defaults defined in the cluster\n//\n// Deprecated: use session.Batch instead\nfunc (s *Session) NewBatch(typ BatchType) *Batch {\n\treturn s.Batch(typ)\n}\n\n// BatchWithContext creates a new batch operation using defaults defined in the cluster, with context\nfunc (s *Session) 
BatchWithContext(ctx context.Context, typ BatchType) *Batch {\n\tb := s.Batch(typ)\n\tb.context = ctx\n\treturn b\n}\n\n// Batch creates a new batch operation using defaults defined in the cluster\nfunc (s *Session) Batch(typ BatchType) *Batch {\n\ts.mu.RLock()\n\tbatch := &Batch{\n\t\tType:             typ,\n\t\trt:               s.cfg.RetryPolicy,\n\t\tserialCons:       s.cfg.SerialConsistency,\n\t\ttrace:            s.trace,\n\t\tobserver:         s.batchObserver,\n\t\tsession:          s,\n\t\tCons:             s.cons,\n\t\tdefaultTimestamp: s.cfg.DefaultTimestamp,\n\t\tkeyspace:         s.cfg.Keyspace,\n\t\tmetrics:          &queryMetrics{m: make(map[UUID]*hostMetrics)},\n\t\tspec:             defaultNonSpecExec,\n\t\troutingInfo:      &queryRoutingInfo{},\n\t\trequestTimeout:   s.cfg.Timeout,\n\t}\n\n\ts.mu.RUnlock()\n\treturn batch\n}\n\n// Trace enables tracing of this batch. Look at the documentation of the\n// Tracer interface to learn more about tracing.\nfunc (b *Batch) Trace(trace Tracer) *Batch {\n\tb.trace = trace\n\treturn b\n}\n\n// Observer enables batch-level observer on this batch.\n// The provided observer will be called every time this batched query is executed.\nfunc (b *Batch) Observer(observer BatchObserver) *Batch {\n\tb.observer = observer\n\treturn b\n}\n\nfunc (b *Batch) Keyspace() string {\n\treturn b.keyspace\n}\n\n// Batch has no reasonable eqivalent of Query.Table().\nfunc (b *Batch) Table() string {\n\treturn b.routingInfo.table\n}\n\nfunc (b *Batch) GetSession() *Session {\n\treturn b.session\n}\n\n// Attempts returns the number of attempts made to execute the batch.\nfunc (b *Batch) Attempts() int {\n\treturn b.metrics.attempts()\n}\n\nfunc (b *Batch) AddAttempts(i int, host *HostInfo) {\n\tb.metrics.attempt(i, 0, host, false)\n}\n\n// Latency returns the average number of nanoseconds to execute a single attempt of the batch.\nfunc (b *Batch) Latency() int64 {\n\treturn b.metrics.latency()\n}\n\nfunc (b *Batch) AddLatency(l 
int64, host *HostInfo) {\n\tb.metrics.attempt(0, time.Duration(l)*time.Nanosecond, host, false)\n}\n\n// GetConsistency returns the currently configured consistency level for the batch\n// operation.\nfunc (b *Batch) GetConsistency() Consistency {\n\treturn b.Cons\n}\n\n// SetConsistency sets the currently configured consistency level for the batch\n// operation.\nfunc (b *Batch) SetConsistency(c Consistency) {\n\tb.Cons = c\n}\n\nfunc (b *Batch) Context() context.Context {\n\tif b.context == nil {\n\t\treturn context.Background()\n\t}\n\treturn b.context\n}\n\nfunc (b *Batch) IsIdempotent() bool {\n\tfor _, entry := range b.Entries {\n\t\tif !entry.Idempotent {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (b *Batch) IsLWT() bool {\n\treturn b.routingInfo.isLWT()\n}\n\nfunc (b *Batch) GetCustomPartitioner() Partitioner {\n\treturn b.routingInfo.getPartitioner()\n}\n\nfunc (b *Batch) speculativeExecutionPolicy() SpeculativeExecutionPolicy {\n\treturn b.spec\n}\n\nfunc (b *Batch) SpeculativeExecutionPolicy(sp SpeculativeExecutionPolicy) *Batch {\n\tb.spec = sp\n\treturn b\n}\n\n// Query adds the query to the batch operation\nfunc (b *Batch) Query(stmt string, args ...any) *Batch {\n\tb.Entries = append(b.Entries, BatchEntry{Stmt: stmt, Args: args})\n\treturn b\n}\n\n// Bind adds the query to the batch operation and correlates it with a binding callback\n// that will be invoked when the batch is executed. 
The binding callback allows the application\n// to define which query argument values will be marshalled as part of the batch execution.\nfunc (b *Batch) Bind(stmt string, bind func(q *QueryInfo) ([]any, error)) {\n\tb.Entries = append(b.Entries, BatchEntry{Stmt: stmt, binding: bind})\n}\n\nfunc (b *Batch) retryPolicy() RetryPolicy {\n\treturn b.rt\n}\n\n// RetryPolicy sets the retry policy to use when executing the batch operation\nfunc (b *Batch) RetryPolicy(r RetryPolicy) *Batch {\n\tb.rt = r\n\treturn b\n}\n\nfunc (b *Batch) withContext(ctx context.Context) ExecutableQuery {\n\treturn b.WithContext(ctx)\n}\n\n// WithContext returns a shallow copy of b with its context\n// set to ctx.\n//\n// The provided context controls the entire lifetime of executing a\n// query, queries will be canceled and return once the context is\n// canceled.\nfunc (b *Batch) WithContext(ctx context.Context) *Batch {\n\tb2 := *b\n\tb2.context = ctx\n\treturn &b2\n}\n\n// Deprecate: does nothing, cancel the context passed to WithContext\nfunc (*Batch) Cancel() {\n\t// TODO: delete\n}\n\n// Size returns the number of batch statements to be executed by the batch operation.\nfunc (b *Batch) Size() int {\n\treturn len(b.Entries)\n}\n\n// SerialConsistency sets the consistency level for the\n// serial phase of conditional updates. That consistency can only be\n// either SERIAL or LOCAL_SERIAL and if not present, it defaults to\n// SERIAL. This option will be ignored for anything else that a\n// conditional update/insert.\n//\n// Only available for protocol 3 and above\nfunc (b *Batch) SerialConsistency(cons Consistency) *Batch {\n\tif !cons.IsSerial() {\n\t\tpanic(\"Serial consistency can only be SERIAL or LOCAL_SERIAL got \" + cons.String())\n\t}\n\tb.serialCons = cons\n\treturn b\n}\n\n// DefaultTimestamp will enable the with default timestamp flag on the query.\n// If enable, this will replace the server side assigned\n// timestamp as default timestamp. 
Note that a timestamp in the query itself\n// will still override this timestamp. This is entirely optional.\n//\n// Only available on protocol >= 3\nfunc (b *Batch) DefaultTimestamp(enable bool) *Batch {\n\tb.defaultTimestamp = enable\n\treturn b\n}\n\n// WithTimestamp will enable the with default timestamp flag on the query\n// like DefaultTimestamp does. But also allows to define value for timestamp.\n// It works the same way as USING TIMESTAMP in the query itself, but\n// should not break prepared query optimization.\n//\n// Only available on protocol >= 3\nfunc (b *Batch) WithTimestamp(timestamp int64) *Batch {\n\tb.DefaultTimestamp(true)\n\tb.defaultTimestampValue = timestamp\n\treturn b\n}\n\nfunc (b *Batch) attempt(keyspace string, end, start time.Time, iter *Iter, host *HostInfo) {\n\tlatency := end.Sub(start)\n\tattempt, metricsForHost := b.metrics.attempt(1, latency, host, b.observer != nil)\n\n\tif b.observer == nil {\n\t\treturn\n\t}\n\n\tstatements := make([]string, len(b.Entries))\n\tvalues := make([][]any, len(b.Entries))\n\n\tfor i, entry := range b.Entries {\n\t\tstatements[i] = entry.Stmt\n\t\tvalues[i] = entry.Args\n\t}\n\n\tb.observer.ObserveBatch(b.Context(), ObservedBatch{\n\t\tKeyspace:   keyspace,\n\t\tStatements: statements,\n\t\tValues:     values,\n\t\tStart:      start,\n\t\tEnd:        end,\n\t\t// Rows not used in batch observations // TODO - might be able to support it when using BatchCAS\n\t\tHost:    host,\n\t\tMetrics: metricsForHost,\n\t\tErr:     iter.err,\n\t\tAttempt: attempt,\n\t})\n}\n\nfunc (b *Batch) GetRoutingKey() ([]byte, error) {\n\tif b.routingKey != nil {\n\t\treturn b.routingKey, nil\n\t}\n\n\tif len(b.Entries) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tentry := b.Entries[0]\n\tif entry.binding != nil {\n\t\t// bindings do not have the values let's skip it like Query does.\n\t\treturn nil, nil\n\t}\n\t// try to determine the routing key\n\troutingKeyInfo, err := b.session.routingKeyInfo(b.Context(), entry.Stmt, 
b.GetRequestTimeout())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif routingKeyInfo != nil {\n\t\tb.routingInfo.mu.Lock()\n\t\tb.routingInfo.lwt = routingKeyInfo.lwt\n\t\tb.routingInfo.partitioner = routingKeyInfo.partitioner\n\t\tb.routingInfo.mu.Unlock()\n\t}\n\n\treturn createRoutingKey(routingKeyInfo, entry.Args)\n}\n\n// GetRequestTimeout returns time driver waits for single server response\n// This timeout is applied to preparing statement request and for query execution requests\nfunc (b *Batch) GetRequestTimeout() time.Duration {\n\treturn b.requestTimeout\n}\n\n// SetRequestTimeout sets time driver waits for single server response\n// This timeout is applied to preparing statement request and for query execution requests\nfunc (b *Batch) SetRequestTimeout(timeout time.Duration) *Batch {\n\tb.requestTimeout = timeout\n\treturn b\n}\n\nfunc createRoutingKey(routingKeyInfo *routingKeyInfo, values []any) ([]byte, error) {\n\tif routingKeyInfo == nil {\n\t\treturn nil, nil\n\t}\n\n\tif len(routingKeyInfo.indexes) == 1 {\n\t\t// single column routing key\n\t\troutingKey, err := Marshal(\n\t\t\troutingKeyInfo.types[0],\n\t\t\tvalues[routingKeyInfo.indexes[0]],\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn routingKey, nil\n\t}\n\n\t// composite routing key\n\t// Use a stack-allocated backing array to avoid heap allocation for the\n\t// common case where the composite key fits in 256 bytes. 
Each component\n\t// is encoded as: [2-byte big-endian length][marshaled value][0x00 terminator].\n\tvar backing [256]byte\n\tbuf := backing[:0]\n\tfor i := range routingKeyInfo.indexes {\n\t\tencoded, err := Marshal(\n\t\t\troutingKeyInfo.types[i],\n\t\t\tvalues[routingKeyInfo.indexes[i]],\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tn := len(encoded)\n\t\tbuf = append(buf, byte(n>>8), byte(n))\n\t\tbuf = append(buf, encoded...)\n\t\tbuf = append(buf, 0x00)\n\t}\n\t// Return a copy so the backing array doesn't escape when the result\n\t// is stored beyond this stack frame.\n\troutingKey := make([]byte, len(buf))\n\tcopy(routingKey, buf)\n\treturn routingKey, nil\n}\n\nfunc (b *Batch) borrowForExecution() {\n\t// empty, because Batch has no equivalent of Query.Release()\n\t// that would race with speculative executions.\n}\n\nfunc (b *Batch) releaseAfterExecution() {\n\t// empty, because Batch has no equivalent of Query.Release()\n\t// that would race with speculative executions.\n}\n\n// SetHostID allows to define the host the query should be executed against. If the\n// host was filtered or otherwise unavailable, then the query will error. If an empty\n// string is sent, the default behavior, using the configured HostSelectionPolicy will\n// be used. 
A hostID can be obtained from HostInfo.HostID() after calling GetHosts().\nfunc (b *Batch) SetHostID(hostID string) *Batch {\n\tb.hostID = hostID\n\treturn b\n}\n\n// GetHostID satisfies ExecutableQuery interface but does noop.\nfunc (b *Batch) GetHostID() string {\n\treturn b.hostID\n}\n\ntype BatchType byte\n\nconst (\n\tLoggedBatch   BatchType = 0\n\tUnloggedBatch BatchType = 1\n\tCounterBatch  BatchType = 2\n)\n\ntype BatchEntry struct {\n\tbinding    func(q *QueryInfo) ([]any, error)\n\tStmt       string\n\tArgs       []any\n\tIdempotent bool\n}\n\ntype ColumnInfo struct {\n\tTypeInfo TypeInfo\n\tKeyspace string\n\tTable    string\n\tName     string\n}\n\nfunc (c ColumnInfo) String() string {\n\treturn fmt.Sprintf(\"[column keyspace=%s table=%s name=%s type=%v]\", c.Keyspace, c.Table, c.Name, c.TypeInfo)\n}\n\n// routing key indexes LRU cache\ntype routingKeyInfoLRU struct {\n\tlru *lru.Cache[string]\n\tmu  sync.Mutex\n}\n\ntype routingKeyInfo struct {\n\tpartitioner Partitioner\n\tkeyspace    string\n\ttable       string\n\tindexes     []int\n\ttypes       []TypeInfo\n\tlwt         bool\n}\n\nfunc (r *routingKeyInfo) String() string {\n\treturn fmt.Sprintf(\"routing key index=%v types=%v\", r.indexes, r.types)\n}\n\nfunc (r *routingKeyInfoLRU) Remove(key string) {\n\tr.mu.Lock()\n\tr.lru.Remove(key)\n\tr.mu.Unlock()\n}\n\n// Max adjusts the maximum size of the cache and cleans up the oldest records if\n// the new max is lower than the previous value. 
Not concurrency safe.\nfunc (r *routingKeyInfoLRU) Max(max int) {\n\tr.mu.Lock()\n\tfor r.lru.Len() > max {\n\t\tr.lru.RemoveOldest()\n\t}\n\tr.lru.MaxEntries = max\n\tr.mu.Unlock()\n}\n\ntype inflightCachedEntry struct {\n\terr   error\n\tvalue any\n\twg    sync.WaitGroup\n}\n\n// GetHosts return a list of hosts in the ring the driver knows of.\nfunc (s *Session) GetHosts() []*HostInfo {\n\treturn s.hostSource.getHostsList()\n}\n\ntype HostInformation interface {\n\tPeer() net.IP\n\tConnectAddress() net.IP\n\tUntranslatedConnectAddress() net.IP\n\tBroadcastAddress() net.IP\n\tListenAddress() net.IP\n\tRPCAddress() net.IP\n\tPreferredIP() net.IP\n\tDataCenter() string\n\tRack() string\n\tHostID() string\n\tWorkLoad() string\n\tPartitioner() string\n\tClusterName() string\n\tTokens() []string\n\tPort() int\n\tIsUp() bool\n\tScyllaShardAwarePort() uint16\n\tScyllaShardAwarePortTLS() uint16\n\tScyllaShardCount() int\n}\n\ntype HostPoolInfo interface {\n\tGetConnectionCount() int\n\tGetExcessConnectionCount() int\n\tGetShardCount() int\n\tString() string\n\tInFlight() int\n\tHost() HostInformation\n\tIsClosed() bool\n}\n\nfunc (s *Session) GetHostPoolByID(hostID string) HostPoolInfo {\n\thostPool, _ := s.pool.getPoolByHostID(hostID)\n\treturn hostPool\n}\n\nfunc (s *Session) IterateHostPools(iter func(info HostPoolInfo) bool) {\n\ts.pool.iteratePool(iter)\n}\n\ntype ObservedQuery struct {\n\t// Start is a time when the query was attempted\n\tStart time.Time\n\t// End is a time when the query attempt was completed\n\tEnd time.Time\n\t// Err is the error in the query.\n\t// It only tracks network errors or errors of bad cassandra syntax, in particular selects with no match return nil error\n\t// Do not modify the values here, they are shared with multiple goroutines.\n\tErr error\n\t// Host is a reference to the host where the query was executed.\n\tHost *HostInfo\n\t// Metrics is the metrics for this attempt\n\tMetrics   *hostMetrics\n\tKeyspace  string\n\tStatement 
string\n\t// Values holds a slice of bound values for the query.\n\t// Do not modify the values here, they are shared with multiple goroutines.\n\tValues []any\n\t// Rows is the number of rows in the current iter.\n\t// In paginated queries, rows from previous scans are not counted.\n\t// Rows is not used in batch queries and remains at the default value\n\tRows int\n\t// Attempt is the index of attempt at executing this query.\n\t// The first attempt is number zero and any retries have non-zero attempt number.\n\tAttempt int\n}\n\n// QueryObserver is the interface implemented by query observers / stat collectors.\ntype QueryObserver interface {\n\t// ObserveQuery gets called on every query to cassandra, including all queries in an iterator when paging is enabled.\n\t// It doesn't get called if there is no query because the session is closed or there are no connections available.\n\t// The error reported only shows query errors, i.e. if a SELECT is valid but finds no matches it will be nil.\n\tObserveQuery(context.Context, ObservedQuery)\n}\n\ntype ObservedBatch struct {\n\t// Start is a time when the batch was attempted\n\tStart time.Time\n\t// End is a time when the batch attempt was completed\n\tEnd time.Time\n\t// Err is the error in the batch query.\n\t// It only tracks network errors or errors of bad cassandra syntax, in particular selects with no match return nil error\n\tErr error\n\t// Host is a reference to the host where the batch was executed.\n\tHost *HostInfo\n\t// Metrics is the metrics for this attempt\n\tMetrics    *hostMetrics\n\tKeyspace   string\n\tStatements []string\n\t// Values holds a slice of bound values for each statement.\n\t// Values[i] are bound values passed to Statements[i].\n\t// Do not modify the values here, they are shared with multiple goroutines.\n\tValues [][]any\n\t// Attempt is the index of attempt at executing this query.\n\t// The first attempt is number zero and any retries have non-zero attempt number.\n\tAttempt 
int\n}\n\n// BatchObserver is the interface implemented by batch observers / stat collectors.\ntype BatchObserver interface {\n\t// ObserveBatch gets called on every batch query to cassandra.\n\t// It also gets called once for each query in a batch.\n\t// It doesn't get called if there is no query because the session is closed or there are no connections available.\n\t// The error reported only shows query errors, i.e. if a SELECT is valid but finds no matches it will be nil.\n\t// Unlike QueryObserver.ObserveQuery it does no reporting on rows read.\n\tObserveBatch(context.Context, ObservedBatch)\n}\n\ntype ObservedConnect struct {\n\t// Host is the information about the host about to connect\n\tHost *HostInfo\n\n\tStart time.Time // time immediately before the dial is called\n\tEnd   time.Time // time immediately after the dial returned\n\n\t// Err is the connection error (if any)\n\tErr error\n}\n\n// ConnectObserver is the interface implemented by connect observers / stat collectors.\ntype ConnectObserver interface {\n\t// ObserveConnect gets called when a new connection to cassandra is made.\n\tObserveConnect(ObservedConnect)\n}\n\ntype Error struct {\n\tMessage string\n\tCode    int\n}\n\nfunc (e Error) Error() string {\n\treturn e.Message\n}\n\nvar (\n\tErrNotFound             = errors.New(\"not found\")\n\tErrUnavailable          = errors.New(\"unavailable\")\n\tErrUnsupported          = errors.New(\"feature not supported\")\n\tErrTooManyStmts         = errors.New(\"too many statements\")\n\tErrUseStmt              = errors.New(\"use statements aren't supported. 
Please see https://github.com/apache/cassandra-gocql-driver for explanation.\")\n\tErrSessionClosed        = errors.New(\"session has been closed\")\n\tErrNoConnections        = errors.New(\"gocql: no hosts available in the pool\")\n\tErrNoKeyspace           = errors.New(\"no keyspace provided\")\n\tErrNoTable              = errors.New(\"no table name provided\")\n\tErrKeyspaceDoesNotExist = errors.New(\"keyspace does not exist\")\n\tErrNoMetadata           = errors.New(\"no metadata available\")\n\tErrTabletsNotUsed       = errors.New(\"tablets not used\")\n\tErrSessionNotReady      = errors.New(\"session is not ready yet\")\n)\n\ntype ErrProtocol struct{ error }\n\nfunc NewErrProtocol(format string, args ...any) error {\n\treturn ErrProtocol{error: fmt.Errorf(format, args...)}\n}\n\n// BatchSizeMaximum is the maximum number of statements a batch operation can have.\n// This limit is set by cassandra and could change in the future.\nconst BatchSizeMaximum = 65535\n"
  },
  {
    "path": "session_connect_test.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"net\"\n\t\"strconv\"\n\t\"sync\"\n)\n\ntype OneConnTestServer struct {\n\tErr        error\n\tlistener   net.Listener\n\tacceptChan chan struct{}\n\tAddr       net.IP\n\tPort       int\n\tmu         sync.Mutex\n\tclosed     bool\n}\n\nfunc NewOneConnTestServer() (*OneConnTestServer, error) {\n\tlstn, err := net.Listen(\"tcp4\", \"localhost:0\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taddr, port := parseAddressPort(lstn.Addr().String())\n\treturn &OneConnTestServer{\n\t\tlistener:   lstn,\n\t\tacceptChan: make(chan struct{}),\n\t\tAddr:       addr,\n\t\tPort:       port,\n\t}, nil\n}\n\nfunc (c *OneConnTestServer) Accepted() chan struct{} {\n\treturn c.acceptChan\n}\n\nfunc (c *OneConnTestServer) Close() {\n\tc.lockedClose()\n}\n\nfunc (c *OneConnTestServer) Serve() {\n\tconn, err := c.listener.Accept()\n\tc.Err = err\n\tif conn != nil 
{\n\t\tconn.Close()\n\t}\n\tc.lockedClose()\n}\n\nfunc (c *OneConnTestServer) lockedClose() {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif !c.closed {\n\t\tclose(c.acceptChan)\n\t\tc.listener.Close()\n\t\tc.closed = true\n\t}\n}\n\nfunc parseAddressPort(hostPort string) (net.IP, int) {\n\thost, portStr, err := net.SplitHostPort(hostPort)\n\tif err != nil {\n\t\treturn net.ParseIP(\"\"), 0\n\t}\n\tport, _ := strconv.Atoi(portStr)\n\treturn net.ParseIP(host), port\n}\n"
  },
  {
    "path": "session_event_bus_integration_test.go",
    "content": "//go:build integration\n// +build integration\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/events\"\n)\n\n// WARNING: This test must NOT use t.Parallel(). It listens for schema events\n// and concurrent DDL from parallel tests could cause spurious matches.\n//\n//nolint:paralleltest // listens for schema events from the global control connection\nfunc TestSessionEventBusReceivesSchemaChangeEvent(t *testing.T) {\n\tcluster := createCluster()\n\tcluster.Events.DisableSchemaEvents = false\n\n\tsess, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to create session: %v\", err)\n\t}\n\tdefer sess.Close()\n\n\tkeyspace := fmt.Sprintf(\"eventbus_schema_%d\", time.Now().UnixNano())\n\n\t// Filter events to the specific keyspace this test creates, so that\n\t// concurrent DDL from parallel tests does not cause spurious matches.\n\tsub := sess.SubscribeToEvents(\"schema-event\", 10, func(ev events.Event) bool {\n\t\tif ks, ok := ev.(*events.SchemaChangeKeyspaceEvent); ok {\n\t\t\treturn ks.Keyspace == keyspace\n\t\t}\n\t\treturn false\n\t})\n\tdefer sub.Stop()\n\n\tcreateStmt := fmt.Sprintf(`CREATE KEYSPACE %s WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 1}`, keyspace)\n\tif err := sess.Query(createStmt).Exec(); err != nil {\n\t\tt.Fatalf(\"create keyspace: %v\", err)\n\t}\n\tdefer sess.Query(\"DROP KEYSPACE \" + keyspace).Exec()\n\n\tselect {\n\tcase ev := <-sub.Events():\n\t\tif _, ok := ev.(*events.SchemaChangeKeyspaceEvent); !ok {\n\t\t\tt.Fatalf(\"unexpected event type: %T\", ev)\n\t\t}\n\tcase <-time.After(30 * time.Second):\n\t\tt.Fatal(\"timeout waiting for schema change event\")\n\t}\n}\n\nfunc TestSessionEventBusReceivesControlReconnectEvent(t *testing.T) {\n\tt.Parallel()\n\n\tcluster := createCluster()\n\tcluster.Events.DisableTopologyEvents = true\n\tcluster.Events.DisableNodeStatusEvents = true\n\n\tsess, err := 
cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to create session: %v\", err)\n\t}\n\tdefer sess.Close()\n\n\tsub := sess.SubscribeToEvents(\"control-reconnect\", 10, func(ev events.Event) bool {\n\t\treturn ev.Type() == events.SessionEventTypeControlConnectionRecreated\n\t})\n\tdefer sub.Stop()\n\n\tif err := sess.control.reconnect(); err != nil {\n\t\tt.Fatalf(\"control reconnect: %v\", err)\n\t}\n\n\tselect {\n\tcase ev := <-sub.Events():\n\t\tif _, ok := ev.(*events.ControlConnectionRecreatedEvent); !ok {\n\t\t\tt.Fatalf(\"unexpected event type: %T\", ev)\n\t\t}\n\tcase <-time.After(30 * time.Second):\n\t\tt.Fatal(\"timeout waiting for control reconnect event\")\n\t}\n}\n"
  },
  {
    "path": "session_event_bus_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage gocql\n\nimport (\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/events\"\n\t\"github.com/gocql/gocql/internal/eventbus\"\n)\n\nfunc TestSessionEventBusPublishesEvents(t *testing.T) {\n\ts := &Session{\n\t\teventBus: eventbus.New[events.Event](eventbus.EventBusConfig{\n\t\t\tInputEventsQueueSize: 1,\n\t\t}, nil),\n\t\tlogger: &nopLogger{},\n\t}\n\n\tif err := s.eventBus.Start(); err != nil {\n\t\tt.Fatalf(\"starting event bus: %v\", err)\n\t}\n\tdefer s.eventBus.Stop()\n\n\tsub := s.SubscribeToEvents(\"test\", 1, nil)\n\tdefer sub.Stop()\n\n\tev := &events.StatusChangeEvent{\n\t\tChange: \"UP\",\n\t\tHost:   net.ParseIP(\"127.0.0.1\"),\n\t\tPort:   9042,\n\t}\n\n\ts.publishEvent(ev)\n\n\tselect {\n\tcase received := <-sub.Events():\n\t\tif received != ev {\n\t\t\tt.Fatalf(\"unexpected event pointer: got %p want %p\", received, ev)\n\t\t}\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatal(\"timeout waiting for event\")\n\t}\n}\n"
  },
  {
    "path": "session_test.go",
    "content": "//go:build integration\n// +build integration\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestSessionAPI(t *testing.T) {\n\tt.Parallel()\n\n\tcfg := &ClusterConfig{}\n\n\ts := &Session{\n\t\tcfg:    *cfg,\n\t\tcons:   Quorum,\n\t\tpolicy: RoundRobinHostPolicy(),\n\t\tlogger: cfg.logger(),\n\t}\n\n\ts.pool = cfg.PoolConfig.buildPool(s)\n\ts.executor = &queryExecutor{\n\t\tpool:   s.pool,\n\t\tpolicy: s.policy,\n\t}\n\tdefer s.Close()\n\n\ts.SetConsistency(All)\n\tif s.cons != All {\n\t\tt.Fatalf(\"expected consistency 'All', got '%v'\", s.cons)\n\t}\n\n\ts.SetPageSize(100)\n\tif s.pageSize != 100 {\n\t\tt.Fatalf(\"expected pageSize 100, got %v\", s.pageSize)\n\t}\n\n\ts.SetPrefetch(0.75)\n\tif s.prefetch != 0.75 {\n\t\tt.Fatalf(\"expected prefetch 0.75, got %v\", s.prefetch)\n\t}\n\n\ttrace := 
NewTracer(nil)\n\n\ts.SetTrace(trace)\n\tif s.trace != trace {\n\t\tt.Fatalf(\"expected tracer '%v', got '%v'\", trace, s.trace)\n\t}\n\n\tqry := s.Query(\"test\", 1)\n\tif v, ok := qry.values[0].(int); !ok {\n\t\tt.Fatalf(\"expected qry.values[0] to be an int, got %v\", qry.values[0])\n\t} else if v != 1 {\n\t\tt.Fatalf(\"expected qry.values[0] to be 1, got %v\", v)\n\t} else if qry.stmt != \"test\" {\n\t\tt.Fatalf(\"expected qry.stmt to be 'test', got '%v'\", qry.stmt)\n\t}\n\n\tboundQry := s.Bind(\"test\", func(q *QueryInfo) ([]any, error) {\n\t\treturn nil, nil\n\t})\n\tif boundQry.binding == nil {\n\t\tt.Fatal(\"expected qry.binding to be defined, got nil\")\n\t} else if boundQry.stmt != \"test\" {\n\t\tt.Fatalf(\"expected qry.stmt to be 'test', got '%v'\", boundQry.stmt)\n\t}\n\n\titr := s.executeQuery(qry)\n\tif itr.err != ErrSessionNotReady {\n\t\tt.Fatalf(\"expected itr.err to be '%v', got '%v'\", ErrSessionNotReady, itr.err)\n\t}\n\n\ttestBatch := s.Batch(LoggedBatch)\n\ttestBatch.Query(\"test\")\n\terr := s.ExecuteBatch(testBatch)\n\n\tif err != ErrSessionNotReady {\n\t\tt.Fatalf(\"expected session.ExecuteBatch to return '%v', got '%v'\", ErrSessionNotReady, err)\n\t}\n\n\ts.Close()\n\tif !s.Closed() {\n\t\tt.Fatal(\"expected s.Closed() to be true, got false\")\n\t}\n\t//Should just return cleanly\n\ts.Close()\n\n\terr = s.ExecuteBatch(testBatch)\n\tif err != ErrSessionClosed {\n\t\tt.Fatalf(\"expected session.ExecuteBatch to return '%v', got '%v'\", ErrSessionClosed, err)\n\t}\n}\n\ntype funcQueryObserver func(context.Context, ObservedQuery)\n\nfunc (f funcQueryObserver) ObserveQuery(ctx context.Context, o ObservedQuery) {\n\tf(ctx, o)\n}\n\nfunc TestQueryBasicAPI(t *testing.T) {\n\tt.Parallel()\n\n\tqry := &Query{routingInfo: &queryRoutingInfo{}}\n\n\t// Initiate host\n\tip := \"127.0.0.1\"\n\thostID := TimeUUID()\n\n\tqry.metrics = preFilledQueryMetrics(map[UUID]*hostMetrics{hostID: {Attempts: 0, TotalLatency: 0}})\n\tif qry.Latency() != 0 
{\n\t\tt.Fatalf(\"expected Query.Latency() to return 0, got %v\", qry.Latency())\n\t}\n\n\tqry.metrics = preFilledQueryMetrics(map[UUID]*hostMetrics{hostID: {Attempts: 2, TotalLatency: 4}})\n\tif qry.Attempts() != 2 {\n\t\tt.Fatalf(\"expected Query.Attempts() to return 2, got %v\", qry.Attempts())\n\t}\n\tif qry.Latency() != 2 {\n\t\tt.Fatalf(\"expected Query.Latency() to return 2, got %v\", qry.Latency())\n\t}\n\n\tqry.AddAttempts(2, &HostInfo{hostname: ip, connectAddress: net.ParseIP(ip), port: 9042})\n\tif qry.Attempts() != 4 {\n\t\tt.Fatalf(\"expected Query.Attempts() to return 4, got %v\", qry.Attempts())\n\t}\n\n\tqry.Consistency(All)\n\tif qry.GetConsistency() != All {\n\t\tt.Fatalf(\"expected Query.GetConsistency to return 'All', got '%s'\", qry.GetConsistency())\n\t}\n\n\tqry.Consistency(LocalSerial)\n\tif qry.GetConsistency() != LocalSerial {\n\t\tt.Fatalf(\"expected Query.GetConsistency to return 'LocalSerial', got '%s'\", qry.GetConsistency())\n\t}\n\n\tqry.SerialConsistency(LocalSerial)\n\tif qry.GetConsistency() != LocalSerial {\n\t\tt.Fatalf(\"expected Query.GetConsistency to return 'LocalSerial', got '%s'\", qry.GetConsistency())\n\t}\n\n\ttrace := NewTracer(nil)\n\tqry.Trace(trace)\n\tif qry.trace != trace {\n\t\tt.Fatalf(\"expected Query.Trace to be '%v', got '%v'\", trace, qry.trace)\n\t}\n\n\tobserver := funcQueryObserver(func(context.Context, ObservedQuery) {})\n\tqry.Observer(observer)\n\tif qry.observer == nil { // can't compare func to func, checking not nil instead\n\t\tt.Fatal(\"expected Query.QueryObserver to be set, got nil\")\n\t}\n\n\tqry.PageSize(10)\n\tif qry.pageSize != 10 {\n\t\tt.Fatalf(\"expected Query.PageSize to be 10, got %v\", qry.pageSize)\n\t}\n\n\tqry.Prefetch(0.75)\n\tif qry.prefetch != 0.75 {\n\t\tt.Fatalf(\"expected Query.Prefetch to be 0.75, got %v\", qry.prefetch)\n\t}\n\n\trt := &SimpleRetryPolicy{NumRetries: 3}\n\tif qry.RetryPolicy(rt); qry.rt != rt {\n\t\tt.Fatalf(\"expected Query.RetryPolicy to be '%v', got 
'%v'\", rt, qry.rt)\n\t}\n\n\tqry.Bind(qry)\n\tif qry.values[0] != qry {\n\t\tt.Fatalf(\"expected Query.Values[0] to be '%v', got '%v'\", qry, qry.values[0])\n\t}\n}\n\nfunc TestQueryShouldPrepare(t *testing.T) {\n\tt.Parallel()\n\n\ttoPrepare := []string{\"select * \", \"INSERT INTO\", \"update table\", \"delete from\", \"begin batch\"}\n\tcantPrepare := []string{\"create table\", \"USE table\", \"LIST keyspaces\", \"alter table\", \"drop table\", \"grant user\", \"revoke user\"}\n\n\tfor i := 0; i < len(toPrepare); i++ {\n\t\tq := &Query{stmt: toPrepare[i], routingInfo: &queryRoutingInfo{}}\n\t\tif !q.shouldPrepare() {\n\t\t\tt.Fatalf(\"expected Query.shouldPrepare to return true, got false for statement '%v'\", toPrepare[i])\n\t\t}\n\t}\n\n\tfor i := 0; i < len(cantPrepare); i++ {\n\t\tq := &Query{stmt: cantPrepare[i], routingInfo: &queryRoutingInfo{}}\n\t\tif q.shouldPrepare() {\n\t\t\tt.Fatalf(\"expected Query.shouldPrepare to return false, got true for statement '%v'\", cantPrepare[i])\n\t\t}\n\t}\n}\n\nfunc TestBatchBasicAPI(t *testing.T) {\n\tt.Parallel()\n\n\tcfg := &ClusterConfig{RetryPolicy: &SimpleRetryPolicy{NumRetries: 2}}\n\n\ts := &Session{\n\t\tcfg:    *cfg,\n\t\tcons:   Quorum,\n\t\tlogger: cfg.logger(),\n\t}\n\tdefer s.Close()\n\n\ts.pool = cfg.PoolConfig.buildPool(s)\n\n\t// Test UnloggedBatch\n\tb := s.Batch(UnloggedBatch)\n\tif b.Type != UnloggedBatch {\n\t\tt.Fatalf(\"expected batch.Type to be '%v', got '%v'\", UnloggedBatch, b.Type)\n\t} else if b.rt != cfg.RetryPolicy {\n\t\tt.Fatalf(\"expected batch.RetryPolicy to be '%v', got '%v'\", cfg.RetryPolicy, b.rt)\n\t}\n\n\t// Test LoggedBatch\n\tb = s.Batch(LoggedBatch)\n\tif b.Type != LoggedBatch {\n\t\tt.Fatalf(\"expected batch.Type to be '%v', got '%v'\", LoggedBatch, b.Type)\n\t}\n\n\tip := \"127.0.0.1\"\n\thostID := TimeUUID()\n\n\t// Test attempts\n\tb.metrics = preFilledQueryMetrics(map[UUID]*hostMetrics{hostID: {Attempts: 1}})\n\tif b.Attempts() != 1 {\n\t\tt.Fatalf(\"expected 
batch.Attempts() to return %v, got %v\", 1, b.Attempts())\n\t}\n\n\tb.AddAttempts(2, &HostInfo{hostname: ip, connectAddress: net.ParseIP(ip), port: 9042})\n\tif b.Attempts() != 3 {\n\t\tt.Fatalf(\"expected batch.Attempts() to return %v, got %v\", 3, b.Attempts())\n\t}\n\n\t// Test latency\n\tif b.Latency() != 0 {\n\t\tt.Fatalf(\"expected batch.Latency() to be 0, got %v\", b.Latency())\n\t}\n\n\tb.metrics = preFilledQueryMetrics(map[UUID]*hostMetrics{hostID: {Attempts: 1, TotalLatency: 4}})\n\tif b.Latency() != 4 {\n\t\tt.Fatalf(\"expected batch.Latency() to return %v, got %v\", 4, b.Latency())\n\t}\n\n\t// Test Consistency\n\tb.Cons = One\n\tif b.GetConsistency() != One {\n\t\tt.Fatalf(\"expected batch.GetConsistency() to return 'One', got '%s'\", b.GetConsistency())\n\t}\n\n\ttrace := NewTracer(nil)\n\tb.Trace(trace)\n\tif b.trace != trace {\n\t\tt.Fatalf(\"expected batch.Trace to be '%v', got '%v'\", trace, b.trace)\n\t}\n\n\t// Test batch.Query()\n\tb.Query(\"test\", 1)\n\tif b.Entries[0].Stmt != \"test\" {\n\t\tt.Fatalf(\"expected batch.Entries[0].Statement to be 'test', got '%v'\", b.Entries[0].Stmt)\n\t} else if b.Entries[0].Args[0].(int) != 1 {\n\t\tt.Fatalf(\"expected batch.Entries[0].Args[0] to be 1, got %v\", b.Entries[0].Args[0])\n\t}\n\n\tb.Bind(\"test2\", func(q *QueryInfo) ([]any, error) {\n\t\treturn nil, nil\n\t})\n\n\tif b.Entries[1].Stmt != \"test2\" {\n\t\tt.Fatalf(\"expected batch.Entries[1].Statement to be 'test2', got '%v'\", b.Entries[1].Stmt)\n\t} else if b.Entries[1].binding == nil {\n\t\tt.Fatal(\"expected batch.Entries[1].binding to be defined, got nil\")\n\t}\n\n\t// Test RetryPolicy\n\tr := &SimpleRetryPolicy{NumRetries: 4}\n\n\tb.RetryPolicy(r)\n\tif b.rt != r {\n\t\tt.Fatalf(\"expected batch.RetryPolicy to be '%v', got '%v'\", r, b.rt)\n\t}\n\n\tif b.Size() != 2 {\n\t\tt.Fatalf(\"expected batch.Size() to return 2, got %v\", b.Size())\n\t}\n\n}\n\nfunc TestConsistencyNames(t *testing.T) {\n\tt.Parallel()\n\n\tnames := 
map[fmt.Stringer]string{\n\t\tAny:         \"ANY\",\n\t\tOne:         \"ONE\",\n\t\tTwo:         \"TWO\",\n\t\tThree:       \"THREE\",\n\t\tQuorum:      \"QUORUM\",\n\t\tAll:         \"ALL\",\n\t\tLocalQuorum: \"LOCAL_QUORUM\",\n\t\tEachQuorum:  \"EACH_QUORUM\",\n\t\tSerial:      \"SERIAL\",\n\t\tLocalSerial: \"LOCAL_SERIAL\",\n\t\tLocalOne:    \"LOCAL_ONE\",\n\t}\n\n\tfor k, v := range names {\n\t\tif k.String() != v {\n\t\t\tt.Fatalf(\"expected '%v', got '%v'\", v, k.String())\n\t\t}\n\t}\n}\n\nfunc TestIsUseStatement(t *testing.T) {\n\tt.Parallel()\n\n\ttestCases := []struct {\n\t\tinput string\n\t\texp   bool\n\t}{\n\t\t{\"USE foo\", true},\n\t\t{\"USe foo\", true},\n\t\t{\"UsE foo\", true},\n\t\t{\"Use foo\", true},\n\t\t{\"uSE foo\", true},\n\t\t{\"uSe foo\", true},\n\t\t{\"usE foo\", true},\n\t\t{\"use foo\", true},\n\t\t{\"SELECT \", false},\n\t\t{\"UPDATE \", false},\n\t\t{\"INSERT \", false},\n\t\t{\"\", false},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tv := isUseStatement(tc.input)\n\t\tif v != tc.exp {\n\t\t\tt.Fatalf(\"expected %v but got %v for statement %q\", tc.exp, v, tc.input)\n\t\t}\n\t}\n}\n\ntype simpleTestRetryPolycy struct {\n\tRetryType  RetryType\n\tNumRetries int\n}\n\nfunc (p *simpleTestRetryPolycy) Attempt(q RetryableQuery) bool {\n\treturn q.Attempts() <= p.NumRetries\n}\n\nfunc (p *simpleTestRetryPolycy) GetRetryType(error) RetryType {\n\treturn p.RetryType\n}\n\n// TestRetryType_IgnoreRethrow verify that with Ignore/Rethrow retry types:\n// - retries stopped\n// - return error is not nil on Rethrow, Ignore\n// - observed error is not nil\nfunc TestRetryType_IgnoreRethrow(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tvar observedErr error\n\tvar observedAttempts int\n\n\tresetObserved := func() {\n\t\tobservedErr = nil\n\t\tobservedAttempts = 0\n\t}\n\n\tobserver := funcQueryObserver(func(ctx context.Context, o ObservedQuery) {\n\t\tobservedErr = 
o.Err\n\t\tobservedAttempts++\n\t})\n\n\tfor i, caseParams := range []struct {\n\t\tretries   int\n\t\tretryType RetryType\n\t}{\n\t\t{0, Ignore},  // check that stops retries\n\t\t{1, Ignore},  // check that stops retries\n\t\t{0, Rethrow}, // check that stops retries\n\t\t{1, Rethrow}, // check that stops retries\n\t} {\n\t\tretryPolicy := &simpleTestRetryPolycy{RetryType: caseParams.retryType, NumRetries: caseParams.retries}\n\n\t\terr := session.Query(\"INSERT INTO gocql_test.invalid_table(value) VALUES(1)\").Idempotent(true).RetryPolicy(retryPolicy).Observer(observer).Exec()\n\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"case %d [%v] Expected unconfigured table error, got: nil\", i, caseParams.retryType)\n\t\t}\n\n\t\tif observedErr == nil {\n\t\t\tt.Fatalf(\"case %d expected unconfigured table error in Observer, got: nil\", i)\n\t\t}\n\n\t\texpectedAttempts := caseParams.retries\n\t\tif expectedAttempts == 0 {\n\t\t\texpectedAttempts = 1\n\t\t}\n\t\tif observedAttempts != expectedAttempts {\n\t\t\tt.Fatalf(\"case %d expected %d attempts, got: %d\", i, expectedAttempts, observedAttempts)\n\t\t}\n\n\t\tresetObserved()\n\t}\n}\n\ntype sessionCache struct {\n\torig       tls.ClientSessionCache\n\tvalues     map[string][][]byte\n\tcaches     map[string][]int64\n\tvaluesLock sync.Mutex\n}\n\nfunc (c *sessionCache) Get(sessionKey string) (session *tls.ClientSessionState, ok bool) {\n\treturn c.orig.Get(sessionKey)\n}\n\nfunc (c *sessionCache) Put(sessionKey string, cs *tls.ClientSessionState) {\n\tticket, _, err := cs.ResumptionState()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(ticket) == 0 {\n\t\tpanic(\"ticket should not be empty\")\n\t}\n\tc.valuesLock.Lock()\n\tc.values[sessionKey] = append(c.values[sessionKey], ticket)\n\tc.valuesLock.Unlock()\n\tc.orig.Put(sessionKey, cs)\n}\n\nfunc (c *sessionCache) NumberOfTickets() int {\n\tc.valuesLock.Lock()\n\tdefer c.valuesLock.Unlock()\n\ttotal := 0\n\tfor _, tickets := range c.values {\n\t\ttotal += 
len(tickets)\n\t}\n\treturn total\n}\n\nfunc newSessionCache() *sessionCache {\n\treturn &sessionCache{\n\t\torig:       tls.NewLRUClientSessionCache(1024),\n\t\tvalues:     make(map[string][][]byte),\n\t\tcaches:     make(map[string][]int64),\n\t\tvaluesLock: sync.Mutex{},\n\t}\n}\n\nfunc withSessionCache(cache tls.ClientSessionCache) func(config *ClusterConfig) {\n\treturn func(config *ClusterConfig) {\n\t\tconfig.SslOpts = &SslOptions{\n\t\t\tEnableHostVerification: false,\n\t\t\tConfig: &tls.Config{\n\t\t\t\tClientSessionCache: cache,\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t},\n\t\t}\n\t}\n}\n\nfunc TestTLSTicketResumption(t *testing.T) {\n\tt.Parallel()\n\n\tt.Skip(\"TLS ticket resumption is only supported by 2025.2 and later\")\n\n\tc := newSessionCache()\n\tsession := createSession(t, withSessionCache(c))\n\tdefer session.Close()\n\n\twaitAllConnectionsOpened := func() error {\n\t\tprintln(\"wait all connections opened\")\n\t\tdefer println(\"end of wait all connections closed\")\n\t\tendtime := time.Now().UTC().Add(time.Second * 10)\n\t\tfor {\n\t\t\tif time.Now().UTC().After(endtime) {\n\t\t\t\treturn fmt.Errorf(\"timed out waiting for all connections opened\")\n\t\t\t}\n\t\t\tmissing, err := session.MissingConnections()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to get missing connections count: %w\", err)\n\t\t\t}\n\t\t\tif missing == 0 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t}\n\t}\n\n\tif err := waitAllConnectionsOpened(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttickets := c.NumberOfTickets()\n\tif tickets == 0 {\n\t\tt.Fatal(\"no tickets learned, which means that server does not support TLS tickets\")\n\t}\n\n\tsession.CloseAllConnections()\n\tif err := waitAllConnectionsOpened(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tnewTickets1 := c.NumberOfTickets()\n\n\tsession.CloseAllConnections()\n\tif err := waitAllConnectionsOpened(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tnewTickets2 := 
c.NumberOfTickets()\n\n\tif newTickets1 != tickets {\n\t\tt.Fatalf(\"new tickets learned, it looks like tls tickets were not reused: new %d, was %d\", newTickets1, tickets)\n\t}\n\tif newTickets2 != tickets {\n\t\tt.Fatalf(\"new tickets learned, it looks like tls tickets were not reused: new %d, was %d\", newTickets2, tickets)\n\t}\n}\n"
  },
  {
    "path": "session_unit_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"reflect\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/tablets\"\n)\n\nfunc TestShouldPrepareNonDML(t *testing.T) {\n\tt.Parallel()\n\n\tnonDMLStatements := []string{\n\t\t\"CREATE TABLE ks.tbl (id int PRIMARY KEY)\",\n\t\t\"ALTER TABLE ks.tbl ADD col text\",\n\t\t\"DROP TABLE ks.tbl\",\n\t\t\"TRUNCATE ks.tbl\",\n\t\t\"CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy'}\",\n\t\t\"DROP KEYSPACE ks\",\n\t\t\"GRANT SELECT ON ks.tbl TO user1\",\n\t\t\"USE ks\",\n\t}\n\n\tfor _, stmt := range nonDMLStatements {\n\t\tt.Run(stmt, func(t *testing.T) {\n\t\t\tq := &Query{stmt: stmt, routingInfo: &queryRoutingInfo{}}\n\t\t\tif q.shouldPrepare() {\n\t\t\t\tt.Errorf(\"shouldPrepare(%q) = true, want false\", 
stmt)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestShouldPrepareDML(t *testing.T) {\n\tt.Parallel()\n\n\tdmlStatements := []string{\n\t\t\"SELECT * FROM ks.tbl\",\n\t\t\"INSERT INTO ks.tbl (id) VALUES (?)\",\n\t\t\"UPDATE ks.tbl SET col = ? WHERE id = ?\",\n\t\t\"DELETE FROM ks.tbl WHERE id = ?\",\n\t\t\"BEGIN BATCH INSERT INTO ks.tbl (id) VALUES (1) APPLY BATCH\",\n\t\t\"BEGIN BATCH INSERT INTO ks.tbl (id) VALUES (1) APPLY BATCH;\",\n\t\t\"BEGIN UNLOGGED BATCH INSERT INTO ks.tbl (id) VALUES (1) APPLY BATCH\",\n\t\t\"  SELECT * FROM ks.tbl\",\n\t\t\"\\t INSERT INTO ks.tbl (id) VALUES (?)\",\n\t\t\"\\u00a0SELECT * FROM ks.tbl\",\n\t}\n\n\tfor _, stmt := range dmlStatements {\n\t\tt.Run(stmt, func(t *testing.T) {\n\t\t\tq := &Query{stmt: stmt, routingInfo: &queryRoutingInfo{}}\n\t\t\tif !q.shouldPrepare() {\n\t\t\t\tt.Errorf(\"shouldPrepare(%q) = false, want true\", stmt)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAsyncSessionInit(t *testing.T) {\n\tt.Parallel()\n\n\t// Build a 3 node cluster to test host metric mapping\n\tvar addresses = []string{\n\t\t\"127.0.0.1\",\n\t\t\"127.0.0.2\",\n\t\t\"127.0.0.3\",\n\t}\n\t// only build 1 of the servers so that we can test not connecting to the last\n\t// one\n\tsrv := NewTestServerWithAddress(addresses[0]+\":0\", t, defaultProto, context.Background())\n\tdefer srv.Stop()\n\n\t// just choose any port\n\tcluster := testCluster(defaultProto, srv.Address, addresses[1]+\":9999\", addresses[2]+\":9999\")\n\tcluster.PoolConfig.HostSelectionPolicy = SingleHostReadyPolicy(RoundRobinHostPolicy())\n\tdb, err := cluster.CreateSession()\n\tif err != nil {\n\t\tt.Fatalf(\"NewCluster: %v\", err)\n\t}\n\tdefer db.Close()\n\n\t// make sure the session works\n\tif err := db.Query(\"void\").Exec(); err != nil {\n\t\tt.Fatalf(\"unexpected error from void\")\n\t}\n}\n\nfunc TestExtractKeyspaceTableFromDDL(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname      string\n\t\tddl       string\n\t\twantKS    string\n\t\twantTable 
string\n\t}{\n\t\t{\n\t\t\tname:      \"simple_create_table\",\n\t\t\tddl:       \"CREATE TABLE gocql_test.my_table (id int PRIMARY KEY)\",\n\t\t\twantKS:    \"gocql_test\",\n\t\t\twantTable: \"my_table\",\n\t\t},\n\t\t{\n\t\t\tname:      \"create_table_if_not_exists\",\n\t\t\tddl:       \"CREATE TABLE IF NOT EXISTS gocql_test.my_table (id int PRIMARY KEY)\",\n\t\t\twantKS:    \"gocql_test\",\n\t\t\twantTable: \"my_table\",\n\t\t},\n\t\t{\n\t\t\tname:      \"lowercase_create_table\",\n\t\t\tddl:       \"create table gocql_test.my_table (id int primary key)\",\n\t\t\twantKS:    \"gocql_test\",\n\t\t\twantTable: \"my_table\",\n\t\t},\n\t\t{\n\t\t\tname:      \"mixed_case_if_not_exists\",\n\t\t\tddl:       \"Create Table If Not Exists gocql_test.my_table (id int PRIMARY KEY)\",\n\t\t\twantKS:    \"gocql_test\",\n\t\t\twantTable: \"my_table\",\n\t\t},\n\t\t{\n\t\t\tname:      \"no_keyspace_prefix\",\n\t\t\tddl:       \"CREATE TABLE my_table (id int PRIMARY KEY)\",\n\t\t\twantKS:    \"\",\n\t\t\twantTable: \"\",\n\t\t},\n\t\t{\n\t\t\tname:      \"empty_string\",\n\t\t\tddl:       \"\",\n\t\t\twantKS:    \"\",\n\t\t\twantTable: \"\",\n\t\t},\n\t\t{\n\t\t\tname:      \"create_keyspace_ignored\",\n\t\t\tddl:       \"CREATE KEYSPACE my_ks WITH replication = {}\",\n\t\t\twantKS:    \"\",\n\t\t\twantTable: \"\",\n\t\t},\n\t\t{\n\t\t\tname:      \"materialized_view_ignored\",\n\t\t\tddl:       \"CREATE MATERIALIZED VIEW my_ks.my_view AS SELECT * FROM my_ks.my_table WHERE id IS NOT NULL PRIMARY KEY (id)\",\n\t\t\twantKS:    \"\",\n\t\t\twantTable: \"\",\n\t\t},\n\t\t{\n\t\t\tname:      \"multiline_ddl\",\n\t\t\tddl:       \"CREATE TABLE gocql_test.test_single_routing_key (\\n\\tfirst_id int,\\n\\tsecond_id int,\\n\\tPRIMARY KEY (first_id, second_id)\\n)\",\n\t\t\twantKS:    \"gocql_test\",\n\t\t\twantTable: \"test_single_routing_key\",\n\t\t},\n\t\t{\n\t\t\tname:      \"tablets_disabled_keyspace\",\n\t\t\tddl:       \"CREATE TABLE gocql_test_tablets_disabled.my_table (id int 
PRIMARY KEY)\",\n\t\t\twantKS:    \"gocql_test_tablets_disabled\",\n\t\t\twantTable: \"my_table\",\n\t\t},\n\t\t{\n\t\t\tname:      \"drop_table_if_exists\",\n\t\t\tddl:       \"DROP TABLE IF EXISTS gocql_test.my_table\",\n\t\t\twantKS:    \"gocql_test\",\n\t\t\twantTable: \"my_table\",\n\t\t},\n\t\t{\n\t\t\tname:      \"drop_table_if_exists_lowercase\",\n\t\t\tddl:       \"drop table if exists gocql_test.my_table\",\n\t\t\twantKS:    \"gocql_test\",\n\t\t\twantTable: \"my_table\",\n\t\t},\n\t\t{\n\t\t\tname:      \"drop_table_no_keyspace\",\n\t\t\tddl:       \"DROP TABLE IF EXISTS my_table\",\n\t\t\twantKS:    \"\",\n\t\t\twantTable: \"\",\n\t\t},\n\t\t{\n\t\t\tname:      \"table_with_space_before_paren\",\n\t\t\tddl:       \"CREATE TABLE gocql_test.t1 (id int PRIMARY KEY)\",\n\t\t\twantKS:    \"gocql_test\",\n\t\t\twantTable: \"t1\",\n\t\t},\n\t\t{\n\t\t\tname:      \"drop_keyspace_returns_empty\",\n\t\t\tddl:       \"DROP KEYSPACE IF EXISTS gocql_test\",\n\t\t\twantKS:    \"\",\n\t\t\twantTable: \"\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgotKS, gotTable := extractKeyspaceTableFromDDL(tt.ddl)\n\t\t\tif gotKS != tt.wantKS {\n\t\t\t\tt.Errorf(\"extractKeyspaceTableFromDDL(%q) keyspace = %q, want %q\", tt.ddl, gotKS, tt.wantKS)\n\t\t\t}\n\t\t\tif gotTable != tt.wantTable {\n\t\t\t\tt.Errorf(\"extractKeyspaceTableFromDDL(%q) table = %q, want %q\", tt.ddl, gotTable, tt.wantTable)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestTableMetadataAfterInvalidation(t *testing.T) {\n\tt.Parallel()\n\n\tctrl := &schemaDataMock{\n\t\tknownKeyspaces: map[string][]tableInfo{\n\t\t\t\"test_ks\": {\n\t\t\t\t{name: \"tbl_a\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}},\n\t\t\t},\n\t\t},\n\t}\n\ts := newSchemaEventTestSessionWithMock(ctrl)\n\tdefer s.Close()\n\ts.isInitialized = true\n\tpopulateKeyspace(s, \"test_ks\", \"tbl_a\")\n\n\ttbl, err := s.TableMetadata(\"test_ks\", \"tbl_a\")\n\tif err != nil 
{\n\t\tt.Fatalf(\"initial TableMetadata failed: %v\", err)\n\t}\n\tif tbl.Name != \"tbl_a\" {\n\t\tt.Fatalf(\"expected table name tbl_a, got %s\", tbl.Name)\n\t}\n\n\ts.metadataDescriber.invalidateTableSchema(\"test_ks\", \"tbl_a\")\n\n\tctrl.resetQueries()\n\n\ttbl, err = s.TableMetadata(\"test_ks\", \"tbl_a\")\n\tif err != nil {\n\t\tt.Fatalf(\"TableMetadata after invalidation failed: %v\", err)\n\t}\n\tif tbl.Name != \"tbl_a\" {\n\t\tt.Fatalf(\"expected table name tbl_a, got %s\", tbl.Name)\n\t}\n\tif ctrl.getQueryCount() == 0 {\n\t\tt.Fatal(\"expected queries to refresh tbl_a after invalidation\")\n\t}\n}\n\nfunc TestTableMetadataAfterKeyspaceInvalidation(t *testing.T) {\n\tt.Parallel()\n\n\tctrl := &schemaDataMock{\n\t\tknownKeyspaces: map[string][]tableInfo{\n\t\t\t\"test_ks\": {\n\t\t\t\t{name: \"tbl_a\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}},\n\t\t\t},\n\t\t},\n\t}\n\ts := newSchemaEventTestSessionWithMock(ctrl)\n\tdefer s.Close()\n\ts.isInitialized = true\n\tpopulateKeyspace(s, \"test_ks\", \"tbl_a\")\n\n\t_, err := s.TableMetadata(\"test_ks\", \"tbl_a\")\n\tif err != nil {\n\t\tt.Fatalf(\"initial TableMetadata failed: %v\", err)\n\t}\n\n\ts.metadataDescriber.invalidateKeyspaceSchema(\"test_ks\")\n\n\tctrl.resetQueries()\n\n\ttbl, err := s.TableMetadata(\"test_ks\", \"tbl_a\")\n\tif err != nil {\n\t\tt.Fatalf(\"TableMetadata after keyspace invalidation failed: %v\", err)\n\t}\n\tif tbl.Name != \"tbl_a\" {\n\t\tt.Fatalf(\"expected table name tbl_a, got %s\", tbl.Name)\n\t}\n\tif ctrl.getQueryCount() == 0 {\n\t\tt.Fatal(\"expected queries to reload keyspace after invalidation\")\n\t}\n}\n\nfunc newTestSessionForTableMetadata(ctrl *schemaDataMock) *Session {\n\ts := newSchemaEventTestSessionWithMock(ctrl)\n\ts.isInitialized = true\n\treturn s\n}\n\nfunc TestScyllaIsCdcTableAfterInvalidation(t *testing.T) {\n\tt.Parallel()\n\n\tctrl := &schemaDataMock{\n\t\tknownKeyspaces: map[string][]tableInfo{\n\t\t\t\"test_ks\": 
{\n\t\t\t\t{name: \"tbl_scylla_cdc_log\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}},\n\t\t\t},\n\t\t},\n\t}\n\ts := newTestSessionForTableMetadata(ctrl)\n\tdefer s.Close()\n\tpopulateKeyspace(s, \"test_ks\", \"tbl_scylla_cdc_log\")\n\n\t_, err := scyllaIsCdcTable(s, \"test_ks\", \"tbl_scylla_cdc_log\")\n\tif err != nil {\n\t\tt.Fatalf(\"initial scyllaIsCdcTable failed: %v\", err)\n\t}\n\n\ts.metadataDescriber.invalidateTableSchema(\"test_ks\", \"tbl_scylla_cdc_log\")\n\tctrl.resetQueries()\n\n\t_, err = scyllaIsCdcTable(s, \"test_ks\", \"tbl_scylla_cdc_log\")\n\tif err != nil {\n\t\tt.Fatalf(\"scyllaIsCdcTable after invalidation failed: %v\", err)\n\t}\n\tif ctrl.getQueryCount() == 0 {\n\t\tt.Fatal(\"expected queries to refresh tbl_scylla_cdc_log after invalidation\")\n\t}\n}\n\nfunc TestScyllaIsCdcTableNotCdcSuffix(t *testing.T) {\n\tt.Parallel()\n\n\tctrl := &schemaDataMock{\n\t\tknownKeyspaces: map[string][]tableInfo{\n\t\t\t\"test_ks\": {\n\t\t\t\t{name: \"regular_table\", columns: []columnInfo{{name: \"id\", kind: \"partition_key\", position: 0}}},\n\t\t\t},\n\t\t},\n\t}\n\ts := newTestSessionForTableMetadata(ctrl)\n\tdefer s.Close()\n\tpopulateKeyspace(s, \"test_ks\", \"regular_table\")\n\n\tisCdc, err := scyllaIsCdcTable(s, \"test_ks\", \"regular_table\")\n\tif err != nil {\n\t\tt.Fatalf(\"scyllaIsCdcTable failed: %v\", err)\n\t}\n\tif isCdc {\n\t\tt.Fatal(\"expected regular_table to not be a CDC table\")\n\t}\n}\n\nfunc TestTestTableName(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname  string\n\t\tparts []string\n\t\twant  string\n\t}{\n\t\t{\n\t\t\tname: \"basic\",\n\t\t\twant: \"testtesttablename_basic\",\n\t\t},\n\t\t{\n\t\t\tname:  \"with_parts\",\n\t\t\tparts: []string{\"single\"},\n\t\t\twant:  \"testtesttablename_with_parts_single\",\n\t\t},\n\t\t{\n\t\t\tname:  \"multiple_parts\",\n\t\t\tparts: []string{\"foo\", \"bar\"},\n\t\t\twant:  
\"testtesttablename_multiple_parts_foo_bar\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := testTableName(t, tt.parts...)\n\t\t\tif got != tt.want {\n\t\t\t\tt.Errorf(\"testTableName() = %q, want %q\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestTestTableNameSanitizesSpecialChars(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"sub/with/slashes\", func(t *testing.T) {\n\t\tgot := testTableName(t)\n\t\tif strings.Contains(got, \"/\") {\n\t\t\tt.Errorf(\"expected no slashes, got %q\", got)\n\t\t}\n\t\tif strings.Contains(got, \"__\") {\n\t\t\tt.Errorf(\"expected no consecutive underscores, got %q\", got)\n\t\t}\n\t})\n}\n\nfunc TestTestTableNameTruncation(t *testing.T) {\n\tt.Parallel()\n\n\tlong := \"abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz\"\n\tt.Run(long, func(t *testing.T) {\n\t\tgot := testTableName(t, \"extra\")\n\t\tif len(got) > maxCQLIdentifierLen {\n\t\t\tt.Errorf(\"len = %d, want <= %d; value = %q\", len(got), maxCQLIdentifierLen, got)\n\t\t}\n\t\t// Should preserve chars from both the start and end around the hash.\n\t\tif got[:5] != \"testt\" {\n\t\t\tt.Errorf(\"expected prefix from test name, got %q\", got)\n\t\t}\n\t\tif !strings.HasSuffix(got, \"_extra\") {\n\t\t\tt.Errorf(\"expected suffix from test name and parts, got %q\", got)\n\t\t}\n\t\tif len(got) != maxCQLIdentifierLen {\n\t\t\tt.Errorf(\"expected truncated name to use full identifier budget, got len=%d value=%q\", len(got), got)\n\t\t}\n\t\tif got[15] != '_' || got[32] != '_' {\n\t\t\tt.Errorf(\"expected <first-n>_<hash>_<last-n> structure, got %q\", got)\n\t\t}\n\t\tfor _, ch := range got[16:32] {\n\t\t\tif (ch < '0' || ch > '9') && (ch < 'a' || ch > 'f') {\n\t\t\t\tt.Errorf(\"expected hex hash in the middle, got %q\", got)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc TestTestTableNameUniqueness(t *testing.T) {\n\tt.Parallel()\n\n\ta := testTableName(t, \"alpha\")\n\tb := testTableName(t, \"beta\")\n\tif a == b 
{\n\t\tt.Errorf(\"expected different names, both got %q\", a)\n\t}\n}\n\n// testWarningFramer is a mock framerInterface that returns configurable warnings.\ntype testWarningFramer struct {\n\twarnings      []string\n\tcustomPayload map[string][]byte\n\treleased      bool\n}\n\nfunc (f *testWarningFramer) ReadBytesInternal() ([]byte, error) { return nil, nil }\nfunc (f *testWarningFramer) GetCustomPayload() map[string][]byte {\n\treturn f.customPayload\n}\nfunc (f *testWarningFramer) GetHeaderWarnings() []string { return f.warnings }\nfunc (f *testWarningFramer) Release()                    { f.released = true }\n\ntype recordingWarningHandler struct {\n\tcalls     int\n\tlastHost  *HostInfo\n\tlastQry   ExecutableQuery\n\tqueryStmt string\n\twarnings  []string\n}\n\nfunc (h *recordingWarningHandler) HandleWarnings(qry ExecutableQuery, host *HostInfo, warnings []string) {\n\th.calls++\n\th.lastQry = qry\n\th.lastHost = host\n\tif query, ok := qry.(*Query); ok {\n\t\th.queryStmt = query.stmt\n\t}\n\th.warnings = slices.Clone(warnings)\n}\n\ntype staticConnPicker struct {\n\tconn *Conn\n}\n\nfunc (p staticConnPicker) Pick(Token, ExecutableQuery) *Conn { return p.conn }\nfunc (p staticConnPicker) Put(*Conn) error                   { return nil }\nfunc (p staticConnPicker) Remove(*Conn)                      {}\nfunc (p staticConnPicker) InFlight() int                     { return 0 }\nfunc (p staticConnPicker) Size() (int, int)                  { return 1, 0 }\nfunc (p staticConnPicker) Close()                            {}\nfunc (p staticConnPicker) NextShard() (shardID, nrShards int) {\n\treturn 0, 0\n}\nfunc (p staticConnPicker) GetConnectionCount() int       { return 1 }\nfunc (p staticConnPicker) GetExcessConnectionCount() int { return 0 }\nfunc (p staticConnPicker) GetShardCount() int            { return 0 }\n\ntype staticSelectedHost struct {\n\thost *HostInfo\n}\n\nfunc (h staticSelectedHost) Info() *HostInfo { return h.host }\nfunc (h staticSelectedHost) 
Token() Token    { return nil }\nfunc (h staticSelectedHost) Mark(error)      {}\n\ntype pagingTestConn struct {\n\texecuteQueryFunc func(ctx context.Context, qry *Query) *Iter\n}\n\nfunc (*pagingTestConn) Close() {}\nfunc (*pagingTestConn) exec(context.Context, frameBuilder, Tracer, time.Duration) (*framer, error) {\n\treturn nil, nil\n}\nfunc (*pagingTestConn) awaitSchemaAgreement(context.Context) error { return nil }\nfunc (c *pagingTestConn) executeQuery(ctx context.Context, qry *Query) *Iter {\n\treturn c.executeQueryFunc(ctx, qry)\n}\nfunc (*pagingTestConn) querySystem(context.Context, string, ...any) *Iter { return nil }\nfunc (*pagingTestConn) getIsSchemaV2() bool                               { return false }\nfunc (*pagingTestConn) setSchemaV2(bool)                                  {}\nfunc (*pagingTestConn) getScyllaSupported() ScyllaConnectionFeatures {\n\treturn ScyllaConnectionFeatures{}\n}\n\ntype fixedRetryPolicy struct {\n\tmaxRetries int\n\tretryType  RetryType\n}\n\nfunc (p *fixedRetryPolicy) Attempt(q RetryableQuery) bool {\n\treturn q.Attempts() <= p.maxRetries\n}\n\nfunc (p *fixedRetryPolicy) GetRetryType(error) RetryType {\n\treturn p.retryType\n}\n\ntype executorTestQuery struct {\n\tctx         context.Context\n\trt          RetryPolicy\n\tspec        SpeculativeExecutionPolicy\n\tidempotent  bool\n\tconsistency Consistency\n\tattempts    int\n\tborrowed    int\n\treleased    int\n\texecuteFunc func(context.Context, *Conn) *Iter\n}\n\nfunc (q *executorTestQuery) borrowForExecution() {\n\tq.borrowed++\n}\n\nfunc (q *executorTestQuery) releaseAfterExecution() {\n\tq.released++\n}\n\nfunc (q *executorTestQuery) execute(ctx context.Context, conn *Conn) *Iter {\n\treturn q.executeFunc(ctx, conn)\n}\n\nfunc (q *executorTestQuery) attempt(string, time.Time, time.Time, *Iter, *HostInfo) {\n\tq.attempts++\n}\n\nfunc (q *executorTestQuery) retryPolicy() RetryPolicy {\n\treturn q.rt\n}\n\nfunc (q *executorTestQuery) speculativeExecutionPolicy() 
SpeculativeExecutionPolicy {\n\tif q.spec == nil {\n\t\treturn NonSpeculativeExecution{}\n\t}\n\treturn q.spec\n}\n\nfunc (q *executorTestQuery) GetRoutingKey() ([]byte, error) { return nil, nil }\nfunc (q *executorTestQuery) Keyspace() string               { return \"\" }\nfunc (q *executorTestQuery) Table() string                  { return \"\" }\nfunc (q *executorTestQuery) IsIdempotent() bool             { return q.idempotent }\nfunc (q *executorTestQuery) IsLWT() bool                    { return false }\nfunc (q *executorTestQuery) GetCustomPartitioner() Partitioner {\n\treturn nil\n}\nfunc (q *executorTestQuery) GetHostID() string { return \"\" }\n\nfunc (q *executorTestQuery) withContext(ctx context.Context) ExecutableQuery {\n\tq2 := *q\n\tq2.ctx = ctx\n\treturn &q2\n}\n\nfunc (q *executorTestQuery) Attempts() int {\n\treturn q.attempts\n}\n\nfunc (q *executorTestQuery) SetConsistency(c Consistency) {\n\tq.consistency = c\n}\n\nfunc (q *executorTestQuery) GetConsistency() Consistency {\n\treturn q.consistency\n}\n\nfunc (q *executorTestQuery) Context() context.Context {\n\tif q.ctx == nil {\n\t\treturn context.Background()\n\t}\n\treturn q.ctx\n}\n\nfunc (q *executorTestQuery) GetSession() *Session { return nil }\n\nfunc newTestQueryExecutor(host *HostInfo) *queryExecutor {\n\treturn &queryExecutor{\n\t\tpool: &policyConnPool{\n\t\t\thostConnPools: map[string]*hostConnPool{\n\t\t\t\thost.HostID(): &hostConnPool{\n\t\t\t\t\thost:       host,\n\t\t\t\t\tconnPicker: staticConnPicker{conn: &Conn{}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc newWarningTestQuery() *Query {\n\treturn &Query{\n\t\tcontext:     context.Background(),\n\t\troutingInfo: &queryRoutingInfo{},\n\t\tmetrics:     &queryMetrics{m: make(map[UUID]*hostMetrics)},\n\t\trt:          &SimpleRetryPolicy{NumRetries: 0},\n\t\tspec:        NonSpeculativeExecution{},\n\t}\n}\n\nfunc TestIterWarnings(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"NoFramer\", func(t *testing.T) {\n\t\titer := 
&Iter{}\n\t\twarnings := iter.Warnings()\n\t\tif len(warnings) != 0 {\n\t\t\tt.Errorf(\"expected no warnings, got %v\", warnings)\n\t\t}\n\t})\n\n\tt.Run(\"SinglePage\", func(t *testing.T) {\n\t\tframer := &testWarningFramer{warnings: []string{\"warn1\", \"warn2\"}}\n\t\titer := &Iter{framer: framer}\n\n\t\twarnings := iter.Warnings()\n\t\twant := []string{\"warn1\", \"warn2\"}\n\t\tif !slices.Equal(warnings, want) {\n\t\t\tt.Errorf(\"Warnings() = %v, want %v\", warnings, want)\n\t\t}\n\t})\n\n\tt.Run(\"ReturnsCopy\", func(t *testing.T) {\n\t\tframer := &testWarningFramer{warnings: []string{\"warn1\"}}\n\t\titer := &Iter{framer: framer}\n\n\t\tw1 := iter.Warnings()\n\t\tw2 := iter.Warnings()\n\n\t\t// Mutating w1 should not affect w2\n\t\tw1[0] = \"mutated\"\n\t\tif w2[0] == \"mutated\" {\n\t\t\tt.Error(\"Warnings() returned a shared slice, expected independent copies\")\n\t\t}\n\t})\n\n\tt.Run(\"AccumulatedAcrossPages\", func(t *testing.T) {\n\t\tpage1Framer := &testWarningFramer{warnings: []string{\"page1-warn1\", \"page1-warn2\"}}\n\t\titer := &Iter{\n\t\t\tframer:  page1Framer,\n\t\t\tnumRows: 1,\n\t\t\tpos:     1,\n\t\t\tnext:    nil,\n\t\t}\n\n\t\tif w := iter.framer.GetHeaderWarnings(); len(w) > 0 {\n\t\t\titer.allWarnings = append(iter.allWarnings, w...)\n\t\t}\n\t\titer.framer.Release()\n\t\tpage2Framer := &testWarningFramer{warnings: []string{\"page2-warn1\"}}\n\t\titer.framer = page2Framer\n\n\t\twarnings := iter.Warnings()\n\t\twant := []string{\"page1-warn1\", \"page1-warn2\", \"page2-warn1\"}\n\t\tif !slices.Equal(warnings, want) {\n\t\t\tt.Errorf(\"Warnings() = %v, want %v\", warnings, want)\n\t\t}\n\n\t\tif !page1Framer.released {\n\t\t\tt.Error(\"page 1 framer was not released\")\n\t\t}\n\t})\n\n\tt.Run(\"AfterClose\", func(t *testing.T) {\n\t\tframer := &testWarningFramer{warnings: []string{\"last-page-warn\"}}\n\t\titer := &Iter{\n\t\t\tframer:      framer,\n\t\t\tallWarnings: []string{\"prev-page-warn\"},\n\t\t}\n\n\t\titer.Close()\n\n\t\tif 
!framer.released {\n\t\t\tt.Error(\"framer was not released on Close()\")\n\t\t}\n\t\tif iter.framer != nil {\n\t\t\tt.Error(\"framer was not nilled on Close()\")\n\t\t}\n\n\t\twarnings := iter.Warnings()\n\t\twant := []string{\"prev-page-warn\", \"last-page-warn\"}\n\t\tif !slices.Equal(warnings, want) {\n\t\t\tt.Errorf(\"Warnings() after Close() = %v, want %v\", warnings, want)\n\t\t}\n\t})\n\n\tt.Run(\"EmptyPages\", func(t *testing.T) {\n\t\titer := &Iter{\n\t\t\tallWarnings: []string{\"page1-warn\"},\n\t\t}\n\t\tpage2Framer := &testWarningFramer{warnings: nil}\n\t\titer.framer = page2Framer\n\n\t\twarnings := iter.Warnings()\n\t\twant := []string{\"page1-warn\"}\n\t\tif !slices.Equal(warnings, want) {\n\t\t\tt.Errorf(\"Warnings() = %v, want %v\", warnings, want)\n\t\t}\n\t})\n\n\tt.Run(\"CloseIdempotent\", func(t *testing.T) {\n\t\tframer := &testWarningFramer{warnings: []string{\"warn\"}}\n\t\titer := &Iter{framer: framer}\n\n\t\titer.Close()\n\t\titer.Close()\n\n\t\twarnings := iter.Warnings()\n\t\twant := []string{\"warn\"}\n\t\tif !slices.Equal(warnings, want) {\n\t\t\tt.Errorf(\"Warnings() after double Close() = %v, want %v\", warnings, want)\n\t\t}\n\t})\n}\n\nfunc TestNewErrorIterWithReleasedFramer(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"PreservesMetadata\", func(t *testing.T) {\n\t\tpayload := map[string][]byte{\"tablet\": {1, 2, 3}}\n\t\tframer := &testWarningFramer{\n\t\t\twarnings:      []string{\"warn1\"},\n\t\t\tcustomPayload: payload,\n\t\t}\n\n\t\titer := newErrorIterWithReleasedFramer(errors.New(\"boom\"), framer)\n\n\t\tif !framer.released {\n\t\t\tt.Fatal(\"expected framer to be released\")\n\t\t}\n\t\tif !slices.Equal(iter.Warnings(), []string{\"warn1\"}) {\n\t\t\tt.Fatalf(\"Warnings() = %v, want %v\", iter.Warnings(), []string{\"warn1\"})\n\t\t}\n\t\tif !reflect.DeepEqual(iter.GetCustomPayload(), payload) {\n\t\t\tt.Fatalf(\"GetCustomPayload() = %v, want %v\", iter.GetCustomPayload(), payload)\n\t\t}\n\t})\n}\n\nfunc 
TestIterWarningHandler(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"CloseDispatchesAccumulatedWarnings\", func(t *testing.T) {\n\t\thandler := &recordingWarningHandler{}\n\t\thost := &HostInfo{hostId: UUID{1}}\n\t\tqry := &Query{\n\t\t\troutingInfo: &queryRoutingInfo{},\n\t\t\tmetrics:     &queryMetrics{m: make(map[UUID]*hostMetrics)},\n\t\t}\n\t\titer := (&Iter{\n\t\t\tframer:      &testWarningFramer{warnings: []string{\"page2\"}},\n\t\t\tallWarnings: []string{\"page1\"},\n\t\t\thost:        host,\n\t\t}).bindWarningHandler(qry, handler)\n\n\t\tif err := iter.Close(); err != nil {\n\t\t\tt.Fatalf(\"Close() returned unexpected error: %v\", err)\n\t\t}\n\n\t\twant := []string{\"page1\", \"page2\"}\n\t\tif !slices.Equal(handler.warnings, want) {\n\t\t\tt.Fatalf(\"handler warnings = %v, want %v\", handler.warnings, want)\n\t\t}\n\t\tif handler.calls != 1 {\n\t\t\tt.Fatalf(\"handler call count = %d, want 1\", handler.calls)\n\t\t}\n\t\tif handler.lastHost != host {\n\t\t\tt.Fatal(\"handler host mismatch\")\n\t\t}\n\t\tif handler.lastQry != qry {\n\t\t\tt.Fatal(\"handler query mismatch\")\n\t\t}\n\t})\n\n\tt.Run(\"CloseIsIdempotent\", func(t *testing.T) {\n\t\thandler := &recordingWarningHandler{}\n\t\titer := (&Iter{\n\t\t\tframer: &testWarningFramer{warnings: []string{\"warn\"}},\n\t\t}).bindWarningHandler(&Query{\n\t\t\troutingInfo: &queryRoutingInfo{},\n\t\t\tmetrics:     &queryMetrics{m: make(map[UUID]*hostMetrics)},\n\t\t}, handler)\n\n\t\titer.Close()\n\t\titer.Close()\n\n\t\tif handler.calls != 1 {\n\t\t\tt.Fatalf(\"handler call count = %d, want 1\", handler.calls)\n\t\t}\n\t})\n\n\tt.Run(\"CopyPageDataTransfersReleasedMetadata\", func(t *testing.T) {\n\t\tsrc := newErrorIterWithReleasedFramer(errors.New(\"boom\"), &testWarningFramer{\n\t\t\twarnings:      []string{\"warn\"},\n\t\t\tcustomPayload: map[string][]byte{\"k\": {9}},\n\t\t})\n\t\tdst := &Iter{\n\t\t\tallWarnings: []string{\"first-page\"},\n\t\t}\n\n\t\tdst.copyPageData(src)\n\n\t\twantWarnings := 
[]string{\"first-page\", \"warn\"}\n\t\tif !slices.Equal(dst.Warnings(), wantWarnings) {\n\t\t\tt.Fatalf(\"Warnings() = %v, want %v\", dst.Warnings(), wantWarnings)\n\t\t}\n\t\tif !reflect.DeepEqual(dst.GetCustomPayload(), map[string][]byte{\"k\": {9}}) {\n\t\t\tt.Fatalf(\"GetCustomPayload() = %v, want %v\", dst.GetCustomPayload(), map[string][]byte{\"k\": {9}})\n\t\t}\n\t})\n\n\tt.Run(\"BindIgnoresNilHandler\", func(t *testing.T) {\n\t\titer := (&Iter{}).bindWarningHandler(&Query{\n\t\t\troutingInfo: &queryRoutingInfo{},\n\t\t\tmetrics:     &queryMetrics{m: make(map[UUID]*hostMetrics)},\n\t\t}, nil)\n\t\tif iter.warningHandler != nil {\n\t\t\tt.Fatal(\"expected warning handler to remain nil\")\n\t\t}\n\t})\n\n\tt.Run(\"HostPreservedAcrossClose\", func(t *testing.T) {\n\t\thandler := &recordingWarningHandler{}\n\t\thost := &HostInfo{port: 9042, hostId: UUID{2}}\n\t\titer := (&Iter{\n\t\t\tframer: &testWarningFramer{warnings: []string{\"warn\"}},\n\t\t\thost:   host,\n\t\t}).bindWarningHandler(&Batch{\n\t\t\tcontext:     context.Background(),\n\t\t\troutingInfo: &queryRoutingInfo{},\n\t\t\tmetrics:     &queryMetrics{m: make(map[UUID]*hostMetrics)},\n\t\t\trt:          &SimpleRetryPolicy{NumRetries: 0},\n\t\t\tspec:        NonSpeculativeExecution{},\n\t\t}, handler)\n\n\t\titer.Close()\n\n\t\tif handler.lastHost != host {\n\t\t\tt.Fatal(\"expected handler to receive the iterator host\")\n\t\t}\n\t})\n\n\tt.Run(\"CloseClearsBatchWarningQueryReference\", func(t *testing.T) {\n\t\thandler := &recordingWarningHandler{}\n\t\tbatch := &Batch{\n\t\t\tcontext:     context.Background(),\n\t\t\troutingInfo: &queryRoutingInfo{},\n\t\t\tmetrics:     &queryMetrics{m: make(map[UUID]*hostMetrics)},\n\t\t\trt:          &SimpleRetryPolicy{NumRetries: 0},\n\t\t\tspec:        NonSpeculativeExecution{},\n\t\t}\n\t\titer := (&Iter{\n\t\t\tframer: &testWarningFramer{warnings: []string{\"warn\"}},\n\t\t}).bindWarningHandler(batch, handler)\n\n\t\tif err := iter.Close(); err != nil 
{\n\t\t\tt.Fatalf(\"Close() returned unexpected error: %v\", err)\n\t\t}\n\t\tif handler.lastQry != batch {\n\t\t\tt.Fatal(\"handler batch mismatch\")\n\t\t}\n\t\tif iter.warningQuery != nil {\n\t\t\tt.Fatal(\"expected warning query to be cleared after Close\")\n\t\t}\n\t\tif iter.warningQueryOwned {\n\t\t\tt.Fatal(\"expected warningQueryOwned to be false after Close\")\n\t\t}\n\t})\n\n\tt.Run(\"CloseWithoutWarningsDoesNotInvokeHandler\", func(t *testing.T) {\n\t\thandler := &recordingWarningHandler{}\n\t\titer := (&Iter{\n\t\t\tframer: &testWarningFramer{},\n\t\t}).bindWarningHandler(&Query{\n\t\t\tcontext:     context.Background(),\n\t\t\troutingInfo: &queryRoutingInfo{},\n\t\t\tmetrics:     &queryMetrics{m: make(map[UUID]*hostMetrics)},\n\t\t\trt:          &SimpleRetryPolicy{NumRetries: 0},\n\t\t\tspec:        NonSpeculativeExecution{},\n\t\t}, handler)\n\n\t\titer.Close()\n\n\t\tif handler.calls != 0 {\n\t\t\tt.Fatalf(\"handler call count = %d, want 0\", handler.calls)\n\t\t}\n\t})\n\n\tt.Run(\"HandleWarningsOnceAfterManualAccumulation\", func(t *testing.T) {\n\t\thandler := &recordingWarningHandler{}\n\t\titer := (&Iter{\n\t\t\tallWarnings: []string{\"warn1\"},\n\t\t\thost:        &HostInfo{hostId: UUID{3}},\n\t\t}).bindWarningHandler(&Query{\n\t\t\troutingInfo: &queryRoutingInfo{},\n\t\t\tmetrics:     &queryMetrics{m: make(map[UUID]*hostMetrics)},\n\t\t}, handler)\n\n\t\titer.handleWarningsOnce()\n\t\titer.handleWarningsOnce()\n\n\t\tif handler.calls != 1 {\n\t\t\tt.Fatalf(\"handler call count = %d, want 1\", handler.calls)\n\t\t}\n\t})\n\n\tt.Run(\"QueryReleaseBeforeCloseKeepsWarningQueryAlive\", func(t *testing.T) {\n\t\thandler := &recordingWarningHandler{}\n\t\tqry := newWarningTestQuery()\n\t\tqry.refCount = 1\n\t\tqry.stmt = \"SELECT now() FROM system.local\"\n\t\titer := (&Iter{\n\t\t\tframer: &testWarningFramer{warnings: []string{\"warn\"}},\n\t\t}).bindWarningHandler(qry, handler)\n\n\t\tqry.Release()\n\n\t\tif qry.stmt != \"SELECT now() FROM 
system.local\" {\n\t\t\tt.Fatalf(\"query statement reset before iterator close: %q\", qry.stmt)\n\t\t}\n\t\tif err := iter.Close(); err != nil {\n\t\t\tt.Fatalf(\"Close() returned unexpected error: %v\", err)\n\t\t}\n\t\tif handler.calls != 1 {\n\t\t\tt.Fatalf(\"handler call count = %d, want 1\", handler.calls)\n\t\t}\n\t\tcapturedQry, ok := handler.lastQry.(*Query)\n\t\tif !ok {\n\t\t\tt.Fatalf(\"handler query type = %T, want *Query\", handler.lastQry)\n\t\t}\n\t\tif capturedQry != qry {\n\t\t\tt.Fatal(\"handler query mismatch\")\n\t\t}\n\t\tif handler.queryStmt != \"SELECT now() FROM system.local\" {\n\t\t\tt.Fatalf(\"handler saw query statement %q, want %q\", handler.queryStmt, \"SELECT now() FROM system.local\")\n\t\t}\n\t})\n\n\tt.Run(\"ReleasedErrorIterAutoFinalizesOnBind\", func(t *testing.T) {\n\t\thandler := &recordingWarningHandler{}\n\t\tqry := newWarningTestQuery()\n\t\tqry.refCount = 1\n\t\tqry.stmt = \"SELECT fail()\"\n\n\t\titer := newErrorIterWithReleasedFramer(errors.New(\"boom\"), &testWarningFramer{\n\t\t\twarnings: []string{\"warn\"},\n\t\t}).bindWarningHandler(qry, handler)\n\n\t\tif got := atomic.LoadUint32(&qry.refCount); got != 1 {\n\t\t\tt.Fatalf(\"query refCount = %d, want 1\", got)\n\t\t}\n\t\tif iter.warningQuery != nil {\n\t\t\tt.Fatal(\"expected warning query to be released\")\n\t\t}\n\t\tif handler.calls != 1 {\n\t\t\tt.Fatalf(\"handler call count = %d, want 1\", handler.calls)\n\t\t}\n\t\tif !slices.Equal(handler.warnings, []string{\"warn\"}) {\n\t\t\tt.Fatalf(\"handler warnings = %v, want %v\", handler.warnings, []string{\"warn\"})\n\t\t}\n\t\tif err := iter.Close(); err == nil || err.Error() != \"boom\" {\n\t\t\tt.Fatalf(\"Close() = %v, want boom\", err)\n\t\t}\n\t})\n}\n\nfunc TestIterAutoFinalizeOnTerminalConsumption(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"ScanEOFReleasesResources\", func(t *testing.T) {\n\t\thandler := &recordingWarningHandler{}\n\t\tqry := newWarningTestQuery()\n\t\tqry.refCount = 1\n\t\tframer := 
&testWarningFramer{warnings: []string{\"scan-eof\"}}\n\t\titer := (&Iter{\n\t\t\tframer:  framer,\n\t\t\tnumRows: 1,\n\t\t\tmeta: resultMetadata{\n\t\t\t\tactualColCount: 0,\n\t\t\t},\n\t\t}).bindWarningHandler(qry, handler)\n\n\t\tif !iter.Scan() {\n\t\t\tt.Fatal(\"expected first Scan() to succeed\")\n\t\t}\n\t\tif iter.Scan() {\n\t\t\tt.Fatal(\"expected second Scan() to report EOF\")\n\t\t}\n\t\tif !framer.released {\n\t\t\tt.Fatal(\"expected EOF to release the framer\")\n\t\t}\n\t\tif iter.framer != nil {\n\t\t\tt.Fatal(\"expected framer to be cleared after EOF\")\n\t\t}\n\t\tif got := atomic.LoadUint32(&qry.refCount); got != 1 {\n\t\t\tt.Fatalf(\"query refCount = %d, want 1\", got)\n\t\t}\n\t\tif handler.calls != 1 {\n\t\t\tt.Fatalf(\"handler call count = %d, want 1\", handler.calls)\n\t\t}\n\t\tif !slices.Equal(handler.warnings, []string{\"scan-eof\"}) {\n\t\t\tt.Fatalf(\"handler warnings = %v, want %v\", handler.warnings, []string{\"scan-eof\"})\n\t\t}\n\t})\n\n\tt.Run(\"ScannerNextEOFReleasesResources\", func(t *testing.T) {\n\t\thandler := &recordingWarningHandler{}\n\t\tqry := newWarningTestQuery()\n\t\tqry.refCount = 1\n\t\tframer := &testWarningFramer{warnings: []string{\"scanner-eof\"}}\n\t\titer := (&Iter{\n\t\t\tframer:  framer,\n\t\t\tnumRows: 1,\n\t\t\tmeta: resultMetadata{\n\t\t\t\tactualColCount: 0,\n\t\t\t},\n\t\t}).bindWarningHandler(qry, handler)\n\t\tscanner := iter.Scanner()\n\n\t\tif !scanner.Next() {\n\t\t\tt.Fatal(\"expected first Next() to succeed\")\n\t\t}\n\t\tif err := scanner.Scan(); err != nil {\n\t\t\tt.Fatalf(\"Scan() returned unexpected error: %v\", err)\n\t\t}\n\t\tif scanner.Next() {\n\t\t\tt.Fatal(\"expected second Next() to report EOF\")\n\t\t}\n\t\tif !framer.released {\n\t\t\tt.Fatal(\"expected EOF to release the framer\")\n\t\t}\n\t\tif iter.framer != nil {\n\t\t\tt.Fatal(\"expected framer to be cleared after EOF\")\n\t\t}\n\t\tif got := atomic.LoadUint32(&qry.refCount); got != 1 {\n\t\t\tt.Fatalf(\"query refCount = %d, 
want 1\", got)\n\t\t}\n\t\tif handler.calls != 1 {\n\t\t\tt.Fatalf(\"handler call count = %d, want 1\", handler.calls)\n\t\t}\n\t\tif !slices.Equal(handler.warnings, []string{\"scanner-eof\"}) {\n\t\t\tt.Fatalf(\"handler warnings = %v, want %v\", handler.warnings, []string{\"scanner-eof\"})\n\t\t}\n\t})\n}\n\nfunc TestQueryExecutorRetryAndDiscardWarningHandling(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"SpeculativeLoserIsDiscardedWithoutWarnings\", func(t *testing.T) {\n\t\thost := (&HostInfo{hostId: UUID{4}}).setState(NodeUp)\n\t\thandler := &recordingWarningHandler{}\n\t\tframer := &testWarningFramer{warnings: []string{\"loser\"}}\n\t\tqry := &executorTestQuery{\n\t\t\trt:         &fixedRetryPolicy{maxRetries: 0, retryType: Rethrow},\n\t\t\tspec:       NonSpeculativeExecution{},\n\t\t\tidempotent: true,\n\t\t}\n\t\tqry.executeFunc = func(context.Context, *Conn) *Iter {\n\t\t\treturn (&Iter{framer: framer}).bindWarningHandler(qry, handler)\n\t\t}\n\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tcancel()\n\n\t\texecutor := newTestQueryExecutor(host)\n\t\texecutor.run(ctx, qry, func() SelectedHost { return staticSelectedHost{host: host} }, make(chan *Iter))\n\n\t\tif handler.calls != 0 {\n\t\t\tt.Fatalf(\"handler call count = %d, want 0\", handler.calls)\n\t\t}\n\t\tif !framer.released {\n\t\t\tt.Fatal(\"speculative loser framer was not released\")\n\t\t}\n\t\tif qry.released != 1 {\n\t\t\tt.Fatalf(\"releaseAfterExecution calls = %d, want 1\", qry.released)\n\t\t}\n\t})\n\n\tt.Run(\"RetriedAttemptStillWarnsOnce\", func(t *testing.T) {\n\t\thost := (&HostInfo{hostId: UUID{5}}).setState(NodeUp)\n\t\thandler := &recordingWarningHandler{}\n\t\tfirstFramer := &testWarningFramer{warnings: []string{\"retry-warn\"}}\n\t\tfinalFramer := &testWarningFramer{}\n\t\tqry := &executorTestQuery{\n\t\t\tctx:        context.Background(),\n\t\t\trt:         &fixedRetryPolicy{maxRetries: 1, retryType: Retry},\n\t\t\tspec:       
NonSpeculativeExecution{},\n\t\t\tidempotent: true,\n\t\t}\n\n\t\tattempt := 0\n\t\tqry.executeFunc = func(context.Context, *Conn) *Iter {\n\t\t\tattempt++\n\t\t\tif attempt == 1 {\n\t\t\t\treturn (&Iter{err: errors.New(\"boom\"), framer: firstFramer}).bindWarningHandler(qry, handler)\n\t\t\t}\n\t\t\treturn (&Iter{framer: finalFramer}).bindWarningHandler(qry, handler)\n\t\t}\n\n\t\texecutor := newTestQueryExecutor(host)\n\t\titer := executor.do(context.Background(), qry, func() SelectedHost { return staticSelectedHost{host: host} })\n\t\tdefer iter.Close()\n\n\t\tif iter.err != nil {\n\t\t\tt.Fatalf(\"unexpected final error: %v\", iter.err)\n\t\t}\n\t\tif !firstFramer.released {\n\t\t\tt.Fatal(\"retried attempt framer was not released\")\n\t\t}\n\t\tif handler.calls != 1 {\n\t\t\tt.Fatalf(\"handler call count = %d, want 1\", handler.calls)\n\t\t}\n\t\tif !slices.Equal(handler.warnings, []string{\"retry-warn\"}) {\n\t\t\tt.Fatalf(\"handler warnings = %v, want %v\", handler.warnings, []string{\"retry-warn\"})\n\t\t}\n\t})\n}\n\nfunc TestIterCloseCleansPrefetchedNextPage(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"MaterializedNextPageIsReleasedWithoutDispatchingItsWarnings\", func(t *testing.T) {\n\t\thandler := &recordingWarningHandler{}\n\t\tqry := newWarningTestQuery()\n\t\tcurrentFramer := &testWarningFramer{warnings: []string{\"current\"}}\n\t\tnextFramer := &testWarningFramer{warnings: []string{\"prefetched\"}}\n\t\titer := (&Iter{\n\t\t\tframer: currentFramer,\n\t\t\tnext: &nextIter{\n\t\t\t\tnext: (&Iter{framer: nextFramer}).bindWarningHandler(qry, handler),\n\t\t\t},\n\t\t}).bindWarningHandler(qry, handler)\n\n\t\titer.Close()\n\n\t\tif !currentFramer.released {\n\t\t\tt.Fatal(\"current framer was not released\")\n\t\t}\n\t\tif !nextFramer.released {\n\t\t\tt.Fatal(\"prefetched next framer was not released\")\n\t\t}\n\t\tif handler.calls != 1 {\n\t\t\tt.Fatalf(\"handler call count = %d, want 1\", handler.calls)\n\t\t}\n\t\tif !slices.Equal(handler.warnings, 
[]string{\"current\"}) {\n\t\t\tt.Fatalf(\"handler warnings = %v, want %v\", handler.warnings, []string{\"current\"})\n\t\t}\n\t\tif iter.next != nil {\n\t\t\tt.Fatal(\"expected prefetched next iterator to be cleared on Close\")\n\t\t}\n\t})\n\n\tt.Run(\"LatePrefetchResultIsClosedAfterCancellation\", func(t *testing.T) {\n\t\thandler := &recordingWarningHandler{}\n\t\tnext := newNextIter(newWarningTestQuery(), 1)\n\n\t\tnext.close()\n\t\tselect {\n\t\tcase <-next.qry.Context().Done():\n\t\tdefault:\n\t\t\tt.Fatal(\"expected next-page context to be canceled\")\n\t\t}\n\n\t\tlateFramer := &testWarningFramer{warnings: []string{\"late\"}}\n\t\tnext.storeFetched((&Iter{framer: lateFramer}).bindWarningHandler(next.qry, handler))\n\n\t\tif !lateFramer.released {\n\t\t\tt.Fatal(\"late prefetched framer was not released\")\n\t\t}\n\t\tif handler.calls != 0 {\n\t\t\tt.Fatalf(\"handler call count = %d, want 0\", handler.calls)\n\t\t}\n\t})\n}\n\nfunc TestSliceMapClosesIterator(t *testing.T) {\n\tt.Parallel()\n\n\thandler := &recordingWarningHandler{}\n\tqry := newWarningTestQuery()\n\tframer := &testWarningFramer{warnings: []string{\"slice-map\"}}\n\titer := (&Iter{\n\t\tframer: framer,\n\t\tmeta: resultMetadata{\n\t\t\tactualColCount: 0,\n\t\t},\n\t}).bindWarningHandler(qry, handler)\n\n\trows, err := iter.SliceMap()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected SliceMap error: %v\", err)\n\t}\n\tif len(rows) != 0 {\n\t\tt.Fatalf(\"expected no rows, got %d\", len(rows))\n\t}\n\tif !framer.released {\n\t\tt.Fatal(\"expected SliceMap to release the iterator framer\")\n\t}\n\tif handler.calls != 1 {\n\t\tt.Fatalf(\"handler call count = %d, want 1\", handler.calls)\n\t}\n\tif !slices.Equal(handler.warnings, []string{\"slice-map\"}) {\n\t\tt.Fatalf(\"handler warnings = %v, want %v\", handler.warnings, []string{\"slice-map\"})\n\t}\n}\n\nfunc TestIterFetchNextPageRetiresConsumedFetchContextOnly(t *testing.T) {\n\tt.Parallel()\n\n\trootCtx, cancel := 
context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tvar fetchedQry *Query\n\tnextPageFramer := &testWarningFramer{warnings: []string{\"next\"}}\n\tconn := &pagingTestConn{\n\t\texecuteQueryFunc: func(_ context.Context, qry *Query) *Iter {\n\t\t\tfetchedQry = qry\n\t\t\treturn &Iter{\n\t\t\t\tframer:  nextPageFramer,\n\t\t\t\tnumRows: 1,\n\t\t\t\tnext:    newNextIter(qry, 1),\n\t\t\t}\n\t\t},\n\t}\n\n\tbaseQry := newWarningTestQuery().WithContext(rootCtx)\n\tbaseQry.conn = conn\n\tcurrentFramer := &testWarningFramer{warnings: []string{\"current\"}}\n\titer := &Iter{\n\t\tframer:  currentFramer,\n\t\tnumRows: 1,\n\t\tpos:     1,\n\t\tnext:    newNextIter(baseQry, 1),\n\t}\n\tdefer iter.Close()\n\n\tif !iter.fetchNextPage() {\n\t\tt.Fatal(\"expected next page fetch to succeed\")\n\t}\n\tif fetchedQry == nil {\n\t\tt.Fatal(\"expected next-page query to execute\")\n\t}\n\tselect {\n\tcase <-fetchedQry.Context().Done():\n\tdefault:\n\t\tt.Fatal(\"expected consumed next-page context to be canceled\")\n\t}\n\tselect {\n\tcase <-iter.next.qry.Context().Done():\n\t\tt.Fatal(\"expected following page context to remain active\")\n\tdefault:\n\t}\n\tif !currentFramer.released {\n\t\tt.Fatal(\"expected current page framer to be released\")\n\t}\n\tif iter.framer != nextPageFramer {\n\t\tt.Fatal(\"expected fetched page framer to become current\")\n\t}\n}\n\nfunc TestQueryIterManualPagingDefersHiddenEmptyPageWarnings(t *testing.T) {\n\tt.Parallel()\n\n\thandler := &recordingWarningHandler{}\n\tfirstFramer := &testWarningFramer{warnings: []string{\"empty-page\"}}\n\tfinalFramer := &testWarningFramer{warnings: []string{\"final-page\"}}\n\tbaseQry := newWarningTestQuery()\n\tbaseQry.refCount = 1\n\tbaseQry.PageState([]byte(\"initial\"))\n\n\tcall := 0\n\tbaseQry.conn = &pagingTestConn{\n\t\texecuteQueryFunc: func(_ context.Context, qry *Query) *Iter {\n\t\t\tcall++\n\t\t\tswitch call {\n\t\t\tcase 1:\n\t\t\t\tif !slices.Equal(qry.pageState, []byte(\"initial\")) 
{\n\t\t\t\t\tt.Fatalf(\"first page state = %q, want %q\", qry.pageState, []byte(\"initial\"))\n\t\t\t\t}\n\t\t\t\treturn (&Iter{\n\t\t\t\t\tframer:  firstFramer,\n\t\t\t\t\tnumRows: 0,\n\t\t\t\t\tmeta: resultMetadata{\n\t\t\t\t\t\tpagingState: []byte(\"next\"),\n\t\t\t\t\t},\n\t\t\t\t}).bindWarningHandler(qry, handler)\n\t\t\tcase 2:\n\t\t\t\tif !slices.Equal(qry.pageState, []byte(\"next\")) {\n\t\t\t\t\tt.Fatalf(\"second page state = %q, want %q\", qry.pageState, []byte(\"next\"))\n\t\t\t\t}\n\t\t\t\treturn (&Iter{\n\t\t\t\t\tframer:  finalFramer,\n\t\t\t\t\tnumRows: 1,\n\t\t\t\t}).bindWarningHandler(qry, handler)\n\t\t\tdefault:\n\t\t\t\tt.Fatalf(\"unexpected executeQuery call %d\", call)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t},\n\t}\n\n\titer := baseQry.Iter()\n\n\tif call != 2 {\n\t\tt.Fatalf(\"executeQuery call count = %d, want 2\", call)\n\t}\n\tif handler.calls != 0 {\n\t\tt.Fatalf(\"handler call count before Close = %d, want 0\", handler.calls)\n\t}\n\tif !firstFramer.released {\n\t\tt.Fatal(\"hidden empty-page framer was not released\")\n\t}\n\tif warnings := iter.Warnings(); !slices.Equal(warnings, []string{\"empty-page\", \"final-page\"}) {\n\t\tt.Fatalf(\"Warnings() = %v, want %v\", warnings, []string{\"empty-page\", \"final-page\"})\n\t}\n\n\tif err := iter.Close(); err != nil {\n\t\tt.Fatalf(\"Close() returned unexpected error: %v\", err)\n\t}\n\tif handler.calls != 1 {\n\t\tt.Fatalf(\"handler call count after Close = %d, want 1\", handler.calls)\n\t}\n\tif !slices.Equal(handler.warnings, []string{\"empty-page\", \"final-page\"}) {\n\t\tt.Fatalf(\"handler warnings = %v, want %v\", handler.warnings, []string{\"empty-page\", \"final-page\"})\n\t}\n}\n\nfunc TestQueryIterManualPagingPreservesHiddenWarningsOnTerminalError(t *testing.T) {\n\tt.Parallel()\n\n\thandler := &recordingWarningHandler{}\n\tfirstFramer := &testWarningFramer{warnings: []string{\"empty-page\"}}\n\tbaseQry := newWarningTestQuery()\n\tbaseQry.refCount = 
1\n\tbaseQry.PageState([]byte(\"initial\"))\n\n\tcall := 0\n\tbaseQry.conn = &pagingTestConn{\n\t\texecuteQueryFunc: func(_ context.Context, qry *Query) *Iter {\n\t\t\tcall++\n\t\t\tswitch call {\n\t\t\tcase 1:\n\t\t\t\tif !slices.Equal(qry.pageState, []byte(\"initial\")) {\n\t\t\t\t\tt.Fatalf(\"first page state = %q, want %q\", qry.pageState, []byte(\"initial\"))\n\t\t\t\t}\n\t\t\t\treturn (&Iter{\n\t\t\t\t\tframer:  firstFramer,\n\t\t\t\t\tnumRows: 0,\n\t\t\t\t\tmeta: resultMetadata{\n\t\t\t\t\t\tpagingState: []byte(\"next\"),\n\t\t\t\t\t},\n\t\t\t\t}).bindWarningHandler(qry, handler)\n\t\t\tcase 2:\n\t\t\t\tif !slices.Equal(qry.pageState, []byte(\"next\")) {\n\t\t\t\t\tt.Fatalf(\"second page state = %q, want %q\", qry.pageState, []byte(\"next\"))\n\t\t\t\t}\n\t\t\t\treturn newErrorIterWithReleasedFramer(errors.New(\"boom\"), &testWarningFramer{\n\t\t\t\t\twarnings: []string{\"final-error\"},\n\t\t\t\t}).bindWarningHandler(qry, handler)\n\t\t\tdefault:\n\t\t\t\tt.Fatalf(\"unexpected executeQuery call %d\", call)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t},\n\t}\n\n\titer := baseQry.Iter()\n\n\tif call != 2 {\n\t\tt.Fatalf(\"executeQuery call count = %d, want 2\", call)\n\t}\n\tif !firstFramer.released {\n\t\tt.Fatal(\"hidden empty-page framer was not released\")\n\t}\n\tif handler.calls != 1 {\n\t\tt.Fatalf(\"handler call count after Iter = %d, want 1\", handler.calls)\n\t}\n\tif !slices.Equal(handler.warnings, []string{\"empty-page\", \"final-error\"}) {\n\t\tt.Fatalf(\"handler warnings = %v, want %v\", handler.warnings, []string{\"empty-page\", \"final-error\"})\n\t}\n\tif warnings := iter.Warnings(); !slices.Equal(warnings, []string{\"empty-page\", \"final-error\"}) {\n\t\tt.Fatalf(\"Warnings() = %v, want %v\", warnings, []string{\"empty-page\", \"final-error\"})\n\t}\n\tif err := iter.Close(); err == nil || err.Error() != \"boom\" {\n\t\tt.Fatalf(\"Close() = %v, want boom\", err)\n\t}\n}\n\nfunc TestTableTabletsMetadata(t *testing.T) 
{\n\tt.Parallel()\n\n\tt.Run(\"HappyPath\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.isInitialized = true\n\t\ts.tabletsRoutingV1 = true\n\n\t\taddTestTablets(t, s, \"test_ks\", \"tbl_a\")\n\n\t\tentries, err := s.TableTabletsMetadata(\"test_ks\", \"tbl_a\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t\tif len(entries) != 2 {\n\t\t\tt.Fatalf(\"expected 2 tablet entries, got %d\", len(entries))\n\t\t}\n\t})\n\n\tt.Run(\"ClosedSession\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.isInitialized = true\n\t\ts.tabletsRoutingV1 = true\n\t\ts.isClosed = true\n\n\t\t_, err := s.TableTabletsMetadata(\"ks\", \"tb\")\n\t\tif !errors.Is(err, ErrSessionClosed) {\n\t\t\tt.Fatalf(\"expected ErrSessionClosed, got %v\", err)\n\t\t}\n\t})\n\n\tt.Run(\"NotReady\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.tabletsRoutingV1 = true\n\n\t\t_, err := s.TableTabletsMetadata(\"ks\", \"tb\")\n\t\tif !errors.Is(err, ErrSessionNotReady) {\n\t\t\tt.Fatalf(\"expected ErrSessionNotReady, got %v\", err)\n\t\t}\n\t})\n\n\tt.Run(\"TabletsNotEnabled\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.isInitialized = true\n\n\t\t_, err := s.TableTabletsMetadata(\"ks\", \"tb\")\n\t\tif !errors.Is(err, ErrTabletsNotUsed) {\n\t\t\tt.Fatalf(\"expected ErrTabletsNotUsed, got %v\", err)\n\t\t}\n\t})\n\n\tt.Run(\"EmptyKeyspace\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tctrl := 
&schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.isInitialized = true\n\t\ts.tabletsRoutingV1 = true\n\n\t\t_, err := s.TableTabletsMetadata(\"\", \"tb\")\n\t\tif !errors.Is(err, ErrNoKeyspace) {\n\t\t\tt.Fatalf(\"expected ErrNoKeyspace, got %v\", err)\n\t\t}\n\t})\n\n\tt.Run(\"EmptyTable\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.isInitialized = true\n\t\ts.tabletsRoutingV1 = true\n\n\t\t_, err := s.TableTabletsMetadata(\"ks\", \"\")\n\t\tif !errors.Is(err, ErrNoTable) {\n\t\t\tt.Fatalf(\"expected ErrNoTable, got %v\", err)\n\t\t}\n\t})\n\n\tt.Run(\"NoData\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.isInitialized = true\n\t\ts.tabletsRoutingV1 = true\n\n\t\tentries, err := s.TableTabletsMetadata(\"ks\", \"nonexistent\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t\tif entries != nil {\n\t\t\tt.Fatalf(\"expected nil for nonexistent table, got %d entries\", len(entries))\n\t\t}\n\t})\n}\n\nfunc TestForEachTablet(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"HappyPath\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.isInitialized = true\n\t\ts.tabletsRoutingV1 = true\n\n\t\taddTestTablets(t, s, \"ks1\", \"tbl_a\")\n\t\taddTestTablets(t, s, \"ks2\", \"tbl_b\")\n\n\t\tvisited := make(map[string]int)\n\t\terr := s.ForEachTablet(func(keyspace, table string, entries tablets.TabletEntryList) bool {\n\t\t\tvisited[keyspace+\".\"+table] = len(entries)\n\t\t\treturn true\n\t\t})\n\t\tif err != nil 
{\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t\tif len(visited) != 2 {\n\t\t\tt.Fatalf(\"expected 2 tables visited, got %d\", len(visited))\n\t\t}\n\t\tif visited[\"ks1.tbl_a\"] != 2 {\n\t\t\tt.Fatalf(\"expected 2 entries for ks1.tbl_a, got %d\", visited[\"ks1.tbl_a\"])\n\t\t}\n\t\tif visited[\"ks2.tbl_b\"] != 2 {\n\t\t\tt.Fatalf(\"expected 2 entries for ks2.tbl_b, got %d\", visited[\"ks2.tbl_b\"])\n\t\t}\n\t})\n\n\tt.Run(\"EarlyStop\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.isInitialized = true\n\t\ts.tabletsRoutingV1 = true\n\n\t\taddTestTablets(t, s, \"ks1\", \"tbl_a\")\n\t\taddTestTablets(t, s, \"ks2\", \"tbl_b\")\n\n\t\tcount := 0\n\t\terr := s.ForEachTablet(func(keyspace, table string, entries tablets.TabletEntryList) bool {\n\t\t\tcount++\n\t\t\treturn false\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t\tif count != 1 {\n\t\t\tt.Fatalf(\"expected 1 callback invocation, got %d\", count)\n\t\t}\n\t})\n\n\tt.Run(\"ClosedSession\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.isInitialized = true\n\t\ts.tabletsRoutingV1 = true\n\t\ts.isClosed = true\n\n\t\terr := s.ForEachTablet(func(keyspace, table string, entries tablets.TabletEntryList) bool {\n\t\t\tt.Fatal(\"callback should not be called on closed session\")\n\t\t\treturn true\n\t\t})\n\t\tif !errors.Is(err, ErrSessionClosed) {\n\t\t\tt.Fatalf(\"expected ErrSessionClosed, got %v\", err)\n\t\t}\n\t})\n\n\tt.Run(\"TabletsNotEnabled\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.isInitialized = true\n\n\t\terr := 
s.ForEachTablet(func(keyspace, table string, entries tablets.TabletEntryList) bool {\n\t\t\tt.Fatal(\"callback should not be called when tablets not enabled\")\n\t\t\treturn true\n\t\t})\n\t\tif !errors.Is(err, ErrTabletsNotUsed) {\n\t\t\tt.Fatalf(\"expected ErrTabletsNotUsed, got %v\", err)\n\t\t}\n\t})\n\n\tt.Run(\"NilCallback\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.isInitialized = true\n\t\ts.tabletsRoutingV1 = true\n\n\t\taddTestTablets(t, s, \"ks\", \"tb\")\n\n\t\terr := s.ForEachTablet(nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"expected nil error for nil callback, got %v\", err)\n\t\t}\n\t})\n}\n\nfunc TestFindTabletReplicasUnsafeForToken(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"NilMetadataDescriber\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\ts := &Session{}\n\t\ts.metadataDescriber = nil\n\n\t\tresult := s.findTabletReplicasUnsafeForToken(\"ks\", \"tb\", 42)\n\t\tif result != nil {\n\t\t\tt.Fatalf(\"expected nil replicas for nil metadataDescriber, got %v\", result)\n\t\t}\n\t})\n\n\tt.Run(\"NilMetadata\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\ts := &Session{}\n\t\ts.metadataDescriber = &metadataDescriber{\n\t\t\tsession:  s,\n\t\t\tmetadata: nil,\n\t\t}\n\n\t\tresult := s.findTabletReplicasUnsafeForToken(\"ks\", \"tb\", 42)\n\t\tif result != nil {\n\t\t\tt.Fatalf(\"expected nil replicas for nil metadata, got %v\", result)\n\t\t}\n\t})\n\n\tt.Run(\"ClosedSession\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.isInitialized = true\n\t\ts.isClosed = true\n\n\t\tresult := s.findTabletReplicasUnsafeForToken(\"ks\", \"tb\", 42)\n\t\tif result != nil {\n\t\t\tt.Fatalf(\"expected nil replicas for closed session, got %v\", result)\n\t\t}\n\t})\n}\n\nfunc 
TestTableMetadataValidation(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"EmptyTableReturnsErrNoTable\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tctrl := &schemaDataMock{knownKeyspaces: map[string][]tableInfo{}}\n\t\ts := newSchemaEventTestSessionWithMock(ctrl)\n\t\tdefer s.Close()\n\t\ts.isInitialized = true\n\n\t\t_, err := s.TableMetadata(\"ks\", \"\")\n\t\tif !errors.Is(err, ErrNoTable) {\n\t\t\tt.Fatalf(\"TableMetadata: expected ErrNoTable, got %v\", err)\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "stress_test.go",
    "content": "//go:build integration\n// +build integration\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"sync/atomic\"\n\n\t\"testing\"\n)\n\nfunc BenchmarkConnStress(b *testing.B) {\n\tconst workers = 16\n\n\tcluster := createCluster()\n\tcluster.NumConns = 1\n\tsession := createSessionFromCluster(cluster, b)\n\tdefer session.Close()\n\n\ttable := testTableName(b)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE IF NOT EXISTS %s (id int primary key)\", table)); err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\tvar seed uint64\n\twriter := func(pb *testing.PB) {\n\t\tseed := atomic.AddUint64(&seed, 1)\n\t\tvar i uint64 = 0\n\t\tfor pb.Next() {\n\t\t\tif err := session.Query(fmt.Sprintf(\"insert into %s (id) values (?)\", table), i*seed).Exec(); err != nil {\n\t\t\t\tb.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t}\n\n\tb.SetParallelism(workers)\n\tb.RunParallel(writer)\n}\n\nfunc 
BenchmarkConnRoutingKey(b *testing.B) {\n\tconst workers = 16\n\n\tcluster := createCluster()\n\tcluster.NumConns = 1\n\tcluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())\n\tsession := createSessionFromCluster(cluster, b)\n\tdefer session.Close()\n\n\ttable := testTableName(b)\n\n\tif err := createTable(session, fmt.Sprintf(\"CREATE TABLE IF NOT EXISTS %s (id int primary key)\", table)); err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\tvar seed uint64\n\twriter := func(pb *testing.PB) {\n\t\tseed := atomic.AddUint64(&seed, 1)\n\t\tvar i uint64 = 0\n\t\tquery := session.Query(fmt.Sprintf(\"insert into %s (id) values (?)\", table))\n\n\t\tfor pb.Next() {\n\t\t\tif _, err := query.Bind(i * seed).GetRoutingKey(); err != nil {\n\t\t\t\tb.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t}\n\n\tb.SetParallelism(workers)\n\tb.RunParallel(writer)\n}\n"
  },
  {
    "path": "tablet_integration_test.go",
    "content": "//go:build integration\n// +build integration\n\npackage gocql\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n)\n\n// Check if TokenAwareHostPolicy works correctly when using tablets\nfunc TestTablets(t *testing.T) {\n\tt.Parallel()\n\n\tif !isTabletsSupported() {\n\t\tt.Skip(\"Tablets are not supported by this server\")\n\t}\n\tcluster := createCluster()\n\n\tfallback := RoundRobinHostPolicy()\n\tcluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(fallback)\n\n\tsession := createSessionFromCluster(cluster, t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE %s (pk int, ck int, v int, PRIMARY KEY (pk, ck));\n\t`, table)); err != nil {\n\t\tt.Fatalf(\"unable to create table: %v\", err)\n\t}\n\n\thosts := session.hostSource.getHostsList()\n\n\thostAddresses := []string{}\n\tfor _, host := range hosts {\n\t\thostAddresses = append(hostAddresses, host.connectAddress.String())\n\t}\n\n\tctx := context.Background()\n\n\tfor i := 0; i < 5; i++ {\n\t\terr := session.Query(fmt.Sprintf(`INSERT INTO %s (pk, ck, v) VALUES (?, ?, ?);`, table), i, i%5, i%2).WithContext(ctx).Exec()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\tfor i := range 5 {\n\t\tstartTime := time.Now()\n\t\ttimeout := 2 * time.Second\n\t\tbackoffDelay := 100 * time.Millisecond\n\t\tsuccess := false\n\n\t\tfor attempt := 1; time.Since(startTime) < timeout; attempt++ {\n\t\t\titer := session.Query(fmt.Sprintf(`SELECT pk, ck, v FROM %s WHERE pk = ?;`, table), i).WithContext(ctx).Consistency(One).Iter()\n\n\t\t\tpayload := iter.GetCustomPayload()\n\n\t\t\tif err := iter.Close(); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tif payload == nil || payload[\"tablets-routing-v1\"] == nil {\n\t\t\t\t// Routing is working correctly\n\t\t\t\tsuccess = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// Hint received, tablet migration may be in progress\n\t\t\thint := 
payload[\"tablets-routing-v1\"]\n\t\t\ttablet, err := unmarshalTabletHint(hint, 4, \"\", \"\")\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"failed to extract tablet information: %s\", err.Error())\n\t\t\t}\n\t\t\tt.Logf(\"Attempt %d: received tablet hint (replicas: %s) - tablet migration may be in progress, backing off %v\", attempt, tablet.Replicas(), backoffDelay)\n\n\t\t\t// Backoff to allow tablet migration to complete, but do not exceed the overall timeout.\n\t\t\tremaining := timeout - time.Since(startTime)\n\t\t\tif remaining <= 0 {\n\t\t\t\t// Overall timeout reached; exit the retry loop and fail after the loop.\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsleepFor := backoffDelay\n\t\t\tif sleepFor > remaining {\n\t\t\t\tsleepFor = remaining\n\t\t\t}\n\t\t\ttime.Sleep(sleepFor)\n\t\t\tbackoffDelay *= 2 // Exponential backoff\n\t\t}\n\n\t\tif !success {\n\t\t\telapsed := time.Since(startTime)\n\t\t\tt.Fatalf(\"Timed out after %v (elapsed %v) waiting for tablets to stabilize (migrations still in progress)\", timeout, elapsed)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "tablets/cow_tablet_list_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage tablets\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql/internal/tests\"\n)\n\nfunc testHostUUID(s string) HostUUID {\n\tvar u HostUUID\n\tcopy(u[:], s)\n\treturn u\n}\n\nfunc compareEntryRanges(entries TabletEntryList, ranges [][]int64) bool {\n\tif len(entries) != len(ranges) {\n\t\treturn false\n\t}\n\tfor i, e := range entries {\n\t\tif e.firstToken != ranges[i][0] || e.lastToken != ranges[i][1] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc TestAddTabletToPerTableList(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"Empty\", func(t *testing.T) {\n\t\ttl := TabletEntryList{}\n\t\ttl = tl.addEntry(TabletEntry{\n\t\t\tfirstToken: -100, lastToken: 100,\n\t\t})\n\n\t\ttests.AssertEqual(t, \"length\", 1, len(tl))\n\t\ttests.AssertEqual(t, \"firstToken\", int64(-100), tl[0].firstToken)\n\t\ttests.AssertEqual(t, \"lastToken\", int64(100), tl[0].lastToken)\n\t})\n\n\tt.Run(\"Beginning\", func(t *testing.T) {\n\t\ttl := TabletEntryList{{\n\t\t\tfirstToken: 100, lastToken: 200,\n\t\t}}\n\t\ttl = tl.addEntry(TabletEntry{\n\t\t\tfirstToken: -200, lastToken: -100,\n\t\t})\n\n\t\ttests.AssertEqual(t, \"length\", 2, len(tl))\n\t\ttests.AssertTrue(t, \"sorted\", compareEntryRanges(tl, [][]int64{{-200, -100}, {100, 200}}))\n\t})\n\n\tt.Run(\"End\", func(t *testing.T) {\n\t\ttl := TabletEntryList{{\n\t\t\tfirstToken: -200, lastToken: -100,\n\t\t}}\n\t\ttl = tl.addEntry(TabletEntry{\n\t\t\tfirstToken: 100, lastToken: 200,\n\t\t})\n\n\t\ttests.AssertEqual(t, \"length\", 2, len(tl))\n\t\ttests.AssertTrue(t, \"sorted\", compareEntryRanges(tl, [][]int64{{-200, -100}, {100, 200}}))\n\t})\n\n\tt.Run(\"Overlap\", func(t *testing.T) {\n\t\ttl := TabletEntryList{\n\t\t\t{firstToken: -300, lastToken: -200},\n\t\t\t{firstToken: -200, lastToken: -100},\n\t\t\t{firstToken: -100, lastToken: 0},\n\t\t\t{firstToken: 0, lastToken: 100},\n\t\t}\n\t\ttl = 
tl.addEntry(TabletEntry{\n\t\t\tfirstToken: -150, lastToken: 50,\n\t\t})\n\n\t\ttests.AssertTrue(t, \"overlap resolved\",\n\t\t\tcompareEntryRanges(tl, [][]int64{{-300, -200}, {-150, 50}}))\n\t})\n\n\tt.Run(\"NewTabletContainedWithinExisting\", func(t *testing.T) {\n\t\tentries := TabletEntryList{\n\t\t\t{firstToken: -300, lastToken: 300},\n\t\t}\n\t\tresult := entries.addEntry(TabletEntry{firstToken: -100, lastToken: 100})\n\t\tif len(result) != 1 {\n\t\t\tt.Errorf(\"expected 1 tablet after replacement, got %d\", len(result))\n\t\t}\n\t\tif len(result) == 1 {\n\t\t\ttests.AssertEqual(t, \"tablet firstToken\", int64(-100), result[0].firstToken)\n\t\t\ttests.AssertEqual(t, \"tablet lastToken\", int64(100), result[0].lastToken)\n\t\t}\n\t})\n\n\tt.Run(\"NewTabletContainsMultiple\", func(t *testing.T) {\n\t\tentries := TabletEntryList{\n\t\t\t{firstToken: -200, lastToken: -100},\n\t\t\t{firstToken: -100, lastToken: 0},\n\t\t\t{firstToken: 0, lastToken: 100},\n\t\t}\n\t\tresult := entries.addEntry(TabletEntry{firstToken: -300, lastToken: 200})\n\t\tif len(result) != 1 {\n\t\t\tt.Errorf(\"expected consolidation to 1 tablet, got %d\", len(result))\n\t\t}\n\t\tif len(result) == 1 {\n\t\t\ttests.AssertEqual(t, \"tablet firstToken\", int64(-300), result[0].firstToken)\n\t\t\ttests.AssertEqual(t, \"tablet lastToken\", int64(200), result[0].lastToken)\n\t\t}\n\t})\n\n\tt.Run(\"MultiplePartialOverlaps\", func(t *testing.T) {\n\t\tentries := TabletEntryList{\n\t\t\t{firstToken: -300, lastToken: -200},\n\t\t\t{firstToken: -100, lastToken: 0},\n\t\t\t{firstToken: 100, lastToken: 200},\n\t\t}\n\t\tresult := entries.addEntry(TabletEntry{firstToken: -150, lastToken: 150})\n\t\tif len(result) != 2 {\n\t\t\tt.Errorf(\"expected 2 tablets after partial overlap, got %d\", len(result))\n\t\t}\n\t\tif len(result) == 2 {\n\t\t\ttests.AssertEqual(t, \"first tablet firstToken\", int64(-300), result[0].firstToken)\n\t\t\ttests.AssertEqual(t, \"first tablet lastToken\", int64(-200), 
result[0].lastToken)\n\t\t\ttests.AssertEqual(t, \"second tablet firstToken\", int64(-150), result[1].firstToken)\n\t\t\ttests.AssertEqual(t, \"second tablet lastToken\", int64(150), result[1].lastToken)\n\t\t}\n\t})\n}\n\nfunc TestBulkAddToPerTableList(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"Empty\", func(t *testing.T) {\n\t\ttl := TabletEntryList{}\n\t\tbatch := TabletEntryList{\n\t\t\t{firstToken: -200, lastToken: -100},\n\t\t\t{firstToken: -100, lastToken: 0},\n\t\t\t{firstToken: 0, lastToken: 100},\n\t\t}\n\t\ttl = tl.bulkAddEntries(batch)\n\n\t\ttests.AssertEqual(t, \"length\", 3, len(tl))\n\t\ttests.AssertTrue(t, \"ranges\", compareEntryRanges(tl, [][]int64{{-200, -100}, {-100, 0}, {0, 100}}))\n\t})\n\n\tt.Run(\"Overlap\", func(t *testing.T) {\n\t\ttl := TabletEntryList{\n\t\t\t{firstToken: -400, lastToken: -300},\n\t\t\t{firstToken: -300, lastToken: -200},\n\t\t\t{firstToken: -200, lastToken: -100},\n\t\t\t{firstToken: 100, lastToken: 200},\n\t\t}\n\t\tbatch := TabletEntryList{\n\t\t\t{firstToken: -350, lastToken: -250},\n\t\t\t{firstToken: -250, lastToken: -150},\n\t\t}\n\t\ttl = tl.bulkAddEntries(batch)\n\n\t\ttests.AssertTrue(t, \"overlap resolved\",\n\t\t\tcompareEntryRanges(tl, [][]int64{{-350, -250}, {-250, -150}, {100, 200}}))\n\t})\n\n\tt.Run(\"IntraBatchOverlappingPair\", func(t *testing.T) {\n\t\ttl := TabletEntryList{}\n\t\tbatch := TabletEntryList{\n\t\t\t{firstToken: 0, lastToken: 100, replicas: []ReplicaInfo{{testHostUUID(\"h1\"), 0}}},\n\t\t\t{firstToken: 50, lastToken: 150, replicas: []ReplicaInfo{{testHostUUID(\"h2\"), 0}}},\n\t\t}\n\t\ttl = tl.bulkAddEntries(batch)\n\n\t\ttests.AssertEqual(t, \"length\", 1, len(tl))\n\t\ttests.AssertEqual(t, \"firstToken\", int64(50), tl[0].firstToken)\n\t\ttests.AssertEqual(t, \"lastToken\", int64(150), tl[0].lastToken)\n\t\ttests.AssertEqual(t, \"host\", testHostUUID(\"h2\"), tl[0].replicas[0].hostId)\n\t})\n\n\tt.Run(\"IntraBatchOverlappingTriple\", func(t *testing.T) {\n\t\ttl := 
TabletEntryList{}\n\t\tbatch := TabletEntryList{\n\t\t\t{firstToken: 0, lastToken: 100, replicas: []ReplicaInfo{{testHostUUID(\"h1\"), 0}}},\n\t\t\t{firstToken: 50, lastToken: 150, replicas: []ReplicaInfo{{testHostUUID(\"h2\"), 0}}},\n\t\t\t{firstToken: 100, lastToken: 200, replicas: []ReplicaInfo{{testHostUUID(\"h3\"), 0}}},\n\t\t}\n\t\ttl = tl.bulkAddEntries(batch)\n\n\t\ttests.AssertEqual(t, \"length\", 1, len(tl))\n\t\ttests.AssertEqual(t, \"firstToken\", int64(100), tl[0].firstToken)\n\t\ttests.AssertEqual(t, \"lastToken\", int64(200), tl[0].lastToken)\n\t})\n\n\tt.Run(\"IntraBatchOverlappingWithExistingList\", func(t *testing.T) {\n\t\ttl := TabletEntryList{\n\t\t\t{firstToken: -500, lastToken: -400},\n\t\t\t{firstToken: 500, lastToken: 600},\n\t\t}\n\t\tbatch := TabletEntryList{\n\t\t\t{firstToken: 0, lastToken: 100, replicas: []ReplicaInfo{{testHostUUID(\"h1\"), 0}}},\n\t\t\t{firstToken: 50, lastToken: 150, replicas: []ReplicaInfo{{testHostUUID(\"h2\"), 0}}},\n\t\t}\n\t\ttl = tl.bulkAddEntries(batch)\n\n\t\ttests.AssertEqual(t, \"length\", 3, len(tl))\n\t\ttests.AssertEqual(t, \"existing-pre firstToken\", int64(-500), tl[0].firstToken)\n\t\ttests.AssertEqual(t, \"resolved firstToken\", int64(50), tl[1].firstToken)\n\t\ttests.AssertEqual(t, \"resolved lastToken\", int64(150), tl[1].lastToken)\n\t\ttests.AssertEqual(t, \"existing-post firstToken\", int64(500), tl[2].firstToken)\n\t})\n\n\tt.Run(\"NonOverlappingBatchStillWorks\", func(t *testing.T) {\n\t\ttl := TabletEntryList{}\n\t\tbatch := TabletEntryList{\n\t\t\t{firstToken: 0, lastToken: 100},\n\t\t\t{firstToken: 100, lastToken: 200},\n\t\t\t{firstToken: 200, lastToken: 300},\n\t\t}\n\t\ttl = tl.bulkAddEntries(batch)\n\n\t\ttests.AssertEqual(t, \"length\", 3, len(tl))\n\t\ttests.AssertTrue(t, \"ranges\", compareEntryRanges(tl, [][]int64{{0, 100}, {100, 200}, {200, 300}}))\n\t})\n\n\tt.Run(\"BatchWithGapsPreservesExisting\", func(t *testing.T) {\n\t\ttl := TabletEntryList{\n\t\t\t{firstToken: 200, 
lastToken: 300, replicas: []ReplicaInfo{{testHostUUID(\"existing\"), 0}}},\n\t\t}\n\t\tbatch := TabletEntryList{\n\t\t\t{firstToken: 0, lastToken: 100, replicas: []ReplicaInfo{{testHostUUID(\"h1\"), 0}}},\n\t\t\t{firstToken: 500, lastToken: 600, replicas: []ReplicaInfo{{testHostUUID(\"h2\"), 0}}},\n\t\t}\n\t\ttl = tl.bulkAddEntries(batch)\n\n\t\ttests.AssertEqual(t, \"length\", 3, len(tl))\n\t\ttests.AssertTrue(t, \"ranges\", compareEntryRanges(tl, [][]int64{{0, 100}, {200, 300}, {500, 600}}))\n\t\ttests.AssertEqual(t, \"existing host preserved\", testHostUUID(\"existing\"), tl[1].replicas[0].hostId)\n\t})\n}\n\nfunc TestCowTabletListAddAndFind(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"BasicAddAndFind\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks1\", tableName: \"tb1\",\n\t\t\tfirstToken: -100, lastToken: 0,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks1\", tableName: \"tb1\",\n\t\t\tfirstToken: 0, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 1}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tti, ok := cl.FindTabletForToken(\"ks1\", \"tb1\", -50)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for token -50\")\n\t\t}\n\t\ttests.AssertEqual(t, \"lastToken\", int64(0), ti.LastToken())\n\n\t\tti, ok = cl.FindTabletForToken(\"ks1\", \"tb1\", 50)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for token 50\")\n\t\t}\n\t\ttests.AssertEqual(t, \"lastToken\", int64(100), ti.LastToken())\n\n\t\t_, ok = cl.FindTabletForToken(\"ks1\", \"unknown\", 0)\n\t\tif ok {\n\t\t\tt.Fatal(\"expected nil for unknown table\")\n\t\t}\n\n\t\t_, ok = cl.FindTabletForToken(\"unknown\", \"tb1\", 0)\n\t\tif ok {\n\t\t\tt.Fatal(\"expected nil for unknown keyspace\")\n\t\t}\n\t})\n\n\tt.Run(\"FindReplicasUnsafeForToken\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thosts := 
GenerateHostUUIDs(2)\n\t\thost1 := hosts[0]\n\t\thost2 := hosts[1]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks1\", tableName: \"tb1\",\n\t\t\tfirstToken: -100, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}, {host2, 1}},\n\t\t})\n\t\tcl.Flush()\n\n\t\treplicas := cl.FindReplicasUnsafeForToken(\"ks1\", \"tb1\", 0)\n\t\ttests.AssertEqual(t, \"replica count\", 2, len(replicas))\n\t\ttests.AssertEqual(t, \"replica0 host\", host1.String(), replicas[0].HostID())\n\t\ttests.AssertEqual(t, \"replica1 host\", host2.String(), replicas[1].HostID())\n\n\t\treplicas = cl.FindReplicasUnsafeForToken(\"ks1\", \"missing\", 0)\n\t\tif replicas != nil {\n\t\t\tt.Fatal(\"expected nil replicas for missing table\")\n\t\t}\n\t})\n\n\tt.Run(\"MultiTable\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb1\",\n\t\t\tfirstToken: -100, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb2\",\n\t\t\tfirstToken: -100, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 5}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tr1 := cl.FindReplicasUnsafeForToken(\"ks\", \"tb1\", 0)\n\t\ttests.AssertEqual(t, \"tb1 shard\", 0, r1[0].ShardID())\n\n\t\tr2 := cl.FindReplicasUnsafeForToken(\"ks\", \"tb2\", 0)\n\t\ttests.AssertEqual(t, \"tb2 shard\", 5, r2[0].ShardID())\n\t})\n\n\tt.Run(\"MultiKeyspace\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks1\", tableName: \"tb\",\n\t\t\tfirstToken: -100, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 1}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks2\", tableName: \"tb\",\n\t\t\tfirstToken: -100, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 
2}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tr1 := cl.FindReplicasUnsafeForToken(\"ks1\", \"tb\", 0)\n\t\ttests.AssertEqual(t, \"ks1 shard\", 1, r1[0].ShardID())\n\n\t\tr2 := cl.FindReplicasUnsafeForToken(\"ks2\", \"tb\", 0)\n\t\ttests.AssertEqual(t, \"ks2 shard\", 2, r2[0].ShardID())\n\t})\n\n\tt.Run(\"OverwritesExisting\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thosts := GenerateHostUUIDs(2)\n\t\thost1 := hosts[0]\n\t\thost2 := hosts[1]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: -100, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: -100, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host2, 5}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tti, ok := cl.FindTabletForToken(\"ks\", \"tb\", 0)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet\")\n\t\t}\n\t\ttests.AssertEqual(t, \"updated host\", host2.String(), ti.Replicas()[0].HostID())\n\t\ttests.AssertEqual(t, \"updated shard\", 5, ti.Replicas()[0].ShardID())\n\t})\n\n\tt.Run(\"SameFirstTokenDifferentLastToken\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thosts := GenerateHostUUIDs(2)\n\t\thost1 := hosts[0]\n\t\thost2 := hosts[1]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: 0, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: 0, lastToken: 200,\n\t\t\treplicas: []ReplicaInfo{{host2, 1}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tti, ok := cl.FindTabletForToken(\"ks\", \"tb\", 50)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for token 50\")\n\t\t}\n\t\ttests.AssertEqual(t, \"replaced host\", host2.String(), ti.Replicas()[0].HostID())\n\t\ttests.AssertEqual(t, \"replaced lastToken\", int64(200), ti.LastToken())\n\n\t\tti, ok = 
cl.FindTabletForToken(\"ks\", \"tb\", 150)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for token 150\")\n\t\t}\n\t\ttests.AssertEqual(t, \"host at 150\", host2.String(), ti.Replicas()[0].HostID())\n\t})\n}\n\nfunc TestCowTabletListBulkAdd(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"Basic\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\n\t\tbatch := TabletInfoList{\n\t\t\t{keyspaceName: \"ks\", tableName: \"tb\", firstToken: -300, lastToken: -200, replicas: []ReplicaInfo{{host1, 0}}},\n\t\t\t{keyspaceName: \"ks\", tableName: \"tb\", firstToken: -200, lastToken: -100, replicas: []ReplicaInfo{{host1, 1}}},\n\t\t\t{keyspaceName: \"ks\", tableName: \"tb\", firstToken: -100, lastToken: 0, replicas: []ReplicaInfo{{host1, 2}}},\n\t\t}\n\t\tcl.BulkAddTablets(batch)\n\t\tcl.Flush()\n\n\t\tti, ok := cl.FindTabletForToken(\"ks\", \"tb\", -250)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet\")\n\t\t}\n\t\ttests.AssertEqual(t, \"shard\", 0, ti.Replicas()[0].ShardID())\n\n\t\tti, ok = cl.FindTabletForToken(\"ks\", \"tb\", -150)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet\")\n\t\t}\n\t\ttests.AssertEqual(t, \"shard\", 1, ti.Replicas()[0].ShardID())\n\t})\n\n\tt.Run(\"MultiTable\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\n\t\tbatch := TabletInfoList{\n\t\t\t{keyspaceName: \"ks\", tableName: \"tb1\", firstToken: -100, lastToken: 0, replicas: []ReplicaInfo{{host1, 0}}},\n\t\t\t{keyspaceName: \"ks\", tableName: \"tb1\", firstToken: 0, lastToken: 100, replicas: []ReplicaInfo{{host1, 1}}},\n\t\t\t{keyspaceName: \"ks\", tableName: \"tb2\", firstToken: -50, lastToken: 50, replicas: []ReplicaInfo{{host1, 2}}},\n\t\t}\n\t\tcl.BulkAddTablets(batch)\n\t\tcl.Flush()\n\n\t\tti, ok := cl.FindTabletForToken(\"ks\", \"tb1\", -50)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for tb1 token -50\")\n\t\t}\n\t\ttests.AssertEqual(t, \"tb1 shard\", 0, 
ti.Replicas()[0].ShardID())\n\n\t\tti, ok = cl.FindTabletForToken(\"ks\", \"tb2\", 0)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for tb2 token 0\")\n\t\t}\n\t\ttests.AssertEqual(t, \"tb2 shard\", 2, ti.Replicas()[0].ShardID())\n\t})\n\n\tt.Run(\"SortsPerTableGroups\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\n\t\tbatch := TabletInfoList{\n\t\t\t{keyspaceName: \"ks\", tableName: \"tb1\", firstToken: 100, lastToken: 200, replicas: []ReplicaInfo{{host1, 2}}},\n\t\t\t{keyspaceName: \"ks\", tableName: \"tb2\", firstToken: -100, lastToken: 100, replicas: []ReplicaInfo{{host1, 7}}},\n\t\t\t{keyspaceName: \"ks\", tableName: \"tb1\", firstToken: -100, lastToken: 0, replicas: []ReplicaInfo{{host1, 0}}},\n\t\t\t{keyspaceName: \"ks\", tableName: \"tb1\", firstToken: 0, lastToken: 100, replicas: []ReplicaInfo{{host1, 1}}},\n\t\t}\n\n\t\tcl.BulkAddTablets(batch)\n\t\tcl.Flush()\n\n\t\tti, ok := cl.FindTabletForToken(\"ks\", \"tb1\", -50)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for tb1 token -50\")\n\t\t}\n\t\ttests.AssertEqual(t, \"tb1 shard for -50\", 0, ti.Replicas()[0].ShardID())\n\n\t\tti, ok = cl.FindTabletForToken(\"ks\", \"tb1\", 50)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for tb1 token 50\")\n\t\t}\n\t\ttests.AssertEqual(t, \"tb1 shard for 50\", 1, ti.Replicas()[0].ShardID())\n\n\t\tti, ok = cl.FindTabletForToken(\"ks\", \"tb1\", 150)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for tb1 token 150\")\n\t\t}\n\t\ttests.AssertEqual(t, \"tb1 shard for 150\", 2, ti.Replicas()[0].ShardID())\n\n\t\tti, ok = cl.FindTabletForToken(\"ks\", \"tb2\", 0)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for tb2 token 0\")\n\t\t}\n\t\ttests.AssertEqual(t, \"tb2 shard\", 7, ti.Replicas()[0].ShardID())\n\t})\n\n\tt.Run(\"ZeroValueEntries\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := 
GenerateHostUUIDs(1)[0]\n\n\t\tcl.BulkAddTablets(TabletInfoList{\n\t\t\t{},\n\t\t\t{keyspaceName: \"ks\", tableName: \"tb\", firstToken: 0, lastToken: 100, replicas: []ReplicaInfo{{host1, 0}}},\n\t\t\t{},\n\t\t})\n\t\tcl.Flush()\n\n\t\tresult := cl.GetTableTablets(\"ks\", \"tb\")\n\t\ttests.AssertEqual(t, \"tablet count\", 1, len(result))\n\t\ttests.AssertEqual(t, \"firstToken\", int64(0), result[0].FirstToken())\n\t\ttests.AssertEqual(t, \"lastToken\", int64(100), result[0].LastToken())\n\t})\n\n\tt.Run(\"EmptyIdentifiers\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\n\t\tcl.BulkAddTablets(TabletInfoList{\n\t\t\t{keyspaceName: \"\", tableName: \"tb\", firstToken: 0, lastToken: 100, replicas: []ReplicaInfo{{host1, 0}}},\n\t\t\t{keyspaceName: \"ks\", tableName: \"\", firstToken: 0, lastToken: 100, replicas: []ReplicaInfo{{host1, 1}}},\n\t\t\t{keyspaceName: \"\", tableName: \"\", firstToken: 0, lastToken: 100, replicas: []ReplicaInfo{{host1, 2}}},\n\t\t\t{keyspaceName: \"ks\", tableName: \"tb\", firstToken: 0, lastToken: 100, replicas: []ReplicaInfo{{host1, 3}}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tresult := cl.GetTableTablets(\"ks\", \"tb\")\n\t\ttests.AssertEqual(t, \"valid tablet count\", 1, len(result))\n\t\ttests.AssertEqual(t, \"shard\", 3, result[0].Replicas()[0].ShardID())\n\n\t\ttests.AssertEqual(t, \"empty-ks phantom\", 0, len(cl.GetTableTablets(\"\", \"tb\")))\n\t\ttests.AssertEqual(t, \"empty-table phantom\", 0, len(cl.GetTableTablets(\"ks\", \"\")))\n\t\ttests.AssertEqual(t, \"both-empty phantom\", 0, len(cl.GetTableTablets(\"\", \"\")))\n\t})\n\n\tt.Run(\"IntraBatchOverlap\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\n\t\tbatch := TabletInfoList{\n\t\t\t{keyspaceName: \"ks\", tableName: \"tb\", firstToken: 0, lastToken: 100, replicas: []ReplicaInfo{{testHostUUID(\"h1\"), 0}}},\n\t\t\t{keyspaceName: \"ks\", tableName: \"tb\", firstToken: 50, lastToken: 150, 
replicas: []ReplicaInfo{{testHostUUID(\"h2\"), 1}}},\n\t\t}\n\t\tcl.BulkAddTablets(batch)\n\t\tcl.Flush()\n\n\t\tresult := cl.GetTableTablets(\"ks\", \"tb\")\n\t\ttests.AssertEqual(t, \"tablet count\", 1, len(result))\n\t\ttests.AssertEqual(t, \"firstToken\", int64(50), result[0].FirstToken())\n\t\ttests.AssertEqual(t, \"lastToken\", int64(150), result[0].LastToken())\n\t\ttests.AssertEqual(t, \"shard\", 1, result[0].Replicas()[0].ShardID())\n\t})\n}\n\nfunc TestCowTabletListGet(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"AllTablets\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks1\", tableName: \"tb1\",\n\t\t\tfirstToken: -100, lastToken: 0,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks1\", tableName: \"tb2\",\n\t\t\tfirstToken: 0, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 1}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks2\", tableName: \"tb1\",\n\t\t\tfirstToken: 100, lastToken: 200,\n\t\t\treplicas: []ReplicaInfo{{host1, 2}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tflat := cl.Get()\n\t\ttests.AssertEqual(t, \"total tablets\", 3, len(flat))\n\n\t\tsort.Slice(flat, func(i, j int) bool {\n\t\t\treturn flat[i].FirstToken() < flat[j].FirstToken()\n\t\t})\n\t\ttests.AssertEqual(t, \"first\", int64(-100), flat[0].FirstToken())\n\t\ttests.AssertEqual(t, \"second\", int64(0), flat[1].FirstToken())\n\t\ttests.AssertEqual(t, \"third\", int64(100), flat[2].FirstToken())\n\t})\n\n\tt.Run(\"Empty\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\tflat := cl.Get()\n\t\ttests.AssertEqual(t, \"empty list length\", 0, len(flat))\n\t})\n}\n\nfunc TestCowTabletListGetTableTablets(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"MultipleTablesAndKeyspaces\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := 
GenerateHostUUIDs(1)[0]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks1\", tableName: \"tb1\",\n\t\t\tfirstToken: -100, lastToken: 0,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks1\", tableName: \"tb1\",\n\t\t\tfirstToken: 0, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 1}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks1\", tableName: \"tb2\",\n\t\t\tfirstToken: 100, lastToken: 200,\n\t\t\treplicas: []ReplicaInfo{{host1, 2}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks2\", tableName: \"tb1\",\n\t\t\tfirstToken: 200, lastToken: 300,\n\t\t\treplicas: []ReplicaInfo{{host1, 3}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tresult := cl.GetTableTablets(\"ks1\", \"tb1\")\n\t\ttests.AssertEqual(t, \"ks1.tb1 count\", 2, len(result))\n\t\ttests.AssertEqual(t, \"ks1.tb1 first token\", int64(-100), result[0].FirstToken())\n\t\ttests.AssertEqual(t, \"ks1.tb1 second token\", int64(0), result[1].FirstToken())\n\n\t\tresult = cl.GetTableTablets(\"ks1\", \"tb2\")\n\t\ttests.AssertEqual(t, \"ks1.tb2 count\", 1, len(result))\n\t\ttests.AssertEqual(t, \"ks1.tb2 first token\", int64(100), result[0].FirstToken())\n\n\t\tresult = cl.GetTableTablets(\"ks2\", \"tb1\")\n\t\ttests.AssertEqual(t, \"ks2.tb1 count\", 1, len(result))\n\t\ttests.AssertEqual(t, \"ks2.tb1 first token\", int64(200), result[0].FirstToken())\n\t})\n\n\tt.Run(\"NonExistent\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks1\", tableName: \"tb1\",\n\t\t\tfirstToken: -100, lastToken: 0,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tresult := cl.GetTableTablets(\"no_such_ks\", \"tb1\")\n\t\tif result != nil {\n\t\t\tt.Fatalf(\"expected nil for non-existent keyspace, got %d tablets\", len(result))\n\t\t}\n\n\t\tresult = cl.GetTableTablets(\"ks1\", 
\"no_such_tb\")\n\t\tif result != nil {\n\t\t\tt.Fatalf(\"expected nil for non-existent table, got %d tablets\", len(result))\n\t\t}\n\t})\n\n\tt.Run(\"ReturnsCopy\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: -100, lastToken: 0,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: 0, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 1}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tresult1 := cl.GetTableTablets(\"ks\", \"tb\")\n\t\tresult2 := cl.GetTableTablets(\"ks\", \"tb\")\n\n\t\tresult1[0] = TabletEntry{}\n\t\tif result2[0].FirstToken() != -100 {\n\t\t\tt.Fatal(\"GetTableTablets should return independent copies\")\n\t\t}\n\t})\n\n\tt.Run(\"Empty\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\n\t\tresult := cl.GetTableTablets(\"ks\", \"tb\")\n\t\tif result != nil {\n\t\t\tt.Fatalf(\"expected nil for empty list, got %d tablets\", len(result))\n\t\t}\n\t})\n}\n\nfunc TestCowTabletListRemove(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"WithHost\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thosts := GenerateHostUUIDs(2)\n\t\tremovedHost := hosts[0]\n\t\tkeptHost := hosts[1]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb1\",\n\t\t\tfirstToken: -100, lastToken: 0,\n\t\t\treplicas: []ReplicaInfo{{removedHost, 0}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb1\",\n\t\t\tfirstToken: 0, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{keptHost, 1}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb2\",\n\t\t\tfirstToken: -100, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{removedHost, 
2}},\n\t\t})\n\t\tcl.RemoveTabletsWithHost(removedHost)\n\t\tcl.Flush()\n\n\t\tti, ok := cl.FindTabletForToken(\"ks\", \"tb1\", 50)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected kept tablet in tb1\")\n\t\t}\n\t\ttests.AssertEqual(t, \"kept shard\", 1, ti.Replicas()[0].ShardID())\n\n\t\tflat := cl.Get()\n\t\tfor _, tab := range flat {\n\t\t\tfor _, r := range tab.Replicas() {\n\t\t\t\tif r.HostUUIDValue() == removedHost {\n\t\t\t\t\tt.Fatalf(\"found removed host in tablet %v\", tab)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"WithKeyspace\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"removed_ks\", tableName: \"tb1\",\n\t\t\tfirstToken: -100, lastToken: 0,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"removed_ks\", tableName: \"tb2\",\n\t\t\tfirstToken: 0, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 1}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"kept_ks\", tableName: \"tb1\",\n\t\t\tfirstToken: 100, lastToken: 200,\n\t\t\treplicas: []ReplicaInfo{{host1, 2}},\n\t\t})\n\t\tcl.RemoveTabletsWithKeyspace(\"removed_ks\")\n\t\tcl.Flush()\n\n\t\t_, ok := cl.FindTabletForToken(\"removed_ks\", \"tb1\", -50)\n\t\tif ok {\n\t\t\tt.Fatal(\"expected nil for removed keyspace table tb1\")\n\t\t}\n\t\t_, ok = cl.FindTabletForToken(\"removed_ks\", \"tb2\", 50)\n\t\tif ok {\n\t\t\tt.Fatal(\"expected nil for removed keyspace table tb2\")\n\t\t}\n\n\t\tti, ok := cl.FindTabletForToken(\"kept_ks\", \"tb1\", 150)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for kept keyspace\")\n\t\t}\n\t\ttests.AssertEqual(t, \"kept shard\", 2, ti.Replicas()[0].ShardID())\n\n\t\ttests.AssertEqual(t, \"total tablets\", 1, len(cl.Get()))\n\t})\n\n\tt.Run(\"WithTable\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := 
GenerateHostUUIDs(1)[0]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"removed_tb\",\n\t\t\tfirstToken: -100, lastToken: 0,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"kept_tb\",\n\t\t\tfirstToken: 0, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 1}},\n\t\t})\n\t\tcl.RemoveTabletsWithTable(\"ks\", \"removed_tb\")\n\t\tcl.Flush()\n\n\t\t_, ok := cl.FindTabletForToken(\"ks\", \"removed_tb\", -50)\n\t\tif ok {\n\t\t\tt.Fatal(\"expected nil for removed table\")\n\t\t}\n\n\t\tti, ok := cl.FindTabletForToken(\"ks\", \"kept_tb\", 50)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for kept table\")\n\t\t}\n\t\ttests.AssertEqual(t, \"kept shard\", 1, ti.Replicas()[0].ShardID())\n\t})\n\n\tt.Run(\"Nonexistent\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: -100, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.RemoveTabletsWithKeyspace(\"nonexistent\")\n\t\tcl.RemoveTabletsWithTable(\"ks\", \"nonexistent\")\n\t\tcl.RemoveTabletsWithHost(testHostUUID(\"nonexistent-host\"))\n\t\tcl.Flush()\n\n\t\ttests.AssertEqual(t, \"still has tablet\", 1, len(cl.Get()))\n\t})\n}\n\nfunc TestCowTabletListForEach(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"VisitsAllTables\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks1\", tableName: \"tb1\",\n\t\t\tfirstToken: -100, lastToken: 0,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks1\", tableName: \"tb2\",\n\t\t\tfirstToken: 0, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 1}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks2\", tableName: 
\"tb1\",\n\t\t\tfirstToken: 100, lastToken: 200,\n\t\t\treplicas: []ReplicaInfo{{host1, 2}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tvisited := make(map[string]int) // \"keyspace.table\" -> entry count\n\t\tcl.ForEach(func(keyspace, table string, entries TabletEntryList) bool {\n\t\t\tvisited[keyspace+\".\"+table] = len(entries)\n\t\t\treturn true\n\t\t})\n\n\t\ttests.AssertEqual(t, \"visited count\", 3, len(visited))\n\t\ttests.AssertEqual(t, \"ks1.tb1 entries\", 1, visited[\"ks1.tb1\"])\n\t\ttests.AssertEqual(t, \"ks1.tb2 entries\", 1, visited[\"ks1.tb2\"])\n\t\ttests.AssertEqual(t, \"ks2.tb1 entries\", 1, visited[\"ks2.tb1\"])\n\t})\n\n\tt.Run(\"StopsEarly\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tcl.AddTablet(TabletInfo{\n\t\t\t\tkeyspaceName: \"ks\", tableName: fmt.Sprintf(\"tb%d\", i),\n\t\t\t\tfirstToken: int64(i * 100), lastToken: int64(i*100 + 99),\n\t\t\t\treplicas: []ReplicaInfo{{host1, i}},\n\t\t\t})\n\t\t}\n\t\tcl.Flush()\n\n\t\tcount := 0\n\t\tcl.ForEach(func(keyspace, table string, entries TabletEntryList) bool {\n\t\t\tcount++\n\t\t\treturn count < 3 // stop after visiting 3 tables\n\t\t})\n\n\t\ttests.AssertEqual(t, \"stopped after 3\", 3, count)\n\t})\n\n\tt.Run(\"Empty\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\n\t\tcount := 0\n\t\tcl.ForEach(func(keyspace, table string, entries TabletEntryList) bool {\n\t\t\tcount++\n\t\t\treturn true\n\t\t})\n\n\t\ttests.AssertEqual(t, \"empty iteration\", 0, count)\n\t})\n\n\tt.Run(\"NilReceiver\", func(t *testing.T) {\n\t\tvar cl *CowTabletList\n\n\t\tcl.ForEach(func(keyspace, table string, entries TabletEntryList) bool {\n\t\t\tt.Fatal(\"callback should not be called on nil receiver\")\n\t\t\treturn true\n\t\t})\n\t})\n\n\tt.Run(\"MutationDoesNotCorruptState\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := 
GenerateHostUUIDs(1)[0]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: -100, lastToken: 0,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: 0, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 1}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tcl.ForEach(func(keyspace, table string, entries TabletEntryList) bool {\n\t\t\tfor i, j := 0, len(entries)-1; i < j; i, j = i+1, j-1 {\n\t\t\t\tentries[i], entries[j] = entries[j], entries[i]\n\t\t\t}\n\t\t\tentries[0] = TabletEntry{}\n\t\t\treturn true\n\t\t})\n\n\t\tentry, ok := cl.FindTabletForToken(\"ks\", \"tb\", 50)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected to find tablet for token 50 after ForEach mutation\")\n\t\t}\n\t\ttests.AssertEqual(t, \"firstToken\", int64(0), entry.FirstToken())\n\t\ttests.AssertEqual(t, \"lastToken\", int64(100), entry.LastToken())\n\n\t\tentry, ok = cl.FindTabletForToken(\"ks\", \"tb\", -50)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected to find tablet for token -50 after ForEach mutation\")\n\t\t}\n\t\ttests.AssertEqual(t, \"firstToken\", int64(-100), entry.FirstToken())\n\t\ttests.AssertEqual(t, \"lastToken\", int64(0), entry.LastToken())\n\t})\n\n\tt.Run(\"EntriesAreReadable\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: -100, lastToken: 0,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: 0, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{host1, 1}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tcl.ForEach(func(keyspace, table string, entries TabletEntryList) bool {\n\t\t\ttests.AssertEqual(t, \"keyspace\", \"ks\", keyspace)\n\t\t\ttests.AssertEqual(t, \"table\", \"tb\", table)\n\t\t\ttests.AssertEqual(t, \"entry 
count\", 2, len(entries))\n\t\t\ttests.AssertEqual(t, \"first entry firstToken\", int64(-100), entries[0].FirstToken())\n\t\t\ttests.AssertEqual(t, \"first entry lastToken\", int64(0), entries[0].LastToken())\n\t\t\ttests.AssertEqual(t, \"second entry firstToken\", int64(0), entries[1].FirstToken())\n\t\t\ttests.AssertEqual(t, \"second entry lastToken\", int64(100), entries[1].LastToken())\n\t\t\treturn true\n\t\t})\n\t})\n\n\tt.Run(\"NilCallback\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: -100, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{testHostUUID(\"host1\"), 0}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tcl.ForEach(nil)\n\t})\n}\n\nfunc TestCowTabletListLifecycle(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"CloseIdempotent\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tcl.Close()\n\n\t\tdone := make(chan struct{})\n\t\tgo func() {\n\t\t\tdefer close(done)\n\t\t\tcl.Close()\n\t\t}()\n\n\t\tselect {\n\t\tcase <-done:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Fatal(\"second Close call did not return\")\n\t\t}\n\t})\n\n\tt.Run(\"FlushAfterClose\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tcl.Close()\n\n\t\tdone := make(chan struct{})\n\t\tgo func() {\n\t\t\tdefer close(done)\n\t\t\tcl.Flush()\n\t\t}()\n\n\t\tselect {\n\t\tcase <-done:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Fatal(\"Flush after Close did not return\")\n\t\t}\n\t})\n\n\tt.Run(\"AddAfterCloseNoop\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tcl.Close()\n\n\t\thost1 := GenerateHostUUIDs(1)[0]\n\t\tdone := make(chan struct{})\n\t\tgo func() {\n\t\t\tdefer close(done)\n\t\t\tcl.AddTablet(TabletInfo{\n\t\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\t\tfirstToken: -100, lastToken: 100,\n\t\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t\t})\n\t\t}()\n\n\t\tselect {\n\t\tcase <-done:\n\t\tcase 
<-time.After(time.Second):\n\t\t\tt.Fatal(\"AddTablet after Close did not return\")\n\t\t}\n\n\t\ttests.AssertEqual(t, \"tablet count\", 0, len(cl.Get()))\n\t})\n\n\tt.Run(\"NilReceiver\", func(t *testing.T) {\n\t\tvar cl *CowTabletList\n\n\t\tcl.Close()\n\t\tcl.Flush()\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: -100, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{testHostUUID(\"host\"), 0}},\n\t\t})\n\t\tcl.BulkAddTablets(TabletInfoList{{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: -100, lastToken: 100,\n\t\t\treplicas: []ReplicaInfo{{testHostUUID(\"host\"), 0}},\n\t\t}})\n\t\tcl.RemoveTabletsWithHost(testHostUUID(\"host\"))\n\t\tcl.RemoveTabletsWithKeyspace(\"ks\")\n\t\tcl.RemoveTabletsWithTable(\"ks\", \"tb\")\n\t})\n}\n\nfunc TestOpQueueRun(t *testing.T) {\n\tt.Parallel()\n\n\tnewTablet := func(first, last int64) TabletInfo {\n\t\treturn TabletInfo{\n\t\t\tkeyspaceName: \"ks\",\n\t\t\ttableName:    \"tb\",\n\t\t\tfirstToken:   first,\n\t\t\tlastToken:    last,\n\t\t}\n\t}\n\n\trunQueue := func(send func(q *opQueue), wantTypes []string, validate func(t *testing.T, processed []tabletOp)) {\n\t\tt.Helper()\n\n\t\tq := newOpQueue()\n\t\tvar mu sync.Mutex\n\t\tprocessed := make([]tabletOp, 0, len(wantTypes))\n\t\tgo q.run(func(op tabletOp) {\n\t\t\tmu.Lock()\n\t\t\tprocessed = append(processed, op)\n\t\t\tmu.Unlock()\n\t\t\tif flush, ok := op.(opFlush); ok {\n\t\t\t\tclose(flush.done)\n\t\t\t}\n\t\t})\n\n\t\tsend(q)\n\t\tq.close()\n\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\t\tif len(processed) != len(wantTypes) {\n\t\t\tt.Fatalf(\"processed %d ops, want %d\", len(processed), len(wantTypes))\n\t\t}\n\n\t\tfor i, op := range processed {\n\t\t\tswitch wantTypes[i] {\n\t\t\tcase \"bulk\":\n\t\t\t\tif _, ok := op.(opBulkAddTablets); !ok {\n\t\t\t\t\tt.Fatalf(\"processed[%d] = %T, want opBulkAddTablets\", i, op)\n\t\t\t\t}\n\t\t\tcase \"flush\":\n\t\t\t\tif _, ok := op.(opFlush); !ok 
{\n\t\t\t\t\tt.Fatalf(\"processed[%d] = %T, want opFlush\", i, op)\n\t\t\t\t}\n\t\t\tcase \"removeHost\":\n\t\t\t\tif _, ok := op.(opRemoveHost); !ok {\n\t\t\t\t\tt.Fatalf(\"processed[%d] = %T, want opRemoveHost\", i, op)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tt.Fatalf(\"unsupported expected type %q\", wantTypes[i])\n\t\t\t}\n\t\t}\n\n\t\tif validate != nil {\n\t\t\tvalidate(t, processed)\n\t\t}\n\t}\n\n\tt.Run(\"CoalescesBufferedAddOps\", func(t *testing.T) {\n\t\trunQueue(func(q *opQueue) {\n\t\t\tq.send(opAddTablet{tablet: newTablet(0, 99)})\n\t\t\tq.send(opAddTablet{tablet: newTablet(100, 199)})\n\t\t\tq.send(opAddTablet{tablet: newTablet(200, 299)})\n\t\t\tq.flush()\n\t\t}, []string{\"bulk\", \"flush\"}, func(t *testing.T, processed []tabletOp) {\n\t\t\tbulk := processed[0].(opBulkAddTablets)\n\t\t\tif len(bulk.tablets) != 3 {\n\t\t\t\tt.Fatalf(\"coalesced %d tablets, want 3\", len(bulk.tablets))\n\t\t\t}\n\t\t})\n\t})\n\n\tt.Run(\"DoesNotCrossFlushOrNonAddBoundaries\", func(t *testing.T) {\n\t\trunQueue(func(q *opQueue) {\n\t\t\tflushDone := make(chan struct{})\n\t\t\tq.send(opAddTablet{tablet: newTablet(0, 99)})\n\t\t\tq.send(opFlush{done: flushDone})\n\t\t\tq.send(opRemoveHost{hostID: testHostUUID(\"host-1\")})\n\t\t\tq.send(opAddTablet{tablet: newTablet(100, 199)})\n\t\t\t<-flushDone\n\t\t}, []string{\"bulk\", \"flush\", \"removeHost\", \"bulk\"}, func(t *testing.T, processed []tabletOp) {\n\t\t\tfirst := processed[0].(opBulkAddTablets)\n\t\t\tsecond := processed[3].(opBulkAddTablets)\n\t\t\tif len(first.tablets) != 1 || len(second.tablets) != 1 {\n\t\t\t\tt.Fatalf(\"unexpected coalesced batch sizes: %d and %d\", len(first.tablets), len(second.tablets))\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestCowTabletListConcurrency(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"ConcurrentReads\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thosts := GenerateHostUUIDs(3)\n\t\tcl.BulkAddTablets(createTablets(\"ks\", \"tb\", hosts, 2, 100, 
100))\n\t\tcl.Flush()\n\n\t\tvar wg sync.WaitGroup\n\t\tfor i := 0; i < 20; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\trnd := getThreadSafeRnd()\n\t\t\t\tfor j := 0; j < 1000; j++ {\n\t\t\t\t\ttoken := rnd.Int63()\n\t\t\t\t\t_, _ = cl.FindTabletForToken(\"ks\", \"tb\", token)\n\t\t\t\t\tcl.FindReplicasUnsafeForToken(\"ks\", \"tb\", token)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\t})\n\n\tt.Run(\"ConcurrentReadWrite\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thosts := GenerateHostUUIDs(3)\n\t\tcl.BulkAddTablets(createTablets(\"ks\", \"tb\", hosts, 2, 100, 100))\n\t\tcl.Flush()\n\n\t\tvar wg sync.WaitGroup\n\t\tfor i := 0; i < 10; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\trnd := getThreadSafeRnd()\n\t\t\t\tfor j := 0; j < 1000; j++ {\n\t\t\t\t\t_, _ = cl.FindTabletForToken(\"ks\", \"tb\", rnd.Int63())\n\t\t\t\t\tcl.Get()\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\trepGen := NewReplicaSetGenerator(hosts, 2)\n\t\tfor i := 0; i < 5; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\trnd := getThreadSafeRnd()\n\t\t\t\tfor j := 0; j < 100; j++ {\n\t\t\t\t\ttoken := rnd.Int63()\n\t\t\t\t\tcl.AddTablet(TabletInfo{\n\t\t\t\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\t\t\t\tfirstToken: token - 100, lastToken: token,\n\t\t\t\t\t\treplicas: repGen.Next(),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\t})\n\n\tt.Run(\"ConcurrentMultiTableReadWrite\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thosts := GenerateHostUUIDs(6)\n\t\ttables := []string{\"tb1\", \"tb2\", \"tb3\", \"tb4\", \"tb5\"}\n\n\t\tfor _, tb := range tables {\n\t\t\tcl.BulkAddTablets(createTablets(\"ks\", tb, hosts, 3, 50, 50))\n\t\t}\n\t\tcl.Flush()\n\n\t\tvar wg sync.WaitGroup\n\n\t\tfor i := 0; i < 10; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\trnd := getThreadSafeRnd()\n\t\t\t\ttb := 
tables[idx%len(tables)]\n\t\t\t\tfor j := 0; j < 500; j++ {\n\t\t\t\t\t_, _ = cl.FindTabletForToken(\"ks\", tb, rnd.Int63())\n\t\t\t\t}\n\t\t\t}(i)\n\t\t}\n\n\t\trepGen := NewReplicaSetGenerator(hosts, 3)\n\t\tfor i := 0; i < 5; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\trnd := getThreadSafeRnd()\n\t\t\t\ttb := tables[idx%len(tables)]\n\t\t\t\tfor j := 0; j < 50; j++ {\n\t\t\t\t\ttoken := rnd.Int63()\n\t\t\t\t\tcl.AddTablet(TabletInfo{\n\t\t\t\t\t\tkeyspaceName: \"ks\", tableName: tb,\n\t\t\t\t\t\tfirstToken: token - 100, lastToken: token,\n\t\t\t\t\t\treplicas: repGen.Next(),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}(i)\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor i := 0; i < 5; i++ {\n\t\t\t\tcl.RemoveTabletsWithHost(hosts[i])\n\t\t\t}\n\t\t}()\n\n\t\twg.Wait()\n\t})\n\n\tt.Run(\"ConcurrentRemoveKeyspace\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thosts := GenerateHostUUIDs(3)\n\n\t\tcl.BulkAddTablets(createTablets(\"ks1\", \"tb\", hosts, 2, 50, 50))\n\t\tcl.BulkAddTablets(createTablets(\"ks2\", \"tb\", hosts, 2, 50, 50))\n\t\tcl.Flush()\n\n\t\tvar wg sync.WaitGroup\n\n\t\tfor i := 0; i < 10; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\trnd := getThreadSafeRnd()\n\t\t\t\tfor j := 0; j < 500; j++ {\n\t\t\t\t\t_, _ = cl.FindTabletForToken(\"ks1\", \"tb\", rnd.Int63())\n\t\t\t\t\t_, _ = cl.FindTabletForToken(\"ks2\", \"tb\", rnd.Int63())\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tcl.RemoveTabletsWithKeyspace(\"ks2\")\n\t\t}()\n\n\t\twg.Wait()\n\t\tcl.Flush()\n\n\t\t_, ok := cl.FindTabletForToken(\"ks2\", \"tb\", 0)\n\t\tif ok {\n\t\t\tt.Fatal(\"expected nil for removed keyspace\")\n\t\t}\n\n\t\t_, ok = cl.FindTabletForToken(\"ks1\", \"tb\", 0)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for ks1\")\n\t\t}\n\t})\n\n\tt.Run(\"ConcurrentRemovalOperations\", func(t *testing.T) {\n\t\tcl := 
NewCowTabletList()\n\t\tdefer cl.Close()\n\n\t\thostUUIDs := GenerateHostUUIDs(9) // 3 keyspaces * 3 hosts\n\t\thostIdx := 0\n\t\tfor ks := 0; ks < 3; ks++ {\n\t\t\tfor host := 0; host < 3; host++ {\n\t\t\t\thostID := hostUUIDs[hostIdx]\n\t\t\t\thostIdx++\n\t\t\t\tfor i := 0; i < 10; i++ {\n\t\t\t\t\tcl.AddTablet(TabletInfo{\n\t\t\t\t\t\tkeyspaceName: fmt.Sprintf(\"ks%d\", ks),\n\t\t\t\t\t\ttableName:    \"tb\",\n\t\t\t\t\t\tfirstToken:   int64(ks*10000 + host*1000 + i*100),\n\t\t\t\t\t\tlastToken:    int64(ks*10000 + host*1000 + i*100 + 99),\n\t\t\t\t\t\treplicas:     []ReplicaInfo{{hostID, 0}},\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tcl.Flush()\n\n\t\tallTablets := cl.Get()\n\t\tvar host0, host1 HostUUID\n\t\tvar host0Set, host1Set bool\n\t\thostCount := make(map[HostUUID]int)\n\t\tfor _, tablet := range allTablets {\n\t\t\tfor _, replica := range tablet.Replicas() {\n\t\t\t\thostID := replica.HostUUIDValue()\n\t\t\t\thostCount[hostID]++\n\t\t\t\tif !host0Set {\n\t\t\t\t\thost0 = hostID\n\t\t\t\t\thost0Set = true\n\t\t\t\t} else if !host1Set && hostID != host0 {\n\t\t\t\t\thost1 = hostID\n\t\t\t\t\thost1Set = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t_ = host1 // host1 is used below implicitly via hostCount\n\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(3)\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tcl.RemoveTabletsWithHost(host0)\n\t\t}()\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tcl.RemoveTabletsWithKeyspace(\"ks1\")\n\t\t}()\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tcl.RemoveTabletsWithTable(\"ks2\", \"tb\")\n\t\t}()\n\n\t\twg.Wait()\n\t\tcl.Flush()\n\n\t\tremaining := cl.Get()\n\t\tt.Logf(\"Remaining tablets after concurrent removals: %d\", len(remaining))\n\n\t\tfor _, tablet := range remaining {\n\t\t\tfor _, replica := range tablet.Replicas() {\n\t\t\t\tif replica.HostUUIDValue() == host0 {\n\t\t\t\t\tt.Errorf(\"found tablet with removed host %s\", host0.String())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, tablet := range remaining {\n\t\t\tif 
tablet.KeyspaceName() == \"ks1\" || tablet.KeyspaceName() == \"ks2\" {\n\t\t\t\tt.Errorf(\"found tablet in removed keyspace: %s\", tablet.KeyspaceName())\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"CloseRace\", func(t *testing.T) {\n\t\tlist := NewCowTabletList()\n\n\t\ttablet := TabletInfo{\n\t\t\tkeyspaceName: \"ks\",\n\t\t\ttableName:    \"tbl\",\n\t\t\tfirstToken:   -100,\n\t\t\tlastToken:    100,\n\t\t\treplicas:     []ReplicaInfo{{testHostUUID(\"host1\"), 0}},\n\t\t}\n\t\tlist.BulkAddTablets(TabletInfoList{tablet})\n\t\tlist.Flush()\n\n\t\tready := make(chan struct{})\n\t\tdone := make(chan bool)\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tgo func() {\n\t\t\t\tdefer func() { done <- true }()\n\t\t\t\tready <- struct{}{}\n\t\t\t\tfor j := 0; j < 1000; j++ {\n\t\t\t\t\t_, _ = list.FindTabletForToken(\"ks\", \"tbl\", 50)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\n\t\tfor i := 0; i < 10; i++ {\n\t\t\t<-ready\n\t\t}\n\n\t\tlist.Close()\n\n\t\tfor i := 0; i < 10; i++ {\n\t\t\t<-done\n\t\t}\n\t})\n\n\tt.Run(\"FlushCloseRace\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\n\t\tuuids := GenerateHostUUIDs(100)\n\t\tfor i := 0; i < 100; i++ {\n\t\t\tcl.AddTablet(TabletInfo{\n\t\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\t\tfirstToken: int64(i * 100), lastToken: int64(i*100 + 99),\n\t\t\t\treplicas: []ReplicaInfo{{uuids[i], 0}},\n\t\t\t})\n\t\t}\n\n\t\tvar wg sync.WaitGroup\n\t\tconst flushers = 10\n\n\t\tfor i := 0; i < flushers; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tcl.Flush()\n\t\t\t}()\n\t\t}\n\n\t\tcl.Close()\n\n\t\tdone := make(chan struct{})\n\t\tgo func() {\n\t\t\twg.Wait()\n\t\t\tclose(done)\n\t\t}()\n\n\t\tselect {\n\t\tcase <-done:\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tt.Fatal(\"concurrent Flush + Close caused deadlock\")\n\t\t}\n\t})\n}\n\nfunc TestCowTabletListEdgeCases(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"ExtremeTokenValues\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\t\thost1 
:= GenerateHostUUIDs(1)[0]\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: math.MinInt64, lastToken: 0,\n\t\t\treplicas: []ReplicaInfo{{host1, 0}},\n\t\t})\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: 0, lastToken: math.MaxInt64,\n\t\t\treplicas: []ReplicaInfo{{host1, 1}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tti, ok := cl.FindTabletForToken(\"ks\", \"tb\", math.MinInt64)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for math.MinInt64\")\n\t\t}\n\t\ttests.AssertEqual(t, \"MinInt64 shard\", 0, ti.Replicas()[0].ShardID())\n\n\t\tti, ok = cl.FindTabletForToken(\"ks\", \"tb\", math.MaxInt64)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for math.MaxInt64\")\n\t\t}\n\t\ttests.AssertEqual(t, \"MaxInt64 shard\", 1, ti.Replicas()[0].ShardID())\n\n\t\t_, ok = cl.FindTabletForToken(\"ks\", \"tb\", 0)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet for token 0\")\n\t\t}\n\t})\n\n\tt.Run(\"EmptyReplicaSet\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\",\n\t\t\ttableName:    \"tb\",\n\t\t\tfirstToken:   -100,\n\t\t\tlastToken:    100,\n\t\t\treplicas:     []ReplicaInfo{},\n\t\t})\n\t\tcl.Flush()\n\n\t\treplicas := cl.FindReplicasForToken(\"ks\", \"tb\", 0)\n\t\tif replicas == nil {\n\t\t\tt.Log(\"FindReplicasForToken returned nil for tablet with no replicas\")\n\t\t} else if len(replicas) != 0 {\n\t\t\tt.Errorf(\"expected empty replica set, got %d replicas\", len(replicas))\n\t\t}\n\n\t\ttablet, ok := cl.FindTabletForToken(\"ks\", \"tb\", 0)\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected tablet to exist\")\n\t\t}\n\t\tif len(tablet.ReplicasUnsafe()) != 0 {\n\t\t\tt.Errorf(\"expected empty replica list, got %d replicas\", len(tablet.ReplicasUnsafe()))\n\t\t}\n\t})\n\n\tt.Run(\"TabletInfoBuilderInvalidRange\", func(t *testing.T) {\n\t\thostID := GenerateHostUUIDs(1)[0]\n\t\tbuilder := 
TabletInfoBuilder{\n\t\t\tKeyspaceName: \"ks\",\n\t\t\tTableName:    \"tb\",\n\t\t\tFirstToken:   100,\n\t\t\tLastToken:    -100,\n\t\t\tReplicas:     [][]any{{hostID.String(), 0}},\n\t\t}\n\t\t_, err := builder.Build()\n\t\tif err == nil {\n\t\t\tt.Fatal(\"expected error for inverted token range\")\n\t\t}\n\t\tt.Logf(\"Got expected error: %v\", err)\n\t})\n\n\tt.Run(\"QueueSaturation\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\n\t\tconst operations = 10000\n\t\tdone := make(chan bool)\n\n\t\tuuids := GenerateHostUUIDs(operations)\n\t\tgo func() {\n\t\t\tfor i := 0; i < operations; i++ {\n\t\t\t\ttablet := TabletInfo{\n\t\t\t\t\tkeyspaceName: \"ks\",\n\t\t\t\t\ttableName:    \"tb\",\n\t\t\t\t\tfirstToken:   int64(i * 100),\n\t\t\t\t\tlastToken:    int64(i*100 + 99),\n\t\t\t\t\treplicas:     []ReplicaInfo{{uuids[i], 0}},\n\t\t\t\t}\n\t\t\t\tcl.AddTablet(tablet)\n\t\t\t}\n\t\t\tdone <- true\n\t\t}()\n\n\t\tselect {\n\t\tcase <-done:\n\t\t\tcl.Flush()\n\t\t\ttablets := cl.GetTableTablets(\"ks\", \"tb\")\n\t\t\ttests.AssertEqual(t, \"tablet count\", operations, len(tablets))\n\t\tcase <-time.After(10 * time.Second):\n\t\t\tt.Fatal(\"queue saturation caused deadlock\")\n\t\t}\n\t})\n\n\tt.Run(\"QueueSaturationReadsConsistent\", func(t *testing.T) {\n\t\tcl := NewCowTabletList()\n\t\tdefer cl.Close()\n\n\t\tcl.AddTablet(TabletInfo{\n\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\tfirstToken: -1000, lastToken: -900,\n\t\t\treplicas: []ReplicaInfo{{GenerateHostUUIDs(1)[0], 0}},\n\t\t})\n\t\tcl.Flush()\n\n\t\tconst writers = 10000\n\t\tvar wg sync.WaitGroup\n\n\t\twriterUUIDs := GenerateHostUUIDs(writers)\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor i := 0; i < writers; i++ {\n\t\t\t\tcl.AddTablet(TabletInfo{\n\t\t\t\t\tkeyspaceName: \"ks\", tableName: \"tb\",\n\t\t\t\t\tfirstToken: int64(i * 100), lastToken: int64(i*100 + 99),\n\t\t\t\t\treplicas: []ReplicaInfo{{writerUUIDs[i], 
0}},\n\t\t\t\t})\n\t\t\t}\n\t\t}()\n\n\t\tconst readers = 5\n\t\tfor r := 0; r < readers; r++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfor i := 0; i < 1000; i++ {\n\t\t\t\t\tentry, ok := cl.FindTabletForToken(\"ks\", \"tb\", -950)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tif entry.FirstToken() > entry.LastToken() {\n\t\t\t\t\t\t\tt.Errorf(\"invalid token range: first=%d > last=%d\", entry.FirstToken(), entry.LastToken())\n\t\t\t\t\t\t}\n\t\t\t\t\t\treplicas := entry.Replicas()\n\t\t\t\t\t\tif len(replicas) == 0 {\n\t\t\t\t\t\t\tt.Error(\"expected at least one replica\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tentries := cl.GetTableTablets(\"ks\", \"tb\")\n\t\t\t\t\tfor _, e := range entries {\n\t\t\t\t\t\tif e.FirstToken() > e.LastToken() {\n\t\t\t\t\t\t\tt.Errorf(\"invalid token range in list: first=%d > last=%d\", e.FirstToken(), e.LastToken())\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\n\t\twg.Wait()\n\t})\n\n}\n"
  },
  {
    "path": "tablets/tablet_utils_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage tablets\n\nimport (\n\t\"math\"\n\t\"testing\"\n)\n\nfunc TestCreateTablets(t *testing.T) {\n\tt.Run(\"BasicDistribution\", func(t *testing.T) {\n\t\thosts := GenerateHostUUIDs(3)\n\t\ttl := createTablets(\"ks\", \"tbl\", hosts, 2, 6, 6)\n\t\tif len(tl) != 6 {\n\t\t\tt.Errorf(\"expected 6 tablets, got %d\", len(tl))\n\t\t}\n\n\t\tfor _, tablet := range tl {\n\t\t\tif len(tablet.replicas) != 2 {\n\t\t\t\tt.Errorf(\"each tablet should have 2 replicas, got %d\", len(tablet.replicas))\n\t\t\t}\n\t\t\treplicaSet := map[HostUUID]bool{}\n\t\t\tfor _, r := range tablet.replicas {\n\t\t\t\tif replicaSet[r.hostId] {\n\t\t\t\t\tt.Errorf(\"duplicate replica %s in tablet\", r.hostId)\n\t\t\t\t}\n\t\t\t\treplicaSet[r.hostId] = true\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"SingleTabletFullRange\", func(t *testing.T) {\n\t\thosts := GenerateHostUUIDs(3)\n\t\ttl := createTablets(\"ks\", \"tbl\", hosts, 3, 1, 1)\n\t\tt0 := tl[0]\n\t\tif t0.firstToken != math.MinInt64 {\n\t\t\tt.Errorf(\"unexpected firstToken: %d\", t0.firstToken)\n\t\t}\n\t\tif t0.lastToken != math.MaxInt64 {\n\t\t\tt.Errorf(\"unexpected lastToken: %d\", t0.lastToken)\n\t\t}\n\t})\n}\n\nfunc TestReplicaGenerator(t *testing.T) {\n\thosts := GenerateHostUUIDs(4)\n\trf := 2\n\tg := NewReplicaSetGenerator(hosts, rf)\n\n\tvar seen [][]HostUUID\n\tfor i := 0; i < 6; i++ {\n\t\tgen := g.Next()\n\n\t\tif len(gen) != rf {\n\t\t\tt.Fatalf(\"expected %d replicas, got %d\", rf, len(gen))\n\t\t}\n\n\t\tvar ids []HostUUID\n\t\tfor _, r := range gen {\n\t\t\tids = append(ids, r.HostUUIDValue())\n\t\t}\n\t\tseen = append(seen, ids)\n\t}\n\n\tfor i := 0; i < len(seen); i++ {\n\touter:\n\t\tfor j := i + 1; j < len(seen); j++ {\n\t\t\tfor k := 0; k < len(seen[i]); k++ {\n\t\t\t\tif seen[i][k] != seen[j][k] {\n\t\t\t\t\tcontinue outer\n\t\t\t\t}\n\t\t\t}\n\t\t\tt.Errorf(\"expected different output for different seeds, but found same seeds for %d and %d: %v == %v\", i, j, seen[i], 
seen[j])\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "tablets/tablet_utils.go",
    "content": "package tablets\n\nimport (\n\t\"encoding/binary\"\n\t\"math\"\n\t\"math/rand\"\n\t\"sync/atomic\"\n\n\t\"github.com/gocql/gocql/internal/tests\"\n)\n\nconst randSeed = 100\n\n// ReplicaSetGenerator generates all possible k-combinations (replica sets) of a given list of hosts,\n// where each combination contains `rf` elements. The generator cycles through all possible combinations\n// infinitely in a thread-safe manner using an atomic counter.\ntype ReplicaSetGenerator struct {\n\thosts   []HostUUID // List of available hosts\n\trf      int        // Replication factor (number of hosts per combination)\n\tlen     int        // Total number of hosts\n\tcounter uint64     // Current position in the sequence of combinations\n\ttotal   uint64     // Total number of possible combinations (n choose rf)\n}\n\n// NewReplicaSetGenerator creates and returns a new ReplicaSetGenerator for the given set of hosts\n// and replication factor `rf`. It panics if `rf` is non-positive or greater than the number of hosts.\n// The generator produces all k-combinations of the input set and loops over them indefinitely.\nfunc NewReplicaSetGenerator(hosts []HostUUID, rf int) *ReplicaSetGenerator {\n\tn := len(hosts)\n\tif rf <= 0 {\n\t\tpanic(\"replication factor must be positive\")\n\t}\n\tif rf > len(hosts) {\n\t\tpanic(\"replication factor cannot exceed number of hosts\")\n\t}\n\treturn &ReplicaSetGenerator{\n\t\thosts: hosts,\n\t\trf:    rf,\n\t\tlen:   n,\n\t\ttotal: uint64(binomial(n, rf)),\n\t}\n}\n\n// Next returns the next replica set as a slice of ReplicaInfo. 
The combinations are returned in a\n// deterministic order and wrap around after exhausting all possible combinations.\n// This method is safe for concurrent use.\nfunc (it *ReplicaSetGenerator) Next() []ReplicaInfo {\n\t// Advance and wrap around\n\tcounter := atomic.AddUint64(&it.counter, 1) % it.total\n\t// Map current counter to combination\n\treturn unrankCombination(it.len, it.rf, int(counter), it.hosts)\n}\n\n// binomial calculates the number of unique combinations (n choose k)\n// for selecting `rf` elements from a set of `hosts` elements.\n//\n// It returns the binomial coefficient C(hosts, rf), which represents\n// the number of ways to choose `rf` items from a total of `hosts` without\n// regard to order.\n//\n// If rf < 0 or rf > hosts, the function returns 0.\n// If rf == 0 or rf == hosts, the function returns 1.\nfunc binomial(hosts, rf int) int {\n\tif rf < 0 || rf > hosts {\n\t\treturn 0\n\t}\n\tif rf == 0 || rf == hosts {\n\t\treturn 1\n\t}\n\tnum := 1\n\tden := 1\n\tfor i := 1; i <= rf; i++ {\n\t\tnum *= hosts - (i - 1)\n\t\tden *= i\n\t}\n\treturn num / den\n}\n\n// unrankCombination returns the k-combination of elements from the input slice\n// corresponding to the given rank (counter) in lexicographic order.\nfunc unrankCombination(n, k, counter int, input []HostUUID) []ReplicaInfo {\n\tcomb := make([]ReplicaInfo, 0, k)\n\tx := 0\n\tfor i := 0; i < k; i++ {\n\t\tfor {\n\t\t\tb := binomial(n-x-1, k-i-1)\n\t\t\tif counter < b {\n\t\t\t\tcomb = append(comb, ReplicaInfo{\n\t\t\t\t\thostId:  input[x],\n\t\t\t\t\tshardId: 0,\n\t\t\t\t})\n\t\t\t\tx++\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tcounter -= b\n\t\t\t\tx++\n\t\t\t}\n\t\t}\n\t}\n\treturn comb\n}\n\nfunc getThreadSafeRnd() *tests.ThreadSafeRand {\n\treturn tests.NewThreadSafeRand(randSeed)\n}\n\nfunc getRnd() *rand.Rand {\n\treturn rand.New(rand.NewSource(randSeed))\n}\n\n// GenerateHostUUIDs generates a slice of deterministic HostUUIDs for testing.\n// Byte 0 is set to 0xFE so that even index 
0 is never the zero UUID.\nfunc GenerateHostUUIDs(count int) []HostUUID {\n\thosts := make([]HostUUID, count)\n\tfor i := range hosts {\n\t\thosts[i][0] = 0xFE\n\t\tbinary.BigEndian.PutUint64(hosts[i][8:], uint64(i))\n\t}\n\treturn hosts\n}\n\n// createTablets generates a list of TabletInfo entries for a given keyspace and table.\n// Each tablet is assigned a token range and a set of replica hosts.\nfunc createTablets(ks, table string, hosts []HostUUID, rf, count int, tokenRangeCount int64) TabletInfoList {\n\tout := make(TabletInfoList, count)\n\tstep := math.MaxUint64 / uint64(tokenRangeCount)\n\trepGen := NewReplicaSetGenerator(hosts, rf)\n\tfirstToken := int64(math.MinInt64)\n\tfor i := 0; i < count; i++ {\n\t\tout[i] = TabletInfo{\n\t\t\tkeyspaceName: ks,\n\t\t\ttableName:    table,\n\t\t\tfirstToken:   firstToken,\n\t\t\tlastToken:    firstToken + int64(step),\n\t\t\treplicas:     repGen.Next(),\n\t\t}\n\t\tfirstToken = firstToken + int64(step)\n\t}\n\treturn out\n}\n"
  },
  {
    "path": "tablets/tablets.go",
    "content": "package tablets\n\nimport (\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"slices\"\n\t\"sort\"\n\t\"sync\"\n\t\"sync/atomic\"\n)\n\n// HostUUID is a 16-byte binary UUID used to identify hosts without heap allocation.\ntype HostUUID [16]byte\n\n// String returns the canonical UUID string representation (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx).\nfunc (u HostUUID) String() string {\n\tvar buf [36]byte\n\thex.Encode(buf[0:8], u[0:4])\n\tbuf[8] = '-'\n\thex.Encode(buf[9:13], u[4:6])\n\tbuf[13] = '-'\n\thex.Encode(buf[14:18], u[6:8])\n\tbuf[18] = '-'\n\thex.Encode(buf[19:23], u[8:10])\n\tbuf[23] = '-'\n\thex.Encode(buf[24:36], u[10:16])\n\treturn string(buf[:])\n}\n\n// IsEmpty reports whether the UUID is all zeros.\nfunc (u HostUUID) IsEmpty() bool {\n\treturn u == HostUUID{}\n}\n\n// ParseHostUUID parses a UUID string (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) into a HostUUID.\nfunc ParseHostUUID(s string) (HostUUID, error) {\n\tif len(s) != 36 {\n\t\treturn HostUUID{}, fmt.Errorf(\"invalid UUID length: %d\", len(s))\n\t}\n\tif s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {\n\t\treturn HostUUID{}, fmt.Errorf(\"invalid UUID format: %s\", s)\n\t}\n\tvar u HostUUID\n\tvar buf [32]byte\n\tcopy(buf[0:8], s[0:8])\n\tcopy(buf[8:12], s[9:13])\n\tcopy(buf[12:16], s[14:18])\n\tcopy(buf[16:20], s[19:23])\n\tcopy(buf[20:32], s[24:36])\n\t_, err := hex.Decode(u[:], buf[:])\n\tif err != nil {\n\t\treturn HostUUID{}, fmt.Errorf(\"invalid UUID hex: %s: %w\", s, err)\n\t}\n\treturn u, nil\n}\n\n// MustParseHostUUID is like ParseHostUUID but panics on error.\nfunc MustParseHostUUID(s string) HostUUID {\n\tu, err := ParseHostUUID(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn u\n}\n\ntype ReplicaInfo struct {\n\t// hostId stored as binary UUID to avoid per-replica heap allocation.\n\thostId  HostUUID\n\tshardId int\n}\n\nfunc (r ReplicaInfo) HostID() string {\n\treturn r.hostId.String()\n}\n\n// HostUUIDValue returns the raw binary host UUID for zero-allocation 
comparison.\nfunc (r ReplicaInfo) HostUUIDValue() HostUUID {\n\treturn r.hostId\n}\n\nfunc (r ReplicaInfo) ShardID() int {\n\treturn r.shardId\n}\n\nfunc (r ReplicaInfo) String() string {\n\treturn fmt.Sprintf(\"ReplicaInfo{hostId:%s, shardId:%d}\", r.hostId.String(), r.shardId)\n}\n\ntype TabletInfoBuilder struct {\n\tKeyspaceName string\n\tTableName    string\n\tReplicas     [][]any\n\tFirstToken   int64\n\tLastToken    int64\n}\n\nfunc NewTabletInfoBuilder() TabletInfoBuilder {\n\treturn TabletInfoBuilder{}\n}\n\ntype toString interface {\n\tString() string\n}\n\n// uuidProvider is satisfied by types that can provide raw UUID bytes (e.g., gocql.UUID).\ntype uuidProvider interface {\n\tBytes() []byte\n}\n\nfunc (b TabletInfoBuilder) Build() (TabletInfo, error) {\n\tif b.FirstToken > b.LastToken {\n\t\treturn TabletInfo{}, fmt.Errorf(\"invalid token range: firstToken (%d) > lastToken (%d)\",\n\t\t\tb.FirstToken, b.LastToken)\n\t}\n\n\ttabletReplicas := make([]ReplicaInfo, 0, len(b.Replicas))\n\tfor _, replica := range b.Replicas {\n\t\tif len(replica) != 2 {\n\t\t\treturn TabletInfo{}, fmt.Errorf(\"replica info should have exactly two elements, but it has %d: %v\", len(replica), replica)\n\t\t}\n\t\tshardId, ok := replica[1].(int)\n\t\tif !ok {\n\t\t\treturn TabletInfo{}, fmt.Errorf(\"second element (shard) of replica is not int: %v\", replica)\n\t\t}\n\t\tvar hostUUID HostUUID\n\t\tswitch v := replica[0].(type) {\n\t\tcase uuidProvider:\n\t\t\traw := v.Bytes()\n\t\t\tif len(raw) != 16 {\n\t\t\t\treturn TabletInfo{}, fmt.Errorf(\"UUID bytes has wrong length %d, expected 16\", len(raw))\n\t\t\t}\n\t\t\tcopy(hostUUID[:], raw)\n\t\tcase string:\n\t\t\tparsed, err := ParseHostUUID(v)\n\t\t\tif err != nil {\n\t\t\t\treturn TabletInfo{}, fmt.Errorf(\"first element (hostID) cannot be parsed as UUID: %v: %w\", replica, err)\n\t\t\t}\n\t\t\thostUUID = parsed\n\t\tcase toString:\n\t\t\tparsed, err := ParseHostUUID(v.String())\n\t\t\tif err != nil {\n\t\t\t\treturn 
TabletInfo{}, fmt.Errorf(\"first element (hostID) cannot be parsed as UUID: %v: %w\", replica, err)\n\t\t\t}\n\t\t\thostUUID = parsed\n\t\tdefault:\n\t\t\treturn TabletInfo{}, fmt.Errorf(\"first element (hostID) of replica is not UUID: %v\", replica)\n\t\t}\n\t\ttabletReplicas = append(tabletReplicas, ReplicaInfo{hostUUID, shardId})\n\t}\n\n\treturn TabletInfo{\n\t\tkeyspaceName: b.KeyspaceName,\n\t\ttableName:    b.TableName,\n\t\tfirstToken:   b.FirstToken,\n\t\tlastToken:    b.LastToken,\n\t\treplicas:     tabletReplicas,\n\t}, nil\n}\n\n// TabletInfo represents a tablet with its token range and replica set.\ntype TabletInfo struct {\n\tkeyspaceName string\n\ttableName    string\n\treplicas     []ReplicaInfo\n\tfirstToken   int64\n\tlastToken    int64\n}\n\nfunc (t TabletInfo) KeyspaceName() string {\n\treturn t.keyspaceName\n}\n\nfunc (t TabletInfo) FirstToken() int64 {\n\treturn t.firstToken\n}\n\nfunc (t TabletInfo) LastToken() int64 {\n\treturn t.lastToken\n}\n\nfunc (t TabletInfo) TableName() string {\n\treturn t.tableName\n}\n\nfunc (t TabletInfo) Replicas() []ReplicaInfo {\n\tresult := make([]ReplicaInfo, len(t.replicas))\n\tcopy(result, t.replicas)\n\treturn result\n}\n\n// ReplicasUnsafe returns the raw replica slice without copying.\nfunc (t TabletInfo) ReplicasUnsafe() []ReplicaInfo {\n\treturn t.replicas\n}\n\ntype TabletInfoList []TabletInfo\n\n// TabletEntry is the per-table representation of a tablet, without keyspace/table names.\ntype TabletEntry struct {\n\treplicas   []ReplicaInfo\n\tfirstToken int64\n\tlastToken  int64\n}\n\ntype TabletEntryList []TabletEntry\n\n// Replicas returns a copy of the replica list for this entry.\nfunc (e TabletEntry) Replicas() []ReplicaInfo {\n\tresult := make([]ReplicaInfo, len(e.replicas))\n\tcopy(result, e.replicas)\n\treturn result\n}\n\n// ReplicasUnsafe returns the raw replica slice without copying.\nfunc (e TabletEntry) ReplicasUnsafe() []ReplicaInfo {\n\treturn e.replicas\n}\n\nfunc (e TabletEntry) 
FirstToken() int64 {\n\treturn e.firstToken\n}\n\nfunc (e TabletEntry) LastToken() int64 {\n\treturn e.lastToken\n}\n\n// findOverlapRange returns the start and tailStart indices for entries\n// overlapping with the token range [firstToken, lastToken].\n// start is the first overlapping entry; tailStart is the first entry\n// after the overlap region.\nfunc (t TabletEntryList) findOverlapRange(firstToken, lastToken int64) (start, tailStart int) {\n\tif len(t) == 0 {\n\t\treturn 0, 0\n\t}\n\n\tl := 0\n\tr := len(t)\n\n\tl1, r1 := l, r\n\tl2, r2 := l1, r1\n\n\tfor l1 < r1 {\n\t\tmid := l1 + (r1-l1)/2\n\t\tif t[mid].firstToken < firstToken {\n\t\t\tl1 = mid + 1\n\t\t} else {\n\t\t\tr1 = mid\n\t\t}\n\t}\n\tstart = l1\n\n\t// Adjust start backward if the previous entry overlaps.\n\tif start > l && t[start-1].lastToken > firstToken {\n\t\tstart = start - 1\n\t}\n\n\tfor l2 < r2 {\n\t\tmid := l2 + (r2-l2)/2\n\t\tif t[mid].lastToken < lastToken {\n\t\t\tl2 = mid + 1\n\t\t} else {\n\t\t\tr2 = mid\n\t\t}\n\t}\n\tend := l2\n\tif end < len(t) && t[end].firstToken > lastToken {\n\t\tend = end - 1\n\t}\n\tif end >= len(t) {\n\t\tend = len(t) - 1\n\t}\n\n\tif start <= end && end >= 0 {\n\t\ttailStart = end + 1\n\t} else {\n\t\ttailStart = start\n\t}\n\treturn start, tailStart\n}\n\n// addEntry inserts a single entry into the sorted list, replacing any overlapping ranges.\n// Returns a new slice without mutating the original.\nfunc (t TabletEntryList) addEntry(e TabletEntry) TabletEntryList {\n\tstart, tailStart := t.findOverlapRange(e.firstToken, e.lastToken)\n\tresult := make(TabletEntryList, 0, start+1+(len(t)-tailStart))\n\tresult = append(result, t[:start]...)\n\tresult = append(result, e)\n\tresult = append(result, t[tailStart:]...)\n\treturn result\n}\n\n// bulkAddEntries inserts a sorted batch of entries, replacing any overlapping ranges.\n// Returns a new slice without mutating the original.\n// The entries must be sorted by firstToken, then lastToken. 
Entries may have\n// gaps between them or overlap each other within the batch; existing entries\n// that fall in gaps between batch entries are preserved. Intra-batch overlaps\n// are resolved by letting later entries replace earlier ones.\nfunc (t TabletEntryList) bulkAddEntries(entries TabletEntryList) TabletEntryList {\n\tif len(entries) == 0 {\n\t\treturn t\n\t}\n\n\t// Resolve intra-batch overlaps: later entries replace earlier ones.\n\tdeduped := make(TabletEntryList, 0, len(entries))\n\tfor _, e := range entries {\n\t\t// Drop any previously added entries that the current one overlaps.\n\t\tfor len(deduped) > 0 && deduped[len(deduped)-1].firstToken >= e.firstToken {\n\t\t\tdeduped = deduped[:len(deduped)-1]\n\t\t}\n\t\t// If the last kept entry partially overlaps, drop it too.\n\t\tif len(deduped) > 0 && deduped[len(deduped)-1].lastToken > e.firstToken {\n\t\t\tdeduped = deduped[:len(deduped)-1]\n\t\t}\n\t\tdeduped = append(deduped, e)\n\t}\n\tentries = deduped\n\n\t// Merge the existing list (t) with the batch (entries).\n\t// Both are sorted by firstToken. 
For each batch entry we remove any\n\t// overlapping existing entries; existing entries that sit in gaps\n\t// between batch entries are preserved.\n\tresult := make(TabletEntryList, 0, len(t)+len(entries))\n\tti := 0 // index into t (existing list)\n\n\tfor _, e := range entries {\n\t\t// Copy existing entries that come entirely before this batch entry.\n\t\tfor ti < len(t) && t[ti].lastToken <= e.firstToken && t[ti].firstToken < e.firstToken {\n\t\t\tresult = append(result, t[ti])\n\t\t\tti++\n\t\t}\n\t\t// Skip existing entries that overlap with this batch entry.\n\t\tfor ti < len(t) && t[ti].firstToken < e.lastToken && t[ti].lastToken > e.firstToken {\n\t\t\tti++\n\t\t}\n\t\tresult = append(result, e)\n\t}\n\t// Append remaining existing entries that come after all batch entries.\n\tresult = append(result, t[ti:]...)\n\treturn result\n}\n\n// findEntryForToken performs a binary search within [l, r) to find the entry\n// covering the given token. Returns false if no such entry exists.\nfunc (t TabletEntryList) findEntryForToken(token int64, l int, r int) (TabletEntry, bool) {\n\tif l < 0 || r > len(t) || l > r {\n\t\treturn TabletEntry{}, false\n\t}\n\tif l == r {\n\t\treturn TabletEntry{}, false\n\t}\n\n\tfor l < r {\n\t\tm := l + (r-l)/2\n\t\tif t[m].lastToken < token {\n\t\t\tl = m + 1\n\t\t} else {\n\t\t\tr = m\n\t\t}\n\t}\n\tif l >= len(t) {\n\t\treturn TabletEntry{}, false\n\t}\n\tif t[l].firstToken > token {\n\t\treturn TabletEntry{}, false\n\t}\n\treturn t[l], true\n}\n\n// removeEntriesWithHost returns a new list excluding entries with a replica on the given host.\nfunc (t TabletEntryList) removeEntriesWithHost(hostID HostUUID) TabletEntryList {\n\thasMatch := false\n\tfor _, e := range t {\n\t\tfor _, r := range e.replicas {\n\t\t\tif r.hostId == hostID {\n\t\t\t\thasMatch = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif hasMatch {\n\t\t\tbreak\n\t\t}\n\t}\n\tif !hasMatch {\n\t\treturn t\n\t}\n\n\tresult := make(TabletEntryList, 0, len(t))\n\tfor _, e := 
range t {\n\t\texclude := false\n\t\tfor _, r := range e.replicas {\n\t\t\tif r.hostId == hostID {\n\t\t\t\texclude = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !exclude {\n\t\t\tresult = append(result, e)\n\t\t}\n\t}\n\treturn result\n}\n\n// toEntry converts a TabletInfo to a TabletEntry.\nfunc (t TabletInfo) toEntry() TabletEntry {\n\treturn TabletEntry{\n\t\treplicas:   slices.Clone(t.replicas),\n\t\tfirstToken: t.firstToken,\n\t\tlastToken:  t.lastToken,\n\t}\n}\n\n// toTabletInfo converts a TabletEntry back to a TabletInfo.\nfunc (e TabletEntry) toTabletInfo(keyspace, table string) TabletInfo {\n\treturn TabletInfo{\n\t\tkeyspaceName: keyspace,\n\t\ttableName:    table,\n\t\treplicas:     slices.Clone(e.replicas),\n\t\tfirstToken:   e.firstToken,\n\t\tlastToken:    e.lastToken,\n\t}\n}\n\n// tableKey identifies a specific table within a keyspace.\ntype tableKey struct {\n\tkeyspace string\n\ttable    string\n}\n\n// tableTablets holds a per-table sorted tablet list with copy-on-write semantics.\ntype tableTablets struct {\n\tlist atomic.Pointer[TabletEntryList] // stores TabletEntryList for this table\n}\n\nfunc newTableTablets() *tableTablets {\n\ttt := &tableTablets{}\n\tempty := make(TabletEntryList, 0)\n\ttt.list.Store(&empty)\n\treturn tt\n}\n\nfunc (tt *tableTablets) store(list TabletEntryList) {\n\ttt.list.Store(&list)\n}\n\n// tabletOp is an operation processed by the writer goroutine.\ntype tabletOp interface {\n\texecute(c *CowTabletList)\n}\n\ntype opAddTablet struct {\n\ttablet TabletInfo\n}\n\nfunc (op opAddTablet) execute(c *CowTabletList) { c.doAddTablet(op.tablet) }\n\ntype opBulkAddTablets struct {\n\ttablets TabletInfoList\n}\n\nfunc (op opBulkAddTablets) execute(c *CowTabletList) { c.doBulkAddTablets(op.tablets) }\n\ntype opRemoveHost struct {\n\thostID HostUUID\n}\n\nfunc (op opRemoveHost) execute(c *CowTabletList) { c.doRemoveTabletsWithHost(op.hostID) }\n\ntype opRemoveKeyspace struct {\n\tkeyspace string\n}\n\nfunc (op 
opRemoveKeyspace) execute(c *CowTabletList) { c.doRemoveTabletsWithKeyspace(op.keyspace) }\n\ntype opRemoveTable struct {\n\tkeyspace string\n\ttable    string\n}\n\nfunc (op opRemoveTable) execute(c *CowTabletList) { c.doRemoveTabletsWithTable(op.keyspace, op.table) }\n\ntype opFlush struct {\n\tdone chan struct{}\n}\n\nfunc (op opFlush) execute(_ *CowTabletList) { close(op.done) }\n\n// opQueueBufferSize is the capacity of the writer goroutine's operation queue.\nconst opQueueBufferSize = 4096\n\n// opQueue manages a single writer goroutine with safe send/close/flush coordination.\ntype opQueue struct {\n\tcachedItem tabletOp\n\tops        chan tabletOp\n\tquit       chan struct{}\n\tstopped    chan struct{}\n\twaiters    *sync.Cond\n\tsenders    int\n\tcloseOnce  sync.Once\n\tlifecycle  sync.Mutex\n\tclosed     bool\n}\n\nfunc newOpQueue() *opQueue {\n\tq := &opQueue{\n\t\tops:     make(chan tabletOp, opQueueBufferSize),\n\t\tquit:    make(chan struct{}),\n\t\tstopped: make(chan struct{}),\n\t}\n\tq.waiters = sync.NewCond(&q.lifecycle)\n\treturn q\n}\n\nfunc (q *opQueue) next() tabletOp {\n\tif q.cachedItem != nil {\n\t\titem := q.cachedItem\n\t\tq.cachedItem = nil\n\t\treturn item\n\t}\n\tvar op tabletOp\n\tselect {\n\tcase <-q.quit:\n\t\t{\n\t\t\treturn nil\n\t\t}\n\tcase op = <-q.ops:\n\t\topAdd, ok := op.(opAddTablet)\n\t\tif !ok {\n\t\t\treturn op\n\t\t}\n\t\tbulkOp := opBulkAddTablets{\n\t\t\ttablets: make(TabletInfoList, 0, 1),\n\t\t}\n\t\tbulkOp.tablets = append(bulkOp.tablets, opAdd.tablet)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase op = <-q.ops:\n\t\t\t\topAdd, ok = op.(opAddTablet)\n\t\t\t\tif !ok {\n\t\t\t\t\tq.cachedItem = op\n\t\t\t\t\treturn bulkOp\n\t\t\t\t}\n\t\t\t\tbulkOp.tablets = append(bulkOp.tablets, opAdd.tablet)\n\t\t\tdefault:\n\t\t\t\treturn bulkOp\n\t\t\t}\n\t\t}\n\t}\n}\n\n// run is the writer goroutine loop.\nfunc (q *opQueue) run(process func(tabletOp)) {\n\tdefer close(q.stopped)\n\tfor {\n\t\top := q.next()\n\t\tif op == nil 
{\n\t\t\treturn\n\t\t}\n\t\tprocess(op)\n\t}\n}\n\n// close stops the writer goroutine after draining in-flight senders.\nfunc (q *opQueue) close() {\n\tq.closeOnce.Do(func() {\n\t\tq.lifecycle.Lock()\n\t\tq.closed = true\n\t\tfor q.senders > 0 {\n\t\t\tq.waiters.Wait()\n\t\t}\n\t\tclose(q.quit)\n\t\tq.lifecycle.Unlock()\n\t})\n\t<-q.stopped\n}\n\n// flush blocks until all previously submitted operations have been processed.\nfunc (q *opQueue) flush() {\n\tdone := make(chan struct{})\n\tif !q.beginSend() {\n\t\treturn\n\t}\n\tdefer q.endSend()\n\tsent := false\n\tselect {\n\tcase q.ops <- opFlush{done: done}:\n\t\tsent = true\n\tcase <-q.quit:\n\t}\n\tif !sent {\n\t\treturn\n\t}\n\n\tselect {\n\tcase <-done:\n\tcase <-q.stopped:\n\t}\n}\n\n// send enqueues an operation.\nfunc (q *opQueue) send(op tabletOp) {\n\tif !q.beginSend() {\n\t\treturn\n\t}\n\tdefer q.endSend()\n\tselect {\n\tcase q.ops <- op:\n\tcase <-q.quit:\n\t}\n}\n\nfunc (q *opQueue) beginSend() bool {\n\tq.lifecycle.Lock()\n\tdefer q.lifecycle.Unlock()\n\tif q.closed {\n\t\treturn false\n\t}\n\tq.senders++\n\treturn true\n}\n\nfunc (q *opQueue) endSend() {\n\tq.lifecycle.Lock()\n\tq.senders--\n\tif q.senders == 0 {\n\t\tq.waiters.Broadcast()\n\t}\n\tq.lifecycle.Unlock()\n}\n\n// tableMap is the type stored inside the atomic pointer.\ntype tableMap = map[tableKey]*tableTablets\n\n// CowTabletList stores tablets partitioned by keyspace/table.\n// All writes are serialized through a single writer goroutine; reads are lock-free.\n// Write operations are asynchronous; use Flush() for read-your-writes consistency.\ntype CowTabletList struct {\n\ttables atomic.Pointer[tableMap]\n\tqueue  *opQueue\n}\n\n// NewCowTabletList creates a new CowTabletList and starts its writer goroutine.\n// The caller must call Close() when done to stop the writer goroutine.\nfunc NewCowTabletList() *CowTabletList {\n\tc := &CowTabletList{\n\t\tqueue: newOpQueue(),\n\t}\n\tempty := make(tableMap)\n\tc.tables.Store(&empty)\n\tgo 
c.queue.run(func(op tabletOp) { op.execute(c) })\n\treturn c\n}\n\n// Close stops the writer goroutine after draining all pending operations.\nfunc (c *CowTabletList) Close() {\n\tif c == nil {\n\t\treturn\n\t}\n\tc.queue.close()\n}\n\n// Flush blocks until all previously submitted write operations have been processed.\nfunc (c *CowTabletList) Flush() {\n\tif c == nil {\n\t\treturn\n\t}\n\tc.queue.flush()\n}\n\n// cloneTableMap returns a shallow copy of the current table map.\nfunc (c *CowTabletList) cloneTableMap() tableMap {\n\told := *c.tables.Load()\n\tm := make(tableMap, len(old)+1)\n\tfor k, v := range old {\n\t\tm[k] = v\n\t}\n\treturn m\n}\n\n// getOrCreateTable returns the tableTablets for the given key, creating it if needed.\nfunc (c *CowTabletList) getOrCreateTable(key tableKey) *tableTablets {\n\tcurrent := *c.tables.Load()\n\ttt := current[key]\n\tif tt != nil {\n\t\treturn tt\n\t}\n\ttt = newTableTablets()\n\tm := c.cloneTableMap()\n\tm[key] = tt\n\tc.tables.Store(&m)\n\treturn tt\n}\n\nfunc (c *CowTabletList) doAddTablet(tablet TabletInfo) {\n\tif tablet.keyspaceName == \"\" || tablet.tableName == \"\" {\n\t\treturn\n\t}\n\tkey := tableKey{tablet.keyspaceName, tablet.tableName}\n\ttt := c.getOrCreateTable(key)\n\ttt.store((*tt.list.Load()).addEntry(tablet.toEntry()))\n}\n\nfunc (c *CowTabletList) doBulkAddTablets(tablets TabletInfoList) {\n\tif len(tablets) == 0 {\n\t\treturn\n\t}\n\n\tgroups := make(map[tableKey]TabletInfoList)\n\tfor _, t := range tablets {\n\t\tif t.keyspaceName == \"\" || t.tableName == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tkey := tableKey{t.keyspaceName, t.tableName}\n\t\tgroups[key] = append(groups[key], t)\n\t}\n\tfor key, group := range groups {\n\t\tsort.Slice(group, func(i, j int) bool {\n\t\t\tif group[i].FirstToken() != group[j].FirstToken() {\n\t\t\t\treturn group[i].FirstToken() < group[j].FirstToken()\n\t\t\t}\n\t\t\treturn group[i].LastToken() < group[j].LastToken()\n\t\t})\n\t\tentries := make(TabletEntryList, 
len(group))\n\t\tfor i, t := range group {\n\t\t\tentries[i] = t.toEntry()\n\t\t}\n\t\ttt := c.getOrCreateTable(key)\n\t\ttt.store((*tt.list.Load()).bulkAddEntries(entries))\n\t}\n}\n\nfunc (c *CowTabletList) doRemoveTabletsWithHost(hostID HostUUID) {\n\tcurrent := *c.tables.Load()\n\tneedsMapUpdate := false\n\tfor _, tt := range current {\n\t\told := tt.list.Load()\n\t\tnewList := (*old).removeEntriesWithHost(hostID)\n\t\tif len(newList) != len(*old) {\n\t\t\ttt.store(newList)\n\t\t\tif len(newList) == 0 {\n\t\t\t\tneedsMapUpdate = true\n\t\t\t}\n\t\t}\n\t}\n\tif needsMapUpdate {\n\t\tnewMap := make(tableMap, len(current))\n\t\tfor k, v := range current {\n\t\t\tif len(*v.list.Load()) > 0 {\n\t\t\t\tnewMap[k] = v\n\t\t\t}\n\t\t}\n\t\tc.tables.Store(&newMap)\n\t}\n}\n\nfunc (c *CowTabletList) doRemoveTabletsWithKeyspace(keyspace string) {\n\tcurrent := *c.tables.Load()\n\thasKey := false\n\tfor k := range current {\n\t\tif k.keyspace == keyspace {\n\t\t\thasKey = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !hasKey {\n\t\treturn\n\t}\n\tnewMap := make(tableMap, len(current))\n\tfor k, v := range current {\n\t\tif k.keyspace != keyspace {\n\t\t\tnewMap[k] = v\n\t\t}\n\t}\n\tc.tables.Store(&newMap)\n}\n\nfunc (c *CowTabletList) doRemoveTabletsWithTable(keyspace, table string) {\n\tcurrent := *c.tables.Load()\n\tkey := tableKey{keyspace, table}\n\tif _, exists := current[key]; !exists {\n\t\treturn\n\t}\n\tnewMap := make(tableMap, len(current))\n\tfor k, v := range current {\n\t\tif k != key {\n\t\t\tnewMap[k] = v\n\t\t}\n\t}\n\tc.tables.Store(&newMap)\n}\n\n// --- Public read methods ---\n\n// getTable returns the tableTablets for the given key, or nil if not found.\nfunc (c *CowTabletList) getTable(key tableKey) *tableTablets {\n\tcurrent := *c.tables.Load()\n\treturn current[key]\n}\n\n// Get returns a flat TabletInfoList containing all tablets across all tables.\n//\n// Deprecated: Use [CowTabletList.GetTableTablets] for per-table lookups or\n// [CowTabletList.ForEach] to 
iterate without aggregating into a flat list.\nfunc (c *CowTabletList) Get() TabletInfoList {\n\tif c == nil {\n\t\treturn nil\n\t}\n\tcurrent := *c.tables.Load()\n\ttype snap struct {\n\t\tkey  tableKey\n\t\tlist TabletEntryList\n\t}\n\tsnaps := make([]snap, 0, len(current))\n\ttotal := 0\n\tfor key, tt := range current {\n\t\tl := *tt.list.Load()\n\t\tsnaps = append(snaps, snap{key, l})\n\t\ttotal += len(l)\n\t}\n\tresult := make(TabletInfoList, 0, total)\n\tfor _, s := range snaps {\n\t\tfor _, e := range s.list {\n\t\t\tresult = append(result, e.toTabletInfo(s.key.keyspace, s.key.table))\n\t\t}\n\t}\n\treturn result\n}\n\n// GetTableTablets returns a copy of the tablet list for the specified keyspace and table.\n// Returns nil if no tablets exist for the given combination.\nfunc (c *CowTabletList) GetTableTablets(keyspace, table string) TabletEntryList {\n\tif c == nil {\n\t\treturn nil\n\t}\n\ttt := c.getTable(tableKey{keyspace, table})\n\tif tt == nil {\n\t\treturn nil\n\t}\n\tsnapshot := *tt.list.Load()\n\tif len(snapshot) == 0 {\n\t\treturn nil\n\t}\n\tresult := make(TabletEntryList, len(snapshot))\n\tcopy(result, snapshot)\n\treturn result\n}\n\n// ForEach iterates over all keyspace/table pairs and their tablet entry lists,\n// calling fn for each one. 
Iteration stops early if fn returns false.\n// The returned TabletEntryList is a shallow copy; do not mutate entries or their replica slices.\nfunc (c *CowTabletList) ForEach(fn func(keyspace, table string, entries TabletEntryList) bool) {\n\tif c == nil || fn == nil {\n\t\treturn\n\t}\n\tcurrent := *c.tables.Load()\n\tfor key, tt := range current {\n\t\tsnapshot := *tt.list.Load()\n\t\tif len(snapshot) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tentries := make(TabletEntryList, len(snapshot))\n\t\tcopy(entries, snapshot)\n\t\tif !fn(key.keyspace, key.table, entries) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// FindReplicasForToken returns a copy of the replica set for the given token.\nfunc (c *CowTabletList) FindReplicasForToken(keyspace, table string, token int64) []ReplicaInfo {\n\ttl, ok := c.FindTabletForToken(keyspace, table, token)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn tl.Replicas()\n}\n\n// FindReplicasUnsafeForToken returns the replica set for the given token without copying.\nfunc (c *CowTabletList) FindReplicasUnsafeForToken(keyspace, table string, token int64) []ReplicaInfo {\n\ttl, ok := c.FindTabletForToken(keyspace, table, token)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn tl.ReplicasUnsafe()\n}\n\n// FindTabletForToken locates the tablet covering the given token. 
Returns false if not found.\nfunc (c *CowTabletList) FindTabletForToken(keyspace, table string, token int64) (TabletEntry, bool) {\n\tif c == nil {\n\t\treturn TabletEntry{}, false\n\t}\n\ttt := c.getTable(tableKey{keyspace, table})\n\tif tt == nil {\n\t\treturn TabletEntry{}, false\n\t}\n\tentries := *tt.list.Load()\n\tif len(entries) == 0 {\n\t\treturn TabletEntry{}, false\n\t}\n\treturn entries.findEntryForToken(token, 0, len(entries))\n}\n\n// --- Public write methods ---\n\n// sendOp sends an operation to the writer goroutine.\nfunc (c *CowTabletList) sendOp(op tabletOp) {\n\tif c == nil {\n\t\treturn\n\t}\n\tc.queue.send(op)\n}\n\n// AddTablet queues a single tablet addition.\nfunc (c *CowTabletList) AddTablet(tablet TabletInfo) {\n\tc.sendOp(opAddTablet{tablet: tablet})\n}\n\n// BulkAddTablets queues a batch tablet addition.\nfunc (c *CowTabletList) BulkAddTablets(tablets TabletInfoList) {\n\tc.sendOp(opBulkAddTablets{tablets: tablets})\n}\n\n// RemoveTabletsWithHost queues removal of all tablets with replicas on the specified host.\nfunc (c *CowTabletList) RemoveTabletsWithHost(hostID HostUUID) {\n\tc.sendOp(opRemoveHost{hostID: hostID})\n}\n\n// RemoveTabletsWithKeyspace queues removal of all tablets for the given keyspace.\nfunc (c *CowTabletList) RemoveTabletsWithKeyspace(keyspace string) {\n\tc.sendOp(opRemoveKeyspace{keyspace: keyspace})\n}\n\n// RemoveTabletsWithTable queues removal of all tablets for the specified table.\nfunc (c *CowTabletList) RemoveTabletsWithTable(keyspace string, table string) {\n\tc.sendOp(opRemoveTable{keyspace: keyspace, table: table})\n}\n"
  },
  {
    "path": "tablets/tablets_bench_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage tablets\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"sync/atomic\"\n\t\"testing\"\n)\n\nconst tabletsCountMedium = 1500\n\n// BenchmarkFindReplicasUnsafeForToken measures the pure lookup+replica-return\n// path for a prepopulated CowTabletList.\nfunc BenchmarkFindReplicasUnsafeForToken(b *testing.B) {\n\tfor _, numTablets := range []int{1500, 10000} {\n\t\tb.Run(fmt.Sprintf(\"Tablets%d\", numTablets), func(b *testing.B) {\n\t\t\tconst rf = 3\n\t\t\tconst hostsCount = 6\n\t\t\thosts := GenerateHostUUIDs(hostsCount)\n\t\t\ttl := NewCowTabletList()\n\t\t\tdefer tl.Close()\n\n\t\t\ttl.BulkAddTablets(createTablets(\"ks\", \"tbl\", hosts, rf, numTablets, int64(numTablets)))\n\t\t\ttl.Flush()\n\t\t\truntime.GC()\n\t\t\tb.ResetTimer()\n\t\t\tb.ReportAllocs()\n\n\t\t\trnd := getThreadSafeRnd()\n\t\t\tb.RunParallel(func(pb *testing.PB) {\n\t\t\t\tfor pb.Next() {\n\t\t\t\t\ttoken := rnd.Int63()\n\t\t\t\t\treplicas := tl.FindReplicasUnsafeForToken(\"ks\", \"tbl\", token)\n\t\t\t\t\tif len(replicas) != rf {\n\t\t\t\t\t\t// Token may fall in a gap; that's fine for benchmarking.\n\t\t\t\t\t\t_ = replicas\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n}\n\ntype opConfig struct {\n\topRemoveKeyspace int64\n\topRemoveTable    int64\n\topRemoveHost     int64\n}\n\nfunc BenchmarkCowTabletList(b *testing.B) {\n\tconst (\n\t\trf = 3\n\t)\n\tb.Run(\"Parallel-10\", func(b *testing.B) {\n\t\trunCowTabletListTestSuit(b, \"ManyTables\", 6, 10, rf, 1500, 5)\n\t\trunCowTabletListTestSuit(b, \"SingleTable\", 6, 10, rf, 1500, 0)\n\t})\n\n\tb.Run(\"SingleThread\", func(b *testing.B) {\n\t\trunCowTabletListTestSuit(b, \"ManyTables\", 6, 1, rf, 1500, 5)\n\t\trunCowTabletListTestSuit(b, \"SingleTable\", 6, 1, rf, 1500, 0)\n\t})\n}\n\nfunc runCowTabletListTestSuit(b *testing.B, name string, hostsCount, parallelism, rf, totalTablets, extraTables int) {\n\tb.Run(name, func(b *testing.B) {\n\n\t\tb.Run(\"New\", func(b *testing.B) 
{\n\t\t\trunSingleCowTabletListTest(b, hostsCount, parallelism, rf, totalTablets, extraTables, false, opConfig{\n\t\t\t\topRemoveKeyspace: -1,\n\t\t\t\topRemoveHost:     -1,\n\t\t\t\topRemoveTable:    -1,\n\t\t\t})\n\t\t})\n\n\t\tb.Run(\"Prepopulated\", func(b *testing.B) {\n\t\t\trunSingleCowTabletListTest(b, hostsCount, parallelism, rf, totalTablets, extraTables, true, opConfig{\n\t\t\t\topRemoveKeyspace: -1,\n\t\t\t\topRemoveHost:     -1,\n\t\t\t\topRemoveTable:    -1,\n\t\t\t})\n\t\t})\n\n\t\tb.Run(\"RemoveHost\", func(b *testing.B) {\n\t\t\trunSingleCowTabletListTest(b, hostsCount, parallelism, rf, totalTablets, extraTables, true, opConfig{\n\t\t\t\topRemoveKeyspace: -1,\n\t\t\t\topRemoveTable:    -1,\n\t\t\t\topRemoveHost:     1000, // Every 1000 query is remove host, to measure congestion\n\t\t\t})\n\t\t})\n\n\t\tb.Run(\"RemoveTable\", func(b *testing.B) {\n\t\t\trunSingleCowTabletListTest(b, hostsCount, parallelism, rf, totalTablets, extraTables, true, opConfig{\n\t\t\t\topRemoveKeyspace: -1,\n\t\t\t\topRemoveHost:     -1,\n\t\t\t\topRemoveTable:    1000, // Every 1000 query is remove table, to measure congestion\n\t\t\t})\n\t\t})\n\n\t\tb.Run(\"RemoveKeyspace\", func(b *testing.B) {\n\t\t\trunSingleCowTabletListTest(b, hostsCount, parallelism, rf, totalTablets, extraTables, true, opConfig{\n\t\t\t\topRemoveHost:     -1,\n\t\t\t\topRemoveTable:    -1,\n\t\t\t\topRemoveKeyspace: 1000, // Every 1000 query is remove keyspace, to measure congestion\n\t\t\t})\n\t\t})\n\t})\n}\n\nfunc runSingleCowTabletListTest(b *testing.B, hostsCount, parallelism, rf, totalTablets, extraTables int, prepopulate bool, ratios opConfig) {\n\ttokenRangeCount64 := int64(totalTablets)\n\thosts := GenerateHostUUIDs(hostsCount)\n\ttargetKS := \"kstarget\"\n\ttargetTable := \"ttarget\"\n\tremoveKs := \"ksremove\"\n\tremoveTable := \"tremove\"\n\trepGen := NewReplicaSetGenerator(hosts, rf)\n\treadyTablets := createTablets(removeKs, removeTable, hosts, rf, totalTablets, 
tokenRangeCount64)\n\tb.SetParallelism(parallelism)\n\ttl := NewCowTabletList()\n\tdefer tl.Close()\n\trnd := getThreadSafeRnd()\n\topID := atomic.Int64{}\n\n\tif prepopulate {\n\t\ttl.BulkAddTablets(createTablets(targetKS, targetTable, hosts, rf, totalTablets, tokenRangeCount64))\n\t}\n\n\tfor i := 0; i < extraTables; i++ {\n\t\ttl.BulkAddTablets(createTablets(targetKS, fmt.Sprintf(\"table-%d\", i), hosts, rf, totalTablets, tokenRangeCount64))\n\t}\n\n\ttl.Flush()\n\truntime.GC()\n\tb.ResetTimer()\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tid := opID.Add(1)\n\t\t\ttoken := rnd.Int63()\n\t\t\ttablet, found := tl.FindTabletForToken(targetKS, targetTable, token)\n\t\t\tif !found || tablet.lastToken < token || tablet.firstToken > token {\n\t\t\t\t// If there is no tablet for token, emulate update, same way it is usually happening\n\t\t\t\tfirstToken := (token / tokenRangeCount64) * tokenRangeCount64\n\t\t\t\tlastToken := firstToken + tokenRangeCount64\n\t\t\t\ttl.AddTablet(TabletInfo{\n\t\t\t\t\tkeyspaceName: targetKS,\n\t\t\t\t\ttableName:    targetTable,\n\t\t\t\t\tfirstToken:   firstToken,\n\t\t\t\t\tlastToken:    lastToken,\n\t\t\t\t\treplicas:     repGen.Next(),\n\t\t\t\t})\n\t\t\t}\n\t\t\tif ratios.opRemoveTable == 0 || ((ratios.opRemoveTable != -1) && id%ratios.opRemoveTable == 0) {\n\t\t\t\ttl.BulkAddTablets(readyTablets)\n\t\t\t\ttl.RemoveTabletsWithTable(targetKS, removeTable)\n\t\t\t}\n\t\t\tif ratios.opRemoveKeyspace == 0 || ((ratios.opRemoveKeyspace != -1) && id%ratios.opRemoveKeyspace == 0) {\n\t\t\t\ttl.BulkAddTablets(readyTablets)\n\t\t\t\ttl.RemoveTabletsWithKeyspace(removeKs)\n\t\t\t}\n\t\t\tif ratios.opRemoveHost == 0 || ((ratios.opRemoveHost != -1) && id%ratios.opRemoveHost == 0) {\n\t\t\t\ttl.RemoveTabletsWithHost(hosts[rnd.Intn(len(hosts))])\n\t\t\t}\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "tablets/tablets_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage tablets\n\nimport (\n\t\"math\"\n\t\"testing\"\n)\n\nfunc TestFindEntryForToken(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"ExactLastToken\", func(t *testing.T) {\n\t\tentries := TabletEntryList{\n\t\t\t{firstToken: -100, lastToken: 0},\n\t\t\t{firstToken: 0, lastToken: 100},\n\t\t}\n\t\tentry, ok := entries.findEntryForToken(0, 0, len(entries))\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected entry for token at exact lastToken boundary\")\n\t\t}\n\t\tif entry.lastToken != 0 {\n\t\t\tt.Fatalf(\"expected lastToken=0, got %d\", entry.lastToken)\n\t\t}\n\t})\n\n\tt.Run(\"ExactFirstToken\", func(t *testing.T) {\n\t\tentries := TabletEntryList{\n\t\t\t{firstToken: -100, lastToken: 0},\n\t\t\t{firstToken: 0, lastToken: 100},\n\t\t}\n\t\tentry, ok := entries.findEntryForToken(-100, 0, len(entries))\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected entry for token at exact firstToken boundary\")\n\t\t}\n\t\tif entry.firstToken != -100 {\n\t\t\tt.Fatalf(\"expected firstToken=-100, got %d\", entry.firstToken)\n\t\t}\n\t})\n\n\tt.Run(\"BeyondAll\", func(t *testing.T) {\n\t\tentries := TabletEntryList{\n\t\t\t{firstToken: -100, lastToken: 0},\n\t\t\t{firstToken: 0, lastToken: 100},\n\t\t}\n\t\t_, ok := entries.findEntryForToken(200, 0, len(entries))\n\t\tif ok {\n\t\t\tt.Fatal(\"expected nil for token beyond all tablets\")\n\t\t}\n\t})\n\n\tt.Run(\"BeforeAll\", func(t *testing.T) {\n\t\tentries := TabletEntryList{\n\t\t\t{firstToken: -100, lastToken: 0},\n\t\t\t{firstToken: 0, lastToken: 100},\n\t\t}\n\t\t_, ok := entries.findEntryForToken(-200, 0, len(entries))\n\t\tif ok {\n\t\t\tt.Fatal(\"expected nil for token before all tablets\")\n\t\t}\n\t})\n\n\tt.Run(\"InGap\", func(t *testing.T) {\n\t\tentries := TabletEntryList{\n\t\t\t{firstToken: -200, lastToken: -100},\n\t\t\t{firstToken: 100, lastToken: 200},\n\t\t}\n\t\t_, ok := entries.findEntryForToken(0, 0, len(entries))\n\t\tif ok {\n\t\t\tt.Fatal(\"expected nil for token in gap 
between non-contiguous tablets\")\n\t\t}\n\t})\n\n\tt.Run(\"EmptyList\", func(t *testing.T) {\n\t\tentries := TabletEntryList{}\n\t\t_, ok := entries.findEntryForToken(0, 0, 0)\n\t\tif ok {\n\t\t\tt.Fatal(\"expected nil for empty entry list\")\n\t\t}\n\t})\n\n\tt.Run(\"SingleEntry\", func(t *testing.T) {\n\t\tentries := TabletEntryList{\n\t\t\t{firstToken: -50, lastToken: 50},\n\t\t}\n\n\t\t_, ok := entries.findEntryForToken(0, 0, len(entries))\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected entry for token inside single entry\")\n\t\t}\n\n\t\t_, ok = entries.findEntryForToken(-50, 0, len(entries))\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected entry for token at firstToken of single entry\")\n\t\t}\n\n\t\t_, ok = entries.findEntryForToken(50, 0, len(entries))\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected entry for token at lastToken of single entry\")\n\t\t}\n\n\t\t_, ok = entries.findEntryForToken(-51, 0, len(entries))\n\t\tif ok {\n\t\t\tt.Fatal(\"expected nil for token before single entry\")\n\t\t}\n\n\t\t_, ok = entries.findEntryForToken(51, 0, len(entries))\n\t\tif ok {\n\t\t\tt.Fatal(\"expected nil for token after single entry\")\n\t\t}\n\t})\n\n\tt.Run(\"InvalidBounds\", func(t *testing.T) {\n\t\tentries := TabletEntryList{\n\t\t\t{firstToken: 0, lastToken: 100, replicas: []ReplicaInfo{{testHostUUID(\"host1\"), 0}}},\n\t\t}\n\n\t\ttestCases := []struct {\n\t\t\tname string\n\t\t\tl, r int\n\t\t}{\n\t\t\t{\"negative l\", -1, 1},\n\t\t\t{\"r beyond length\", 0, 10},\n\t\t\t{\"l > r\", 1, 0},\n\t\t\t{\"both invalid\", -1, 10},\n\t\t\t{\"l == r (empty range)\", 0, 0},\n\t\t}\n\n\t\tfor _, tc := range testCases {\n\t\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t\tresult, ok := entries.findEntryForToken(50, tc.l, tc.r)\n\t\t\t\tif ok {\n\t\t\t\t\tt.Errorf(\"expected nil for invalid bounds l=%d r=%d, got %+v\", tc.l, tc.r, result)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"SingleTokenTablet\", func(t *testing.T) {\n\t\tentries := TabletEntryList{\n\t\t\t{firstToken: -100, 
lastToken: -50},\n\t\t\t{firstToken: 42, lastToken: 42},\n\t\t\t{firstToken: 100, lastToken: 200},\n\t\t}\n\n\t\tentry, ok := entries.findEntryForToken(42, 0, len(entries))\n\t\tif !ok {\n\t\t\tt.Fatal(\"expected entry for single-token tablet\")\n\t\t}\n\t\tif entry.firstToken != 42 || entry.lastToken != 42 {\n\t\t\tt.Fatalf(\"expected [42,42], got [%d,%d]\", entry.firstToken, entry.lastToken)\n\t\t}\n\n\t\t_, ok = entries.findEntryForToken(41, 0, len(entries))\n\t\tif ok {\n\t\t\tt.Fatal(\"expected nil for token just before single-token tablet\")\n\t\t}\n\n\t\t_, ok = entries.findEntryForToken(43, 0, len(entries))\n\t\tif ok {\n\t\t\tt.Fatal(\"expected nil for token just after single-token tablet\")\n\t\t}\n\t})\n}\n\nfunc TestFindOverlapRange(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"ContiguousBoundary\", func(t *testing.T) {\n\t\tentries := TabletEntryList{\n\t\t\t{firstToken: 0, lastToken: 100, replicas: []ReplicaInfo{{testHostUUID(\"host1\"), 0}}},\n\t\t\t{firstToken: 200, lastToken: 300, replicas: []ReplicaInfo{{testHostUUID(\"host2\"), 1}}},\n\t\t}\n\n\t\tstart, tailStart := entries.findOverlapRange(100, 200)\n\n\t\tif start != 1 {\n\t\t\tt.Errorf(\"expected start=1 for contiguous boundary, got %d\", start)\n\t\t}\n\t\tif tailStart != 2 {\n\t\t\tt.Errorf(\"expected tailStart=2 for contiguous boundary, got %d\", tailStart)\n\t\t}\n\t})\n\n\tt.Run(\"ExtremeValues\", func(t *testing.T) {\n\t\tentries := TabletEntryList{\n\t\t\t{firstToken: math.MinInt64, lastToken: 0, replicas: []ReplicaInfo{{testHostUUID(\"host1\"), 0}}},\n\t\t\t{firstToken: 0, lastToken: math.MaxInt64, replicas: []ReplicaInfo{{testHostUUID(\"host2\"), 1}}},\n\t\t}\n\n\t\tstart, tailStart := entries.findOverlapRange(math.MinInt64, math.MaxInt64)\n\t\tif start != 0 {\n\t\t\tt.Errorf(\"expected start=0 for full range overlap, got %d\", start)\n\t\t}\n\t\tif tailStart != 2 {\n\t\t\tt.Errorf(\"expected tailStart=2 for full range overlap, got %d\", tailStart)\n\t\t}\n\n\t\tstart, tailStart = 
entries.findOverlapRange(math.MinInt64, -100)\n\t\tif start != 0 {\n\t\t\tt.Errorf(\"expected start=0 for MinInt64 range, got %d\", start)\n\t\t}\n\t\tif tailStart != 1 {\n\t\t\tt.Errorf(\"expected tailStart=1 for MinInt64 range, got %d\", tailStart)\n\t\t}\n\n\t\tstart, tailStart = entries.findOverlapRange(100, math.MaxInt64)\n\t\tif start != 1 {\n\t\t\tt.Errorf(\"expected start=1 for MaxInt64 range, got %d\", start)\n\t\t}\n\t\tif tailStart != 2 {\n\t\t\tt.Errorf(\"expected tailStart=2 for MaxInt64 range, got %d\", tailStart)\n\t\t}\n\t})\n\n\tt.Run(\"SingleEntry\", func(t *testing.T) {\n\t\tentries := TabletEntryList{\n\t\t\t{firstToken: -100, lastToken: 100, replicas: []ReplicaInfo{{testHostUUID(\"host1\"), 0}}},\n\t\t}\n\n\t\tstart, tailStart := entries.findOverlapRange(-50, 50)\n\t\tif start != 0 || tailStart != 1 {\n\t\t\tt.Errorf(\"expected start=0 tailStart=1 for overlapping range, got start=%d tailStart=%d\", start, tailStart)\n\t\t}\n\n\t\tstart, tailStart = entries.findOverlapRange(-200, 200)\n\t\tif start != 0 || tailStart != 1 {\n\t\t\tt.Errorf(\"expected start=0 tailStart=1 for extended range, got start=%d tailStart=%d\", start, tailStart)\n\t\t}\n\n\t\tstart, tailStart = entries.findOverlapRange(-200, -150)\n\t\tif start != 0 || tailStart != 0 {\n\t\t\tt.Errorf(\"expected start=0 tailStart=0 for range before, got start=%d tailStart=%d\", start, tailStart)\n\t\t}\n\n\t\tstart, tailStart = entries.findOverlapRange(150, 200)\n\t\tif start != 1 || tailStart != 1 {\n\t\t\tt.Errorf(\"expected start=1 tailStart=1 for range after, got start=%d tailStart=%d\", start, tailStart)\n\t\t}\n\n\t\tstart, tailStart = entries.findOverlapRange(-100, -50)\n\t\tif start != 0 || tailStart != 1 {\n\t\t\tt.Errorf(\"expected start=0 tailStart=1 for range sharing firstToken, got start=%d tailStart=%d\", start, tailStart)\n\t\t}\n\n\t\tstart, tailStart = entries.findOverlapRange(100, 200)\n\t\tif start != 1 || tailStart != 1 {\n\t\t\tt.Errorf(\"expected start=1 tailStart=1 
for contiguous range at lastToken, got start=%d tailStart=%d\", start, tailStart)\n\t\t}\n\t})\n\n\tt.Run(\"SingleTokenTablet\", func(t *testing.T) {\n\t\tentries := TabletEntryList{}\n\t\tstart, tailStart := entries.findOverlapRange(42, 42)\n\t\tif start != 0 || tailStart != 0 {\n\t\t\tt.Errorf(\"empty list: expected start=0 tailStart=0, got start=%d tailStart=%d\", start, tailStart)\n\t\t}\n\n\t\tentries = TabletEntryList{\n\t\t\t{firstToken: 40, lastToken: 50},\n\t\t}\n\t\tstart, tailStart = entries.findOverlapRange(42, 42)\n\t\tif start != 0 || tailStart != 1 {\n\t\t\tt.Errorf(\"contained: expected start=0 tailStart=1, got start=%d tailStart=%d\", start, tailStart)\n\t\t}\n\n\t\tentries = TabletEntryList{\n\t\t\t{firstToken: 0, lastToken: 42},\n\t\t}\n\t\tstart, tailStart = entries.findOverlapRange(42, 42)\n\t\tif start != 1 || tailStart != 1 {\n\t\t\tt.Errorf(\"adjacent: expected start=1 tailStart=1, got start=%d tailStart=%d\", start, tailStart)\n\t\t}\n\t})\n}\n\nfunc TestAddEntry(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"SingleTokenTablet\", func(t *testing.T) {\n\t\ttl := TabletEntryList{}\n\t\ttl = tl.addEntry(TabletEntry{firstToken: 42, lastToken: 42})\n\t\tif len(tl) != 1 || tl[0].firstToken != 42 || tl[0].lastToken != 42 {\n\t\t\tt.Fatalf(\"expected single [42,42] entry, got %v\", tl)\n\t\t}\n\n\t\ttl = TabletEntryList{\n\t\t\t{firstToken: -100, lastToken: -50},\n\t\t\t{firstToken: 100, lastToken: 200},\n\t\t}\n\t\ttl = tl.addEntry(TabletEntry{firstToken: 42, lastToken: 42})\n\t\tif len(tl) != 3 {\n\t\t\tt.Fatalf(\"expected 3 entries, got %d\", len(tl))\n\t\t}\n\t\tif tl[1].firstToken != 42 || tl[1].lastToken != 42 {\n\t\t\tt.Fatalf(\"expected middle entry [42,42], got [%d,%d]\", tl[1].firstToken, tl[1].lastToken)\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "testdata/pki/ca.cnf",
    "content": "[req]\ndefault_bits = 2048\nprompt = no\ndefault_md = sha256\ndistinguished_name = dn\n\n[req_ext]\nbasicConstraints = CA:TRUE\nkeyUsage = digitalSignature, keyCertSign\n\n[dn]\nCN = ca\n"
  },
  {
    "path": "testdata/pki/cassandra.cnf",
    "content": "[req]\ndefault_bits = 2048\nprompt = no\ndefault_md = sha256\ndistinguished_name = dn\n\n[dn]\nCN = cassandra\n\n[req_ext]\nbasicConstraints = CA:FALSE\nkeyUsage = digitalSignature, keyEncipherment\nsubjectAltName = @alt_names\n\n[alt_names]\nURI = spiffe://test.cassandra.apache.org/cassandra-gocql-driver/integrationTest/cassandra\n"
  },
  {
    "path": "testdata/pki/generate_certs.sh",
    "content": "#! /bin/bash\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# This script generates the various certificates used for integration\n# tests.  All certificates are created with a validity of 3650 days,\n# or 10 years.  Therefore, this only needs to be used sparingly,\n# although could eventually be repurposed to regenerate certificates\n# as part of setting up the integration test harness. 
\n\nset -eux\n\n# How long certificates should be considered valid, 100 years\nVALIDITY=36500\n\n# Generate a 4096-bit RSA private key (created AES-256-encrypted, then decrypted in place)\nfunction generatePrivateKey() {\n    base=$1\n    rm -fv ${base}.key || true\n    echo \"Generating private key ${base}.key\"\n    # Generate Private Key\n    openssl genrsa -aes256 -out ${base}.key -passout pass:cassandra 4096\n    echo \"Decrypting ${base}.key\"\n    # Decrypt Private Key\n    openssl rsa -in ${base}.key -out ${base}.key -passin pass:cassandra\n}\n\n# Generate an X509 Certificate signed by the generated CA\nfunction generateCASignedCert() {\n    base=$1\n    rm -fv ${base}.csr ${base}.crt || true\n    # Generate Certificate Signing Request\n    echo \"Generating certificate signing request ${base}.csr\"\n    openssl req -new -key ${base}.key -out ${base}.csr -config ${base}.cnf\n    # Generate Certificate using CA\n    echo \"Generating certificate ${base}.crt\"\n    openssl x509 -req -in ${base}.csr -CA ca.crt -CAkey ca.key \\\n                 -CAcreateserial -out ${base}.crt -days $VALIDITY \\\n                 -extensions req_ext -extfile ${base}.cnf -text\n    rm -fv ${base}.csr || true\n}\n\n# CA\n# Generate CA that signs both gocql and cassandra certs\ngeneratePrivateKey ca\n# Generate CA Certificate\necho \"Generating CA certificate ca.crt\"\nrm -fv ca.crt || true\nopenssl req -x509 -new -nodes -key ca.key -days $VALIDITY \\\n            -out ca.crt -config ca.cnf -text\n\n# Import CA certificate into JKS truststore so it can be used by Cassandra.\necho \"Generating truststore .truststore for Cassandra\"\nrm -fv .truststore || true\nkeytool -import -keystore .truststore -trustcacerts \\\n        -file ca.crt -alias ca -storetype JKS \\\n        -storepass cassandra -noprompt\n\n# GoCQL\n# Generate CA-signed certificate for GoCQL client for integration tests\ngeneratePrivateKey gocql\ngenerateCASignedCert gocql\n\n# Cassandra \n# Generate CA-signed certificate for 
Cassandra\ngeneratePrivateKey cassandra\ngenerateCASignedCert cassandra\n\n# Import cassandra private key and certificate into a PKCS12 keystore\n# and to a JKS keystore so it can be used by cassandra.\necho \"Generating cassandra.p12 and .keystore for Cassandra\"\nrm -fv cassandra.p12 || true\nopenssl pkcs12 -export -in cassandra.crt -inkey cassandra.key \\\n               -out cassandra.p12 -name cassandra \\\n               -CAfile ca.crt -caname ca \\\n               -password pass:cassandra \\\n               -noiter -nomaciter\n\nrm -fv .keystore || true\nkeytool -importkeystore -srckeystore cassandra.p12 -srcstoretype PKCS12 \\\n\t-srcstorepass cassandra -srcalias cassandra \\\n\t-destkeystore .keystore -deststoretype JKS \\\n\t-deststorepass cassandra -destalias cassandra\n"
  },
  {
    "path": "testdata/pki/gocql.cnf",
    "content": "[req]\ndefault_bits = 2048\nprompt = no\ndefault_md = sha256\ndistinguished_name = dn\n\n[dn]\nCN = gocql\n\n[req_ext]\nbasicConstraints = CA:FALSE\nkeyUsage = digitalSignature, keyEncipherment\nsubjectAltName = @alt_names\n\n[alt_names]\nURI = spiffe://test.cassandra.apache.org/cassandra-gocql-driver/integrationTest/gocql\n"
  },
  {
    "path": "testdata/recreate/aggregates.cql",
    "content": "CREATE KEYSPACE gocqlx_aggregates WITH replication = {\n  'class': 'NetworkTopologyStrategy',\n  'replication_factor': '2'\n};\n\nCREATE FUNCTION gocqlx_aggregates.avgstate(\n  state tuple<int, double>,\n  val double)\nCALLED ON NULL INPUT\nRETURNS frozen<tuple<int, double>>\nLANGUAGE lua\nAS $$\n  return { state[1]+1, state[2]+val }\n  $$;\n\nCREATE FUNCTION gocqlx_aggregates.avgfinal(\n  state tuple<int, double>)\nCALLED ON NULL INPUT\nRETURNS double\nLANGUAGE lua\nas $$\n  r=0\n  r=state[2]\n  r=r/state[1]\n  return r\n  $$;\n\nCREATE AGGREGATE gocqlx_aggregates.average(double)\nSFUNC avgstate STYPE tuple<int, double>\nFINALFUNC avgfinal\nINITCOND (0,0.0);\n"
  },
  {
    "path": "testdata/recreate/aggregates_golden.cql",
    "content": "CREATE KEYSPACE gocqlx_aggregates WITH replication = {'class': 'org.apache.cassandra.locator.NetworkTopologyStrategy', 'datacenter1': '2'} AND durable_writes = true;\nCREATE FUNCTION gocqlx_aggregates.avgfinal(state frozen<tuple<int, double>>)\nCALLED ON NULL INPUT\nRETURNS double\nLANGUAGE lua\nAS $$\n  r=0\n  r=state[2]\n  r=r/state[1]\n  return r\n  \n$$;\nCREATE FUNCTION gocqlx_aggregates.avgstate(state frozen<tuple<int, double>>, val double)\nCALLED ON NULL INPUT\nRETURNS frozen<tuple<int, double>>\nLANGUAGE lua\nAS $$\n  return { state[1]+1, state[2]+val }\n  \n$$;\nCREATE AGGREGATE gocqlx_aggregates.average(double)\nSFUNC avgstate\nSTYPE frozen<tuple<int, double>>\nFINALFUNC avgfinal\nINITCOND (0, 0);"
  },
  {
    "path": "testdata/recreate/index.cql",
    "content": "CREATE KEYSPACE gocqlx_idx WITH replication = {\n  'class': 'NetworkTopologyStrategy',\n  'replication_factor': '2'\n};\n\nCREATE TABLE gocqlx_idx.menus (\n    location text,\n    name text,\n    price float,\n    dish_type text,\n    PRIMARY KEY(location, name)\n);\n\nCREATE INDEX ON gocqlx_idx.menus(name);\n"
  },
  {
    "path": "testdata/recreate/index_golden.cql",
    "content": "CREATE KEYSPACE gocqlx_idx WITH replication = {'class': 'org.apache.cassandra.locator.NetworkTopologyStrategy', 'datacenter1': '2'} AND durable_writes = true;\nCREATE TABLE gocqlx_idx.menus (\n    location text,\n    name text,\n    dish_type text,\n    price float,\n    PRIMARY KEY (location, name)\n) WITH CLUSTERING ORDER BY (name ASC)\n    AND bloom_filter_fp_chance = 0.01\n    AND caching = {'keys': 'ALL', 'rows_per_partition': 'ALL'}\n    AND comment = ''\n    AND compaction = {'class': 'SizeTieredCompactionStrategy'}\n    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}\n    AND crc_check_chance = 1\n    AND default_time_to_live = 0\n    AND gc_grace_seconds = 864000\n    AND max_index_interval = 2048\n    AND memtable_flush_period_in_ms = 0\n    AND min_index_interval = 128\n    AND speculative_retry = '99.0PERCENTILE'\n    AND paxos_grace_seconds = 864000\n    AND tombstone_gc = {'mode': 'timeout', 'propagation_delay_in_seconds': '3600'};\nCREATE INDEX menus_name_idx ON gocqlx_idx.menus(name);\n"
  },
  {
    "path": "testdata/recreate/keyspace.cql",
    "content": "CREATE KEYSPACE gocqlx_keyspace WITH replication = {\n  'class': 'NetworkTopologyStrategy',\n  'replication_factor': '2'\n};\n"
  },
  {
    "path": "testdata/recreate/keyspace_golden.cql",
    "content": "CREATE KEYSPACE gocqlx_keyspace WITH replication = {'class': 'org.apache.cassandra.locator.NetworkTopologyStrategy', 'datacenter1': '2'} AND durable_writes = true;"
  },
  {
    "path": "testdata/recreate/materialized_views.cql",
    "content": "CREATE KEYSPACE gocqlx_mv WITH replication = {\n  'class': 'NetworkTopologyStrategy',\n  'replication_factor': '2'\n};\n\nCREATE TABLE gocqlx_mv.mv_buildings (\n    name text,\n    city text,\n    built int,\n    meters int,\n    PRIMARY KEY (name)\n);\n\nCREATE MATERIALIZED VIEW gocqlx_mv.mv_building_by_city AS\n    SELECT * FROM mv_buildings\n    WHERE city IS NOT NULL\n    PRIMARY KEY(city, name);\n\nCREATE MATERIALIZED VIEW gocqlx_mv.mv_building_by_city2 AS\n    SELECT meters FROM mv_buildings\n    WHERE city IS NOT NULL\n    PRIMARY KEY(city, name);\n"
  },
  {
    "path": "testdata/recreate/materialized_views_golden.cql",
    "content": "CREATE KEYSPACE gocqlx_mv WITH replication = {'class': 'org.apache.cassandra.locator.NetworkTopologyStrategy', 'datacenter1': '2'} AND durable_writes = true;\nCREATE TABLE gocqlx_mv.mv_buildings (\n    name text,\n    built int,\n    city text,\n    meters int,\n    PRIMARY KEY (name)\n) WITH bloom_filter_fp_chance = 0.01\n    AND caching = {'keys': 'ALL', 'rows_per_partition': 'ALL'}\n    AND comment = ''\n    AND compaction = {'class': 'SizeTieredCompactionStrategy'}\n    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}\n    AND crc_check_chance = 1\n    AND default_time_to_live = 0\n    AND gc_grace_seconds = 864000\n    AND max_index_interval = 2048\n    AND memtable_flush_period_in_ms = 0\n    AND min_index_interval = 128\n    AND speculative_retry = '99.0PERCENTILE'\n    AND paxos_grace_seconds = 864000\n    AND tombstone_gc = {'mode': 'timeout', 'propagation_delay_in_seconds': '3600'};\nCREATE MATERIALIZED VIEW gocqlx_mv.mv_building_by_city AS\n    SELECT city, name, built, meters\n    FROM gocqlx_mv.mv_buildings\n    WHERE city IS NOT null\n    PRIMARY KEY (city, name)\n    WITH CLUSTERING ORDER BY (name ASC)\n    AND bloom_filter_fp_chance = 0.01\n    AND caching = {'keys': 'ALL', 'rows_per_partition': 'ALL'}\n    AND comment = ''\n    AND compaction = {'class': 'SizeTieredCompactionStrategy'}\n    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}\n    AND crc_check_chance = 1\n    AND default_time_to_live = 0\n    AND gc_grace_seconds = 864000\n    AND max_index_interval = 2048\n    AND memtable_flush_period_in_ms = 0\n    AND min_index_interval = 128\n    AND speculative_retry = '99.0PERCENTILE'\n    AND paxos_grace_seconds = 864000\n    AND tombstone_gc = {'mode': 'timeout', 'propagation_delay_in_seconds': '3600'};\nCREATE MATERIALIZED VIEW gocqlx_mv.mv_building_by_city2 AS\n    SELECT city, name, meters\n    FROM gocqlx_mv.mv_buildings\n    WHERE city 
IS NOT null\n    PRIMARY KEY (city, name)\n    WITH CLUSTERING ORDER BY (name ASC)\n    AND bloom_filter_fp_chance = 0.01\n    AND caching = {'keys': 'ALL', 'rows_per_partition': 'ALL'}\n    AND comment = ''\n    AND compaction = {'class': 'SizeTieredCompactionStrategy'}\n    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}\n    AND crc_check_chance = 1\n    AND default_time_to_live = 0\n    AND gc_grace_seconds = 864000\n    AND max_index_interval = 2048\n    AND memtable_flush_period_in_ms = 0\n    AND min_index_interval = 128\n    AND speculative_retry = '99.0PERCENTILE'\n    AND paxos_grace_seconds = 864000\n    AND tombstone_gc = {'mode': 'timeout', 'propagation_delay_in_seconds': '3600'};\n"
  },
  {
    "path": "testdata/recreate/scylla_encryption_options_golden.json",
    "content": "{\"cipher_algorithm\":\"AES/ECB/PKCS5Padding\",\"secret_key_strength\":128,\"key_provider\":\"LocalFileSystemKeyProviderFactory\",\"secret_key_file\":\"/etc/scylla/encryption_keys/data_encryption_keys\"}"
  },
  {
    "path": "testdata/recreate/secondary_index.cql",
    "content": "CREATE KEYSPACE gocqlx_sec_idx WITH replication = {\n  'class': 'NetworkTopologyStrategy',\n  'replication_factor': '2'\n};\n\n\nCREATE TABLE gocqlx_sec_idx.menus (\n    location text,\n    name text,\n    price float,\n    dish_type text,\n    PRIMARY KEY(location, name)\n);\n\nCREATE INDEX ON gocqlx_sec_idx.menus((location), name);\n"
  },
  {
    "path": "testdata/recreate/secondary_index_golden.cql",
    "content": "CREATE KEYSPACE gocqlx_sec_idx WITH replication = {'class': 'org.apache.cassandra.locator.NetworkTopologyStrategy', 'datacenter1': '2'} AND durable_writes = true;\nCREATE TABLE gocqlx_sec_idx.menus (\n    location text,\n    name text,\n    dish_type text,\n    price float,\n    PRIMARY KEY (location, name)\n) WITH CLUSTERING ORDER BY (name ASC)\n    AND bloom_filter_fp_chance = 0.01\n    AND caching = {'keys': 'ALL', 'rows_per_partition': 'ALL'}\n    AND comment = ''\n    AND compaction = {'class': 'SizeTieredCompactionStrategy'}\n    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}\n    AND crc_check_chance = 1\n    AND default_time_to_live = 0\n    AND gc_grace_seconds = 864000\n    AND max_index_interval = 2048\n    AND memtable_flush_period_in_ms = 0\n    AND min_index_interval = 128\n    AND speculative_retry = '99.0PERCENTILE'\n    AND paxos_grace_seconds = 864000\n    AND tombstone_gc = {'mode': 'timeout', 'propagation_delay_in_seconds': '3600'};\nCREATE INDEX menus_name_idx ON gocqlx_sec_idx.menus((location), name);\n"
  },
  {
    "path": "testdata/recreate/table.cql",
    "content": "CREATE KEYSPACE gocqlx_table WITH replication = {\n  'class': 'NetworkTopologyStrategy',\n  'replication_factor': '2'\n};\n\nCREATE TABLE gocqlx_table.monkeySpecies (\n    species text PRIMARY KEY,\n    common_name text,\n    population varint,\n    average_size int\n) WITH comment='Important biological records';\n\nCREATE TABLE gocqlx_table.timeline (\n    userid uuid,\n    posted_month int,\n    posted_time uuid,\n    body text,\n    posted_by text,\n    PRIMARY KEY (userid, posted_month, posted_time)\n) WITH compaction = { 'class' : 'LeveledCompactionStrategy' };\n\nCREATE TABLE gocqlx_table.loads (\n    machine inet,\n    cpu int,\n    mtime timeuuid,\n    load float,\n    PRIMARY KEY ((machine, cpu), mtime)\n) WITH CLUSTERING ORDER BY (mtime DESC)\n    AND caching = {'keys':'ALL', 'rows_per_partition':'NONE'}\n    AND compaction = {'compaction_window_size': '14',\n    \t\t\t\t  'compaction_window_unit': 'DAYS',\n    \t\t\t\t  'class': 'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy'};\n\nCREATE TABLE gocqlx_table.users_picture (\n    userid uuid,\n    pictureid uuid,\n    body text static,\n    posted_by text,\n    PRIMARY KEY (userid, pictureid, posted_by)\n) WITH compression = {'sstable_compression': 'LZ4Compressor'};\n"
  },
  {
    "path": "testdata/recreate/table_golden.cql",
    "content": "CREATE KEYSPACE gocqlx_table WITH replication = {'class': 'org.apache.cassandra.locator.NetworkTopologyStrategy', 'datacenter1': '2'} AND durable_writes = true;\nCREATE TABLE gocqlx_table.loads (\n    machine inet,\n    cpu int,\n    mtime timeuuid,\n    load float,\n    PRIMARY KEY ((machine, cpu), mtime)\n) WITH CLUSTERING ORDER BY (mtime DESC)\n    AND bloom_filter_fp_chance = 0.01\n    AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}\n    AND comment = ''\n    AND compaction = {'class': 'TimeWindowCompactionStrategy', 'compaction_window_size': '14', 'compaction_window_unit': 'DAYS'}\n    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}\n    AND crc_check_chance = 1\n    AND default_time_to_live = 0\n    AND gc_grace_seconds = 864000\n    AND max_index_interval = 2048\n    AND memtable_flush_period_in_ms = 0\n    AND min_index_interval = 128\n    AND speculative_retry = '99.0PERCENTILE'\n    AND paxos_grace_seconds = 864000\n    AND tombstone_gc = {'mode': 'timeout', 'propagation_delay_in_seconds': '3600'};\nCREATE TABLE gocqlx_table.monkeyspecies (\n    species text,\n    average_size int,\n    common_name text,\n    population varint,\n    PRIMARY KEY (species)\n) WITH bloom_filter_fp_chance = 0.01\n    AND caching = {'keys': 'ALL', 'rows_per_partition': 'ALL'}\n    AND comment = 'Important biological records'\n    AND compaction = {'class': 'SizeTieredCompactionStrategy'}\n    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}\n    AND crc_check_chance = 1\n    AND default_time_to_live = 0\n    AND gc_grace_seconds = 864000\n    AND max_index_interval = 2048\n    AND memtable_flush_period_in_ms = 0\n    AND min_index_interval = 128\n    AND speculative_retry = '99.0PERCENTILE'\n    AND paxos_grace_seconds = 864000\n    AND tombstone_gc = {'mode': 'timeout', 'propagation_delay_in_seconds': '3600'};\nCREATE TABLE gocqlx_table.timeline (\n    userid 
uuid,\n    posted_month int,\n    posted_time uuid,\n    body text,\n    posted_by text,\n    PRIMARY KEY (userid, posted_month, posted_time)\n) WITH CLUSTERING ORDER BY (posted_month ASC, posted_time ASC)\n    AND bloom_filter_fp_chance = 0.01\n    AND caching = {'keys': 'ALL', 'rows_per_partition': 'ALL'}\n    AND comment = ''\n    AND compaction = {'class': 'LeveledCompactionStrategy'}\n    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}\n    AND crc_check_chance = 1\n    AND default_time_to_live = 0\n    AND gc_grace_seconds = 864000\n    AND max_index_interval = 2048\n    AND memtable_flush_period_in_ms = 0\n    AND min_index_interval = 128\n    AND speculative_retry = '99.0PERCENTILE'\n    AND paxos_grace_seconds = 864000\n    AND tombstone_gc = {'mode': 'timeout', 'propagation_delay_in_seconds': '3600'};\nCREATE TABLE gocqlx_table.users_picture (\n    userid uuid,\n    pictureid uuid,\n    posted_by text,\n    body text STATIC,\n    PRIMARY KEY (userid, pictureid, posted_by)\n) WITH CLUSTERING ORDER BY (pictureid ASC, posted_by ASC)\n    AND bloom_filter_fp_chance = 0.01\n    AND caching = {'keys': 'ALL', 'rows_per_partition': 'ALL'}\n    AND comment = ''\n    AND compaction = {'class': 'SizeTieredCompactionStrategy'}\n    AND compression = {'sstable_compression': 'org.apache.cassandra.io.compress.LZ4Compressor'}\n    AND crc_check_chance = 1\n    AND default_time_to_live = 0\n    AND gc_grace_seconds = 864000\n    AND max_index_interval = 2048\n    AND memtable_flush_period_in_ms = 0\n    AND min_index_interval = 128\n    AND speculative_retry = '99.0PERCENTILE'\n    AND paxos_grace_seconds = 864000\n    AND tombstone_gc = {'mode': 'timeout', 'propagation_delay_in_seconds': '3600'};\n"
  },
  {
    "path": "testdata/recreate/udt.cql",
    "content": "CREATE KEYSPACE gocqlx_udt WITH replication = {\n  'class': 'NetworkTopologyStrategy',\n  'replication_factor': '2'\n};\n\nCREATE TYPE gocqlx_udt.phone (\n    country_code int,\n    number text\n);\n\nCREATE TYPE gocqlx_udt.address (\n    street text,\n    city text,\n    zip text,\n    phones map<text, frozen<phone>>\n);\n"
  },
  {
    "path": "testdata/recreate/udt_golden.cql",
    "content": "CREATE KEYSPACE gocqlx_udt WITH replication = {'class': 'org.apache.cassandra.locator.NetworkTopologyStrategy', 'datacenter1': '2'} AND durable_writes = true;\nCREATE TYPE gocqlx_udt.phone (\n    country_code int,\n    number text\n);\nCREATE TYPE gocqlx_udt.address (\n    street text,\n    city text,\n    zip text,\n    phones map<text, frozen<phone>>\n);"
  },
  {
    "path": "tests/bench/bench_marshal_test.go",
    "content": "package bench_test\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"math/rand\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/brianvoe/gofakeit/v6\"\n\t\"github.com/gocql/gocql\"\n)\n\nfunc generateRandomBinaryData(size int) []byte {\n\trnd := rand.New(rand.NewSource(100))\n\trandomBuffer := make([]byte, size)\n\tio.ReadAtLeast(rnd, randomBuffer, size)\n\treturn randomBuffer\n}\n\ntype RandomData struct {\n\tID        string `json:\"id\"`\n\tFirstName string `json:\"first_name\"`\n\tLastName  string `json:\"last_name\"`\n\tEmail     string `json:\"email\"`\n\tCity      string `json:\"city\"`\n\tState     string `json:\"state\"`\n\tZip       string `json:\"zip\"`\n\tPhone     string `json:\"phone\"`\n}\n\nfunc generateRandomJSON(size int) string {\n\tgofakeit.Seed(100)\n\tvar jsonData []byte\n\tvar randomData []RandomData\n\tcurrentLength := 0\n\n\tfor currentLength < size {\n\t\tdata := RandomData{\n\t\t\tID:        gofakeit.UUID(),\n\t\t\tFirstName: gofakeit.FirstName(),\n\t\t\tLastName:  gofakeit.LastName(),\n\t\t\tEmail:     gofakeit.Email(),\n\t\t\tCity:      gofakeit.City(),\n\t\t\tState:     gofakeit.State(),\n\t\t\tZip:       gofakeit.Zip(),\n\t\t\tPhone:     gofakeit.Phone(),\n\t\t}\n\t\trandomData = append(randomData, data)\n\n\t\ttempData, _ := json.Marshal(randomData)\n\t\tcurrentLength = len(tempData)\n\t\tjsonData = tempData\n\t}\n\n\treturn string(jsonData)\n}\n\nfunc BenchmarkSerialization(b *testing.B) {\n\tb.Run(\"SimpleTypes\", func(b *testing.B) {\n\t\tb.Run(\"Int\", func(b *testing.B) {\n\t\t\ttType := gocql.NewNativeType(4, gocql.TypeInt)\n\t\t\tvar val int = 42\n\t\t\tb.Run(\"Marshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t_, err := gocql.Marshal(tType, val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t\tmarshaled, err := gocql.Marshal(tType, val)\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tb.Run(\"Unmarshal\", func(b *testing.B) 
{\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\tvar unmarshaled int\n\t\t\t\t\terr = gocql.Unmarshal(tType, marshaled, &unmarshaled)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\n\t\tcases := []struct {\n\t\t\tname string\n\t\t\tsize int\n\t\t}{\n\t\t\t{\"Small-100b\", 100},\n\t\t\t{\"Medium-1kb\", 1024},\n\t\t\t{\"Big-1M\", 1024 * 1024},\n\t\t}\n\n\t\tfor _, c := range cases {\n\t\t\tb.Run(\"Blob\"+c.name, func(b *testing.B) {\n\t\t\t\ttType := gocql.NewNativeType(4, gocql.TypeBlob)\n\t\t\t\tval := generateRandomBinaryData(c.size)\n\t\t\t\tb.Run(\"Marshal\", func(b *testing.B) {\n\t\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t\t_, err := gocql.Marshal(tType, val)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t\tmarshaled, err := gocql.Marshal(tType, val)\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tb.Run(\"Unmarshal\", func(b *testing.B) {\n\t\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t\tvar unmarshaled []byte\n\t\t\t\t\t\terr = gocql.Unmarshal(tType, marshaled, &unmarshaled)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t})\n\t\t}\n\n\t\tfor _, c := range cases {\n\t\t\tb.Run(\"Text\"+c.name, func(b *testing.B) {\n\t\t\t\ttType := gocql.NewNativeType(4, gocql.TypeText)\n\t\t\t\tval := generateRandomJSON(c.size)\n\t\t\t\tb.Run(\"Marshal\", func(b *testing.B) {\n\t\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t\t_, err := gocql.Marshal(tType, val)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t\tmarshaled, err := gocql.Marshal(tType, val)\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tb.Run(\"Unmarshal\", func(b *testing.B) {\n\t\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t\tvar unmarshaled string\n\t\t\t\t\t\terr = gocql.Unmarshal(tType, marshaled, &unmarshaled)\n\t\t\t\t\t\tif err != nil 
{\n\t\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t})\n\t\t}\n\n\t\tb.Run(\"UUID\", func(b *testing.B) {\n\t\t\ttType := gocql.NewNativeType(4, gocql.TypeUUID)\n\t\t\tval := gocql.UUID{}\n\t\t\tb.Run(\"Marshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t_, err := gocql.Marshal(tType, val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t\tmarshaled, err := gocql.Marshal(tType, val)\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tb.Run(\"Unmarshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\tvar unmarshaled gocql.UUID\n\t\t\t\t\terr = gocql.Unmarshal(tType, marshaled, &unmarshaled)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\n\t\tb.Run(\"Duration\", func(b *testing.B) {\n\t\t\ttType := gocql.NewNativeType(4, gocql.TypeDuration)\n\t\t\tval := gocql.Duration{Nanoseconds: 300000000000}\n\t\t\tb.Run(\"Marshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t_, err := gocql.Marshal(tType, val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t\tmarshaled, err := gocql.Marshal(tType, val)\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tb.Run(\"Unmarshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\tvar unmarshaled gocql.Duration\n\t\t\t\t\terr = gocql.Unmarshal(tType, marshaled, &unmarshaled)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\n\t\tb.Run(\"Timestamp\", func(b *testing.B) {\n\t\t\ttType := gocql.NewNativeType(4, gocql.TypeTimestamp)\n\t\t\tval := time.Now()\n\t\t\tb.Run(\"Marshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t_, err := gocql.Marshal(tType, val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t\tmarshaled, err := gocql.Marshal(tType, 
val)\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tb.Run(\"Unmarshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\tvar unmarshaled time.Time\n\t\t\t\t\terr = gocql.Unmarshal(tType, marshaled, &unmarshaled)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t})\n\n\tb.Run(\"ComplexTypes\", func(b *testing.B) {\n\t\tb.Run(\"List\", func(b *testing.B) {\n\t\t\ttType := gocql.CollectionType{\n\t\t\t\tNativeType: gocql.NewNativeType(4, gocql.TypeList),\n\t\t\t\tElem:       gocql.NewNativeType(4, gocql.TypeText),\n\t\t\t}\n\t\t\tval := []string{\"foo\", \"bar\", \"baz\"}\n\t\t\tb.Run(\"Marshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t_, err := gocql.Marshal(tType, val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t\tmarshaled, err := gocql.Marshal(tType, val)\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tb.Run(\"Unmarshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\tvar unmarshaled []string\n\t\t\t\t\terr = gocql.Unmarshal(tType, marshaled, &unmarshaled)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\n\t\tb.Run(\"Map\", func(b *testing.B) {\n\t\t\ttType := gocql.CollectionType{\n\t\t\t\tNativeType: gocql.NewNativeType(4, gocql.TypeMap),\n\t\t\t\tKey:        gocql.NewNativeType(4, gocql.TypeVarchar),\n\t\t\t\tElem:       gocql.NewNativeType(4, gocql.TypeInt),\n\t\t\t}\n\t\t\tval := map[string]int{\"a\": 1, \"b\": 2}\n\t\t\tb.Run(\"Marshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t_, err := gocql.Marshal(tType, val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t\tmarshaled, err := gocql.Marshal(tType, val)\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tb.Run(\"Unmarshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ 
{\n\t\t\t\t\tvar unmarshaled map[string]int\n\t\t\t\t\terr = gocql.Unmarshal(tType, marshaled, &unmarshaled)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\n\t\tb.Run(\"Set\", func(b *testing.B) {\n\t\t\ttType := gocql.CollectionType{\n\t\t\t\tNativeType: gocql.NewNativeType(4, gocql.TypeSet),\n\t\t\t\tElem:       gocql.NewNativeType(4, gocql.TypeInt),\n\t\t\t}\n\t\t\tval := map[int]struct{}{1: {}, 2: {}}\n\t\t\tb.Run(\"Marshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t_, err := gocql.Marshal(tType, val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t\tmarshaled, err := gocql.Marshal(tType, val)\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tb.Run(\"Unmarshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\tvar unmarshaled []int\n\t\t\t\t\terr = gocql.Unmarshal(tType, marshaled, &unmarshaled)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\n\t\tb.Run(\"UDT\", func(b *testing.B) {\n\t\t\ttype MyUDT struct {\n\t\t\t\tID    gocql.UUID\n\t\t\t\tName  string\n\t\t\t\tValue int\n\t\t\t}\n\n\t\t\tval := MyUDT{\n\t\t\t\tID:    gocql.UUID{},\n\t\t\t\tName:  \"test udt\",\n\t\t\t\tValue: 123,\n\t\t\t}\n\n\t\t\ttType := gocql.UDTTypeInfo{\n\t\t\t\tNativeType: gocql.NewNativeType(4, gocql.TypeUDT),\n\t\t\t\tName:       \"myudt\",\n\t\t\t\tKeySpace:   \"myks\",\n\t\t\t\tElements: []gocql.UDTField{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"id\",\n\t\t\t\t\t\tType: gocql.NewNativeType(4, gocql.TypeUUID),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t\tType: gocql.NewNativeType(4, gocql.TypeText),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"value\",\n\t\t\t\t\t\tType: gocql.NewNativeType(4, gocql.TypeInt),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tb.Run(\"Marshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t_, err := 
gocql.Marshal(tType, val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t\tmarshaled, err := gocql.Marshal(tType, val)\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tb.Run(\"Unmarshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\tvar unmarshaled MyUDT\n\t\t\t\t\terr = gocql.Unmarshal(tType, marshaled, &unmarshaled)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\n\t\tb.Run(\"Tuple\", func(b *testing.B) {\n\t\t\tval := struct {\n\t\t\t\tField1 int\n\t\t\t\tField2 string\n\t\t\t}{\n\t\t\t\tField1: 1,\n\t\t\t\tField2: \"test tuple\",\n\t\t\t}\n\n\t\t\ttType := gocql.TupleTypeInfo{\n\t\t\t\tNativeType: gocql.NewNativeType(4, gocql.TypeTuple),\n\t\t\t\tElems: []gocql.TypeInfo{\n\t\t\t\t\tgocql.NewNativeType(4, gocql.TypeInt),\n\t\t\t\t\tgocql.NewNativeType(4, gocql.TypeText),\n\t\t\t\t},\n\t\t\t}\n\t\t\tb.Run(\"Marshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t_, err := gocql.Marshal(tType, val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t\tmarshaled, err := gocql.Marshal(tType, val)\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tb.Run(\"Unmarshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\tvar unmarshaled struct {\n\t\t\t\t\t\tField1 int\n\t\t\t\t\t\tField2 string\n\t\t\t\t\t}\n\t\t\t\t\terr = gocql.Unmarshal(tType, marshaled, &unmarshaled)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t})\n\n\tb.Run(\"NestedTypes\", func(b *testing.B) {\n\t\tb.Run(\"3-lvl\", func(b *testing.B) {\n\t\t\ttype MyUDT struct {\n\t\t\t\tID    gocql.UUID\n\t\t\t\tName  string\n\t\t\t\tValue int\n\t\t\t}\n\n\t\t\tval := []map[string]MyUDT{\n\t\t\t\t{\n\t\t\t\t\t\"key1\": {ID: gocql.UUID{}, Name: \"name1\", Value: 123},\n\t\t\t\t\t\"key2\": {ID: gocql.UUID{}, Name: \"name2\", Value: 
456},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"key3\": {ID: gocql.UUID{}, Name: \"name3\", Value: 789},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\ttType := gocql.CollectionType{\n\t\t\t\tNativeType: gocql.NewNativeType(4, gocql.TypeList),\n\t\t\t\tElem: gocql.CollectionType{\n\t\t\t\t\tNativeType: gocql.NewNativeType(4, gocql.TypeMap),\n\t\t\t\t\tKey:        gocql.NewNativeType(4, gocql.TypeText),\n\t\t\t\t\tElem: gocql.UDTTypeInfo{\n\t\t\t\t\t\tNativeType: gocql.NewNativeType(4, gocql.TypeUDT),\n\t\t\t\t\t\tName:       \"myudt\",\n\t\t\t\t\t\tKeySpace:   \"myks\",\n\t\t\t\t\t\tElements: []gocql.UDTField{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"id\",\n\t\t\t\t\t\t\t\tType: gocql.NewNativeType(4, gocql.TypeUUID),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t\t\t\tType: gocql.NewNativeType(4, gocql.TypeText),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"value\",\n\t\t\t\t\t\t\t\tType: gocql.NewNativeType(4, gocql.TypeInt),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tb.Run(\"Marshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t_, err := gocql.Marshal(tType, val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t\tmarshaled, err := gocql.Marshal(tType, val)\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tb.Run(\"Unmarshal\", func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\tvar unmarshaled []map[string]MyUDT\n\t\t\t\t\terr = gocql.Unmarshal(tType, marshaled, &unmarshaled)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t})\n}\n"
  },
  {
    "path": "tests/bench/bench_single_conn_test.go",
    "content": "package bench_test\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/dialer/recorder\"\n\t\"github.com/gocql/gocql/dialer/replayer\"\n)\n\nfunc InitializeCluster() error {\n\tcluster := gocql.NewCluster(\"192.168.100.11\")\n\tcluster.Consistency = gocql.Quorum\n\n\tfallback := gocql.RoundRobinHostPolicy()\n\tcluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(fallback)\n\n\texecutor, err := gocql.NewSingleHostQueryExecutor(cluster)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create executor: %v\", err)\n\t}\n\tdefer executor.Close()\n\n\tkeyspace := \"single_conn_bench\"\n\n\terr = executor.Exec(`DROP KEYSPACE IF EXISTS ` + keyspace)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to drop keyspace: %v\", err)\n\t}\n\n\terr = executor.Exec(fmt.Sprintf(`CREATE KEYSPACE %s WITH replication = {'class' : 'NetworkTopologyStrategy','replication_factor' : 1}`, keyspace))\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create keyspace: %v\", err)\n\t}\n\n\tif err = executor.Exec(fmt.Sprintf(`CREATE TABLE %s.%s (pk int, ck int, v text, PRIMARY KEY (pk));\n\t`, keyspace, \"table1\")); err != nil {\n\t\treturn fmt.Errorf(\"unable to create table: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc RecordSelectTraffic(size int, dir string) error {\n\tcluster := gocql.NewCluster(\"192.168.100.11\")\n\tcluster.Consistency = gocql.Quorum\n\n\tcluster.Dialer = recorder.NewRecordDialer(dir)\n\n\tfallback := gocql.RoundRobinHostPolicy()\n\tcluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(fallback)\n\n\texecutor, err := gocql.NewSingleHostQueryExecutor(cluster)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create executor: %v\", err)\n\t}\n\tdefer executor.Close()\n\n\tfor i := 0; i < size; i++ {\n\t\titer := executor.Iter(`SELECT v FROM single_conn_bench.table1 WHERE pk = ?;`, i)\n\t\tvar name string\n\t\tfor iter.Scan(&name) 
{\n\t\t\tif name[:4] != \"Name\" {\n\t\t\t\treturn fmt.Errorf(\"got wrong value for name: %s\", name)\n\t\t\t}\n\t\t}\n\t\tif err := iter.Close(); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to close iterator: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc RecordInsertTraffic(size int, dir string) error {\n\tcluster := gocql.NewCluster(\"192.168.100.11\")\n\tcluster.Consistency = gocql.Quorum\n\n\tcluster.Dialer = recorder.NewRecordDialer(dir)\n\n\tfallback := gocql.RoundRobinHostPolicy()\n\tcluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(fallback)\n\n\texecutor, err := gocql.NewSingleHostQueryExecutor(cluster)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create executor: %v\", err)\n\t}\n\tdefer executor.Close()\n\n\tfor i := 0; i < size; i++ {\n\t\terr = executor.Exec(`INSERT INTO single_conn_bench.table1 (pk, ck, v) VALUES (?, ?, ?);`, i, i%5, fmt.Sprintf(\"Name_%d\", i))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to insert: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc BenchmarkSingleConnectionSelect(b *testing.B) {\n\tcluster := gocql.NewCluster(\"192.168.100.11\")\n\tcluster.Consistency = gocql.Quorum\n\n\tcluster.Dialer = replayer.NewReplayDialer(\"rec_select\")\n\n\tfallback := gocql.RoundRobinHostPolicy()\n\tcluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(fallback)\n\n\texecutor, err := gocql.NewSingleHostQueryExecutor(cluster)\n\tif err != nil {\n\t\tb.Fatalf(\"failed to create executor: %v\", err)\n\t}\n\tdefer executor.Close()\n\n\tb.Run(\"Select\", func(b *testing.B) {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tb.Run(\"Case\"+strconv.Itoa(i), func(b *testing.B) {\n\t\t\t\tfor j := 0; j < b.N; j++ {\n\t\t\t\t\t_ = executor.Iter(`SELECT v FROM single_conn_bench.table1 WHERE pk = ?;`, i)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc BenchmarkSingleConnectionInsert(b *testing.B) {\n\tcluster := gocql.NewCluster(\"192.168.100.11\")\n\tcluster.Consistency = gocql.Quorum\n\n\tcluster.Dialer = 
replayer.NewReplayDialer(\"rec_insert\")\n\n\tfallback := gocql.RoundRobinHostPolicy()\n\tcluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(fallback)\n\n\texecutor, err := gocql.NewSingleHostQueryExecutor(cluster)\n\tif err != nil {\n\t\tb.Fatalf(\"failed to create executor: %v\", err)\n\t}\n\tdefer executor.Close()\n\n\tb.Run(\"Insert\", func(b *testing.B) {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tb.Run(\"Case\"+strconv.Itoa(i), func(b *testing.B) {\n\t\t\t\tfor j := 0; j < b.N; j++ {\n\t\t\t\t\terr = executor.Exec(`INSERT INTO single_conn_bench.table1 (pk, ck, v) VALUES (?, ?, ?);`, i, i%5, fmt.Sprintf(\"Name_%d\", i))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tb.Fatalf(\"failed to insert: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc TestMain(m *testing.M) {\n\tupdate := flag.Bool(\"update-golden\", false, \"Update golden files\")\n\tflag.Parse()\n\tif *update {\n\t\terr := InitializeCluster()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"failed to initialize cluster: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\terr = RecordInsertTraffic(10, \"rec_insert\")\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"failed to record insert traffic: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\terr = RecordSelectTraffic(10, \"rec_select\")\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"failed to record select traffic: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\tos.Exit(m.Run())\n}\n"
  },
  {
    "path": "tests/bench/bench_vector_public_test.go",
    "content": "package bench_test\n\nimport (\n\t\"encoding/binary\"\n\t\"math\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n)\n\nconst vectorProto = 4\nconst apacheCassandraTypePrefix = \"org.apache.cassandra.db.marshal.\"\nconst vectorTypePrefix = apacheCassandraTypePrefix + \"VectorType(\" + apacheCassandraTypePrefix + \"FloatType, \"\nconst vectorTypeSuffix = \")\"\n\nfunc makeFloatVectorType(dim int) gocql.VectorType {\n\tdimStr := strconv.Itoa(dim)\n\treturn gocql.VectorType{\n\t\tNativeType: gocql.NewCustomType(\n\t\t\tvectorProto,\n\t\t\tgocql.TypeCustom,\n\t\t\tvectorTypePrefix+dimStr+vectorTypeSuffix,\n\t\t),\n\t\tSubType:    gocql.NewNativeType(vectorProto, gocql.TypeFloat),\n\t\tDimensions: dim,\n\t}\n}\n\nfunc BenchmarkVectorMarshalFloat32Public(b *testing.B) {\n\tdims := []int{128, 384, 768, 1536}\n\n\tfor _, dim := range dims {\n\t\tdimStr := strconv.Itoa(dim)\n\t\tb.Run(\"dim_\"+dimStr, func(b *testing.B) {\n\t\t\tb.ReportAllocs()\n\n\t\t\tvec := make([]float32, dim)\n\t\t\tfor i := range vec {\n\t\t\t\tvec[i] = float32(i) * 0.1\n\t\t\t}\n\n\t\t\tinfo := makeFloatVectorType(dim)\n\n\t\t\tb.SetBytes(int64(dim * 4))\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := gocql.Marshal(info, vec); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc BenchmarkVectorUnmarshalFloat32Public(b *testing.B) {\n\tdims := []int{128, 384, 768, 1536}\n\n\tfor _, dim := range dims {\n\t\tdimStr := strconv.Itoa(dim)\n\t\tb.Run(\"dim_\"+dimStr, func(b *testing.B) {\n\t\t\tb.ReportAllocs()\n\n\t\t\tdata := make([]byte, dim*4)\n\t\t\tfor i := 0; i < dim; i++ {\n\t\t\t\tbinary.BigEndian.PutUint32(data[i*4:], math.Float32bits(float32(i)*0.1))\n\t\t\t}\n\n\t\t\tinfo := makeFloatVectorType(dim)\n\t\t\tvar result []float32\n\n\t\t\tb.SetBytes(int64(dim * 4))\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif err := gocql.Unmarshal(info, data, &result); err != nil 
{\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc BenchmarkVectorRoundTripPublic(b *testing.B) {\n\tdims := []int{128, 384, 768, 1536}\n\n\tfor _, dim := range dims {\n\t\tdimStr := strconv.Itoa(dim)\n\t\tb.Run(\"dim_\"+dimStr, func(b *testing.B) {\n\t\t\tb.ReportAllocs()\n\n\t\t\tsrcVec := make([]float32, dim)\n\t\t\tfor i := range srcVec {\n\t\t\t\tsrcVec[i] = float32(i) * 0.1\n\t\t\t}\n\n\t\t\tinfo := makeFloatVectorType(dim)\n\t\t\tvar dstVec []float32\n\n\t\t\tb.SetBytes(int64(dim * 4 * 2))\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tdata, err := gocql.Marshal(info, srcVec)\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif err := gocql.Unmarshal(info, data, &dstVec); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/bench/go.mod",
    "content": "module github.com/gocql/gocql/bench_test\n\ngo 1.25.0\n\nrequire (\n\tgithub.com/brianvoe/gofakeit/v6 v6.28.0\n\tgithub.com/gocql/gocql v1.7.0\n)\n\nrequire (\n\tgithub.com/google/uuid v1.6.0 // indirect\n\tgithub.com/klauspost/compress v1.18.5 // indirect\n\tgolang.org/x/sync v0.20.0 // indirect\n\tgopkg.in/inf.v0 v0.9.1 // indirect\n)\n\nreplace github.com/gocql/gocql => ../..\n"
  },
  {
    "path": "tests/bench/go.sum",
    "content": "github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4=\ngithub.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=\ngithub.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=\ngithub.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=\ngithub.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE=\ngithub.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=\ngithub.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=\ngo.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=\ngo.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=\ngolang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA=\ngolang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs=\ngolang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=\ngolang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=\ngopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=\ngopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\nsigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=\nsigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=\n"
  },
  {
    "path": "tests/bench/rec_insert/192.168.100.11:9042-0Reads",
    "content": "{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABCR5FsunQ0R7+r+IQBJDGRzAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0l
OX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\
":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFja
zEAAAAFMy4wLjgAAAAEwKhkCwAAABDu2cDanQ0R7wuYHo4KKzsUAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAE
AAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0
ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABAInmaEnRIR7y3XxXaiIG8xAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_
id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3Rlb
QAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABAEFnxKnRMR73PNIzDsRApMAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQ
V9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXR
hX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABAb5TUUnRMR717g5pvokC/kAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1J
QQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAA
Af3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABAxAe3ynRMR766bODzKESb3AAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9OR
VMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJ
TkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABCND7fSnRMR7xa0C3LDZrpzAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUx
JWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU
9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABCxO0fonRMR73j1wwJQHdWrAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfU
kVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1
,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABDvMx4YnRMR76NfXEL1hL0aAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUN
UX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAA
AAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABBD0
FVsnRQR7zNx10SKYLAmAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,
\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQA
MAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABBjRpfanRQR73UZ2qUmi5vUAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA
\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtA
AVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABC1lYC0nRQR74XiOWDrg7z6AAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR
1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATEAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2F
sABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABADC04KnRUR74eTL5lMntPqAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0l
TVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0AB
WxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABBIX9jMnR8R77IoLTLjoWSOAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ER
UFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZ
TExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABCrS9PmnR8R73xgJaYWggcvAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9
STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW
5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABDOO9JmnR8R7+ZrEBJHQVPvAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIR
U1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NP
TVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABADHwB6nSAR76qRwPvy1BuoAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01
CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"d
ata\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABBbUOgSnSAR7xG1Imw8wjVzAAAEyUFHR1JFR
0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\
"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVB
MRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABDFBq2MnSAR7xkFw6f0PNUWAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\"
:\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uA
A0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABAADcsinSER7wlwGUhVdvaXAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfU
kFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZ
zeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABAqC91qnSER70M1po5LqfR8AAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFN
VUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb
2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABB4RbfknSER73p3mZ3s7fVcAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTL
FJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAF
MTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABCSpEDinSER7zv6d1XvBXKLAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1
FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABYAAAABAAQHnu5NL7K3LWoBiT3dKFi9QAAAAEAAAADAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAJjawAJAAF2AA0AAAAEAAAAAA==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAAEAAAAAQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAAEAAAAAQ==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAAEAAAAAQ==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAAEAAAAAQ==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAAEAAAAAQ==\"}\n"
  },
  {
    "path": "tests/bench/rec_insert/192.168.100.11:9042-0Writes",
    "content": "{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlK/1uU/\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZSv9bsbw==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZSv9bukQ==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZSv9bv/A==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZSv9byNA==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZSv9bzwQ==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZSv9b1IA==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZSv9b2bg==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZSv9b3sQ==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZSv9b5UQ==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd
0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZSv9b6fg==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAAABJUQUJMRVRTX1JPVVRJTkdfVjEAAAALQ1FMX1ZFUlNJT04ABTMuMC4w\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlLJIrmb\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZSySLE3Q==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZSySLINA==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZSySLKbQ==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZSySLNVA==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZSySLQFg==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZSySLSvA==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZSySLVzw==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZSySLYTw==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZSySLbwQ==\"}\n{\"stre
am_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZSySLd9A==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAElRBQkxFVFNfUk9VVElOR19WMQAAAAtDUUxfVkVSU0lPTgAFMy4wLjAAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlMyHKee\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZTMhyv7A==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZTMhyzSA==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZTMhy1jQ==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZTMhy4/g==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZTMhy7gA==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZTMhy9lA==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZTMhzCyQ==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZTMhzElw==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACA
AAAAQAAAADAAAABk5hbWVfOAAAE4gABiZTMhzGzw==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZTMhzIcw==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAAABJUQUJMRVRTX1JPVVRJTkdfVjEAAAALQ1FMX1ZFUlNJT04ABTMuMC4wAAtEUklWRVJfTkFNRQAVU2N5bGxhREIgR29DUUwgRHJpdmVyAA5EUklWRVJfVkVSU0lPTgAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNLQnk9\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZTS0KLYg==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZTS0KQ3g==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZTS0KVaA==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZTS0KYdA==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZTS0KbOg==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZTS0KfVw==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZTS0KjSA==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZTS0Kmnw==\"}\n{\"stream_id\":832,\"data\":\"
BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZTS0Kqsg==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZTS0KtNw==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAElRBQkxFVFNfUk9VVElOR19WMQAAAAtDUUxfVkVSU0lPTgAFMy4wLjAAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNNo7wJ\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZTTaPCAw==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZTTaPD2A==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZTTaPFMw==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZTTaPG0w==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZTTaPIaQ==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZTTaPJ0g==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZTTaPLVA==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfN
wAAE4gABiZTTaPMww==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZTTaPOVg==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZTTaPP9Q==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAAABJUQUJMRVRTX1JPVVRJTkdfVjEAAAALQ1FMX1ZFUlNJT04ABTMuMC4w\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNPwG3Q\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZTT8B6Kg==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZTT8CALQ==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZTT8CDAg==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZTT8CGiA==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZTT8CJgA==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZTT8CM/g==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZTT8CRKQ==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsr
ctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZTT8CT5g==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZTT8CXTQ==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZTT8Cawg==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNY9MP5\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZTWPTKKA==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZTWPTNJQ==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZTWPTOjA==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZTWPTSTQ==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZTWPTUAA==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZTWPTVkw==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZTWPTaFA==\"}\
n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZTWPTbcg==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZTWPTc8w==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZTWPTePw==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAAAAtDUUxfVkVSU0lPTgAFMy4wLjAAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNcksmc\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZTXJLb0A==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZTXJLfTQ==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZTXJLkCg==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZTXJLpWQ==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZTXJLuoA==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZTXJLzqA==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAA
AEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZTXJL6EQ==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZTXJL+Ww==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZTXJMEHw==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZTXJMHeQ==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNixTSQ\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZTYsVAzA==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZTYsVFpg==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZTYsVIoA==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZTYsVMcQ==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZTYsVQhQ==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZTYsVVQw==\"}\n{\"stream_id\":704,\"d
ata\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZTYsVY/g==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZTYsVfBQ==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZTYsViPQ==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZTYsVkoQ==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAAAAtDUUxfVkVSU0lPTgAFMy4wLjAAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNrO079\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZTaztZSQ==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZTaztdBQ==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZTaztf8A==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZTazti5g==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZTaztmBA==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAAB
k5hbWVfNQAAE4gABiZTaztoSw==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZTaztqcw==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZTaztsRQ==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZTaztuew==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZTaztwLg==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNuYLG5\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZTbmC4rg==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZTbmC66A==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZTbmC8vA==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZTbmC/ZA==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZTbmDB5Q==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABA
ee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZTbmDD+g==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZTbmDGLA==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZTbmDIiw==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZTbmDLFw==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZTbmDNFw==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAElRBQkxFVFNfUk9VVElOR19WMQAAAAtDUUxfVkVSU0lPTgAFMy4wLjAAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlN2m+QB\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZTdpvzYw==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZTdpv3Ag==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZTdpv5hg==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZTdpv9WQ==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZTdpwB
2A==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZTdpwFlQ==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZTdpwITg==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZTdpwMUA==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZTdpwQ/Q==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZTdpwVHA==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlN+WsOm\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZTflrNKg==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZTflrQRw==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZTflrTKw==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZTflrVnQ==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AA
ElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZTflrXjg==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZTflrZsQ==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZTflrb1A==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZTflrd6Q==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZTflrgWg==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZTflrkxA==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlSFSYWe\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZUhUmJIw==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZUhUmKjg==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZUhUmLMg==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZUhUmMAg==\"}\n{\"stream_id\"
:576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZUhUmM0g==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZUhUmNgQ==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZUhUmOQA==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZUhUmO7g==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZUhUmP3g==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZUhUmQkQ==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlSPLfhY\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZUjy383g==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZUjy3+Gg==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZUjy3/PQ==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAA
AADAAAABk5hbWVfMwAAE4gABiZUjy4AJQ==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZUjy4BQw==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZUjy4Cqg==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZUjy4D5g==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZUjy4FJQ==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZUjy4Hxw==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZUjy4I9Q==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlSSrE+n\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZUkqxSYQ==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZUkqxTKQ==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZUkqxTyg==\"}\n{\"stream_id\":512,\"data\":\"BAACAAo
AAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZUkqxUaA==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZUkqxVAQ==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZUkqxVlA==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZUkqxWKg==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZUkqxWvA==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZUkqxXTQ==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZUkqxX7A==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlSX9kQB\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZUl/ZIsQ==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZUl/ZKKA==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gA
BiZUl/ZLIA==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZUl/ZNeA==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZUl/ZOyg==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZUl/ZP3Q==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZUl/ZRhw==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZUl/ZSiA==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZUl/ZTTQ==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZUl/ZUNw==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAAABJUQUJMRVRTX1JPVVRJTkdfVjEAAAALQ1FMX1ZFUlNJT04ABTMuMC4w\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlSgyAkQ\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZUoMgMEA==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZUoMgNwQ==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJP
d0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZUoMgOeg==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZUoMgPng==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZUoMgQWQ==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZUoMgRDw==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZUoMgR1Q==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZUoMgSpw==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZUoMgTog==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZUoMgU+g==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAAAAtDUUxfVkVSU0lPTgAFMy4wLjAAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlSrWjpD\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZUq1o+Bw==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZUq1o/Qg==\"}\n{\"str
eam_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZUq1pADA==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZUq1pA3w==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZUq1pBig==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZUq1pCOw==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZUq1pC+g==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZUq1pDkg==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZUq1pEGg==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZUq1pEmg==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAAABJUQUJMRVRTX1JPVVRJTkdfVjEAAAALQ1FMX1ZFUlNJT04ABTMuMC4wAAtEUklWRVJfTkFNRQAVU2N5bGxhREIgR29DUUwgRHJpdmVy\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlSxQVBh\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZUsUFTig==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAA
QAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZUsUFUcg==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZUsUFVMg==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZUsUFV+Q==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZUsUFWsQ==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZUsUFXXw==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZUsUFYDA==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZUsUFYwA==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZUsUFZaA==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZUsUFaDA==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAAAAtDUUxfVkVSU0lPTgAFMy4wLjAAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlS1dFJz\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZUtXRVRw==\"}\n{\"stream_id\":384,\"data\":\
"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZUtXRWKA==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZUtXRW9Q==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZUtXRY/g==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZUtXRZ8g==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZUtXRc7g==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZUtXRd0A==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZUtXRemw==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZUtXRfQQ==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZUtXRhPg==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAAABJUQUJMRVRTX1JPVVRJTkdfVjEAAAALQ1FMX1ZFUlNJT04ABTMuMC4wAAtEUklWRVJfTkFNRQAVU2N5bGxhREIgR29DUUwgRHJpdmVy\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlS9RuYh\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVf
MAAAE4gABiZUvUbotg==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZUvUbpew==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZUvUbqOw==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZUvUbq6A==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZUvUbrfg==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZUvUbsEQ==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZUvUbssQ==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZUvUbtRg==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZUvUbt3w==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZUvUbucQ==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAAAAtDUUxfVkVSU0lPTgAFMy4wLjAAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlS/6guU\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAABGAAAAQklOU0VSVCBJTlRPIHNpbmdsZV9jb25uX2JlbmNoLnRhYmxlMSAocGssIGNrLCB2KSBWQUxVRVMgKD8sID8sID8pOw==\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAA9ABAee7k0vs
rctagGJPd0oWL1AAElAAMAAAAEAAAAAAAAAAQAAAAAAAAABk5hbWVfMAAAE4gABiZUv+oZhg==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAQAAAAQAAAABAAAABk5hbWVfMQAAE4gABiZUv+ofUQ==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAgAAAAQAAAACAAAABk5hbWVfMgAAE4gABiZUv+okCg==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAAAwAAAAQAAAADAAAABk5hbWVfMwAAE4gABiZUv+orXA==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABAAAAAQAAAAEAAAABk5hbWVfNAAAE4gABiZUv+ozZg==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABQAAAAQAAAAAAAAABk5hbWVfNQAAE4gABiZUv+o7MA==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABgAAAAQAAAABAAAABk5hbWVfNgAAE4gABiZUv+o9OA==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAABwAAAAQAAAACAAAABk5hbWVfNwAAE4gABiZUv+o/7g==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACAAAAAQAAAADAAAABk5hbWVfOAAAE4gABiZUv+pCqw==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAA9ABAee7k0vsrctagGJPd0oWL1AAElAAMAAAAEAAAACQAAAAQAAAAEAAAABk5hbWVfOQAAE4gABiZUv+pE+g==\"}\n"
  },
  {
    "path": "tests/bench/rec_select/192.168.100.11:9042-0Reads",
    "content": "{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABCR5FsunQ0R7+r+IQBJDGRzAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0l
OX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"str
eam_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY
2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABDu2cDanQ0R7wuYHo4KKzsUAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBT
V9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\
"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABAInmaEnRIR7y3XxXaiIG8xAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREF
SWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448
,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX
2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABAEFnxKnRMR73PNIzDsRApMAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ
19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"strea
m_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABAb5TUUnRMR717g5pvokC/kAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCx
DT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"
hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANA
AZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABAxAe3ynRMR766bODzKESb3AAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU
1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"d
ata\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABCND7fSnRMR7xa0C3LDZrpzAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05
PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6
AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0AB
WxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABCxO0fonRMR73j1wwJQHdWrAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ
09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAA
AQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABDvMx4YnRMR76NfXEL1hL0aAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1V
ORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAA
AAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb
3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABBD0FVsnRQR7zNx10SKYLAmAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOV
F9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwA
C0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABBjRpfanRQR73UZ2qUmi5vUAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9
UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5n
bGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZ
XJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABC1lYC0nRQR74XiOWDrg7z6AAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HW
V9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNT
SU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABADC04KnRUR74eTL5lMntPqAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkV
TLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9i
ZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJA
AZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABBIX9jMnR8R77IoLTLjoWSOAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTL
FNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6
NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABCrS9PmnR8R73xgJaYWggcvAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1R
fU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFi
bGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0AB
WxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABDOO9JmnR8R7+ZrEBJHQVPvAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX
1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5
AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABADHwB6nSAR76qRwPvy1BuoAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0N
PTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0A
AAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob
3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABBbUOgSnSAR7xG1Imw8wjVzAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU
1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVS
U0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABDFBq2MnSAR7xkFw6f0PNUWAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5
fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5h
bWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwAB
nN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABAADcsinSER7wlwGUhVdvaXAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR
0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUz
LjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABAqC91qnSER70M1po5LqfR8AAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlR
FUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\
n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFb
G9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABB4RbfknSER73p3mZ3s7fVcAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVNUX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQ
kxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_id\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n{\"stream_id\":1,\"data\":\"hAAAAQYAAAHSAAwAC0NPTVBSRVNTSU9OAAIAA2x6NAAGc25hcHB5AAtDUUxfVkVSU0lPTgABAAUzLjMuMQAcU0NZ
TExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwABAClMV1RfT1BUSU1JWkFUSU9OX01FVEFfQklUX01BU0s9MjE0NzQ4MzY0OAAQU0NZTExBX05SX1NIQVJEUwABAAEyABJTQ1lMTEFfUEFSVElUSU9ORVIAAQArb3JnLmFwYWNoZS5jYXNzYW5kcmEuZGh0Lk11cm11cjNQYXJ0aXRpb25lcgAXU0NZTExBX1JBVEVfTElNSVRfRVJST1IAAQAQRVJST1JfQ09ERT02MTQ0MAAMU0NZTExBX1NIQVJEAAEAATAAGVNDWUxMQV9TSEFSRElOR19BTEdPUklUSE0AAQAYYmlhc2VkLXRva2VuLXJvdW5kLXJvYmluABpTQ1lMTEFfU0hBUkRJTkdfSUdOT1JFX01TQgABAAIxMgAXU0NZTExBX1NIQVJEX0FXQVJFX1BPUlQAAQAFMTkwNDIAG1NDWUxMQV9TSEFSRF9BV0FSRV9QT1JUX1NTTAABAAUxOTE0MgASVEFCTEVUU19ST1VUSU5HX1YxAAEAAA==\"}\n{\"stream_id\":64,\"data\":\"hAAAQAIAAAAA\"}\n{\"stream_id\":128,\"data\":\"hAAAgAgAAAf3AAAAAgAAAAAAAAASAAZzeXN0ZW0ABWxvY2FsAANrZXkADQAGc3lzdGVtAAVsb2NhbAAMYm9vdHN0cmFwcGVkAA0ABnN5c3RlbQAFbG9jYWwAEWJyb2FkY2FzdF9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADGNsdXN0ZXJfbmFtZQANAAZzeXN0ZW0ABWxvY2FsAAtjcWxfdmVyc2lvbgANAAZzeXN0ZW0ABWxvY2FsAAtkYXRhX2NlbnRlcgANAAZzeXN0ZW0ABWxvY2FsABFnb3NzaXBfZ2VuZXJhdGlvbgAJAAZzeXN0ZW0ABWxvY2FsAAdob3N0X2lkAAwABnN5c3RlbQAFbG9jYWwADmxpc3Rlbl9hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwAF25hdGl2ZV9wcm90b2NvbF92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3BhcnRpdGlvbmVyAA0ABnN5c3RlbQAFbG9jYWwABHJhY2sADQAGc3lzdGVtAAVsb2NhbAAPcmVsZWFzZV92ZXJzaW9uAA0ABnN5c3RlbQAFbG9jYWwAC3JwY19hZGRyZXNzABAABnN5c3RlbQAFbG9jYWwADnNjaGVtYV92ZXJzaW9uAAwABnN5c3RlbQAFbG9jYWwAEnN1cHBvcnRlZF9mZWF0dXJlcwANAAZzeXN0ZW0ABWxvY2FsAAZ0b2tlbnMAIgANAAZzeXN0ZW0ABWxvY2FsAAx0cnVuY2F0ZWRfYXQAIQAMAAMAAAABAAAABWxvY2FsAAAACUNPTVBMRVRFRAAAAATAqGQLAAAAAAAAAAUzLjMuMQAAAAtkYXRhY2VudGVyMQAAAARnLL6fAAAAEA9mbU1OFUflmhq/DEnf3pkAAAAEwKhkCwAAAAE0AAAAK29yZy5hcGFjaGUuY2Fzc2FuZHJhLmRodC5NdXJtdXIzUGFydGl0aW9uZXIAAAAFcmFjazEAAAAFMy4wLjgAAAAEwKhkCwAAABCSpEDinSER7zv6d1XvBXKLAAAEyUFHR1JFR0FURV9TVE9SQUdFX09QVElPTlMsQUxURVJOQVRPUl9UVEwsQ0RDLENEQ19HRU5FUkFUSU9OU19WMixDT0xMRUNUSU9OX0lOREVYSU5HLENPTVBVVEVEX0NPTFVNTlMsQ09SUkVDVF9DT1VOVEVSX09SREVSLENPUlJFQ1RfSURYX1RPS0VOX0lOX1NFQ09OREFSWV9JTkRFWCxDT1JSRUNUX05PTl9DT01QT1VORF9SQU5HRV9UT01CU1RPTkVTLENPUlJFQ1RfU1RBVElDX0NPTVBBQ1RfSU5fTUMsQ09VTlRFUlMsRElHRVN
UX0ZPUl9OVUxMX1ZBTFVFUyxESUdFU1RfSU5TRU5TSVRJVkVfVE9fRVhQSVJZLERJR0VTVF9NVUxUSVBBUlRJVElPTl9SRUFELEVNUFRZX1JFUExJQ0FfTVVUQVRJT05fUEFHRVMsRU1QVFlfUkVQTElDQV9QQUdFUyxHUk9VUDBfU0NIRU1BX1ZFUlNJT05JTkcsSElOVEVEX0hBTkRPRkZfU0VQQVJBVEVfQ09OTkVDVElPTixIT1NUX0lEX0JBU0VEX0hJTlRFRF9IQU5ET0ZGLElOREVYRVMsTEFSR0VfQ09MTEVDVElPTl9ERVRFQ1RJT04sTEFSR0VfUEFSVElUSU9OUyxMQV9TU1RBQkxFX0ZPUk1BVCxMV1QsTUFURVJJQUxJWkVEX1ZJRVdTLE1DX1NTVEFCTEVfRk9STUFULE1EX1NTVEFCTEVfRk9STUFULE1FX1NTVEFCTEVfRk9STUFULE5PTkZST1pFTl9VRFRTLFBBUkFMTEVMSVpFRF9BR0dSRUdBVElPTixQRVJfVEFCTEVfQ0FDSElORyxQRVJfVEFCTEVfUEFSVElUSU9ORVJTLFJBTkdFX1NDQU5fREFUQV9WQVJJQU5ULFJBTkdFX1RPTUJTVE9ORVMsUkFOR0VfVE9NQlNUT05FX0FORF9ERUFEX1JPV1NfREVURUNUSU9OLFJPTEVTLFJPV19MRVZFTF9SRVBBSVIsU0NIRU1BX0NPTU1JVExPRyxTQ0hFTUFfVEFCTEVTX1YzLFNFQ09OREFSWV9JTkRFWEVTX09OX1NUQVRJQ19DT0xVTU5TLFNFUEFSQVRFX1BBR0VfU0laRV9BTkRfU0FGRVRZX0xJTUlULFNUUkVBTV9XSVRIX1JQQ19TVFJFQU0sU1VQUE9SVFNfQ09OU0lTVEVOVF9UT1BPTE9HWV9DSEFOR0VTLFNVUFBPUlRTX1JBRlRfQ0xVU1RFUl9NQU5BR0VNRU5ULFRBQkxFX0RJR0VTVF9JTlNFTlNJVElWRV9UT19FWFBJUlksVE9NQlNUT05FX0dDX09QVElPTlMsVFJVTkNBVElPTl9UQUJMRSxUWVBFRF9FUlJPUlNfSU5fUkVBRF9SUEMsVURBLFVEQV9OQVRJVkVfUEFSQUxMRUxJWkVEX0FHR1JFR0FUSU9OLFVERixVTkJPVU5ERURfUkFOR0VfVE9NQlNUT05FUyxVVUlEX1NTVEFCTEVfSURFTlRJRklFUlMsVklFV19WSVJUVUFMX0NPTFVNTlMsV1JJVEVfRkFJTFVSRV9SRVBMWSxYWEhBU0gAAAAcAAAAAQAAABQtNDI0MjU1ODEzNTU4OTc4OTE0M/////8=\"}\n{\"stream_id\":192,\"data\":\"hAAAwAIAAAAA\"}\n{\"stream_id\":256,\"data\":\"hAABAAgAAABtAAAABAAQqkp70wQOeh4OLIlDiVT63wAAAAEAAAABAAAAAQAAABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAJwawAJAAAAAAAAAAEAEXNpbmdsZV9jb25uX2JlbmNoAAZ0YWJsZTEAAXYADQ==\"}\n{\"stream_id\":320,\"data\":\"hAABQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMA==\"}\n{\"stream_id\":384,\"data\":\"hAABgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMQ==\"}\n{\"stream_id\":448,\"data\":\"hAABwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMg==\"}\n{\"stream_i
d\":512,\"data\":\"hAACAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfMw==\"}\n{\"stream_id\":576,\"data\":\"hAACQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNA==\"}\n{\"stream_id\":640,\"data\":\"hAACgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNQ==\"}\n{\"stream_id\":704,\"data\":\"hAACwAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNg==\"}\n{\"stream_id\":768,\"data\":\"hAADAAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfNw==\"}\n{\"stream_id\":832,\"data\":\"hAADQAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOA==\"}\n{\"stream_id\":896,\"data\":\"hAADgAgAAAA6AAAAAgAAAAAAAAABABFzaW5nbGVfY29ubl9iZW5jaAAGdGFibGUxAAF2AA0AAAABAAAABk5hbWVfOQ==\"}\n"
  },
  {
    "path": "tests/bench/rec_select/192.168.100.11:9042-0Writes",
    "content": "{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAAABJUQUJMRVRTX1JPVVRJTkdfVjEAAAALQ1FMX1ZFUlNJT04ABTMuMC4wAAtEUklWRVJfTkFNRQAVU2N5bGxhREIgR29DUUwgRHJpdmVy\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlK/1wHd\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZSv9cJhg==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZSv9cLoA==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZSv9cNCQ==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZSv9cPsg==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZSv9cScQ==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZSv9cUrg==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZSv9cWfA==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZSv9cY2A==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZSv9cbLA==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZSv9cdFA==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdT
Q1lMTEFfUkFURV9MSU1JVF9FUlJPUgAAABJUQUJMRVRTX1JPVVRJTkdfVjEAAAALQ1FMX1ZFUlNJT04ABTMuMC4wAAtEUklWRVJfTkFNRQAVU2N5bGxhREIgR29DUUwgRHJpdmVyAA5EUklWRVJfVkVSU0lPTgAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlLJIujg\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZSySLzLA==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZSySL2SQ==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZSySL5dQ==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZSySL8Vg==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZSySL/vg==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZSySMCsw==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZSySMGKA==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZSySMJtQ==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZSySMNcQ==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZSySMQGw==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA
/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlMyHNa+\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZTMhzh5Q==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZTMhzkoA==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZTMhznXA==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZTMhzqyw==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZTMhzuDw==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZTMhzxSg==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZTMhz23Q==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZTMhz6aw==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZTMhz+RA==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZTMh0BLg==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAElRBQkxFVFNfUk9VVElOR19WMQAAAAtDUUxfVkVSU0lPTgAFMy4wLjAAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNLQr3A\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{
\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZTS0LPsg==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZTS0LUmg==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZTS0LYsA==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZTS0Ld3Q==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZTS0LimA==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZTS0LnSg==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZTS0LrUg==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZTS0Lupg==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZTS0LzLw==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZTS0L4rg==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAAABJUQUJMRVRTX1JPVVRJTkdfVjEAAAALQ1FMX1ZFUlNJT04ABTMuMC4wAAtEUklWRVJfTkFNRQAVU2N5bGxhREIgR29DUUwgRHJpdmVy\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNNo9bE\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAA
AAE4gABiZTTaPc2A==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZTTaPeyw==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZTTaPgwg==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZTTaPirQ==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZTTaPkUA==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZTTaPl6Q==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZTTaPnXQ==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZTTaPo8Q==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZTTaPqYg==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZTTaPsAA==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAAAAtDUUxfVkVSU0lPTgAFMy4wLjAAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNPwKVZ\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZTT8CyRg==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZTT8C1bw==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAA
AEAAAAAgAAE4gABiZTT8C38Q==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZTT8C/ow==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZTT8DJuw==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZTT8DNaw==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZTT8DR1Q==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZTT8DWKQ==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZTT8DaSw==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZTT8DdLg==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNY9Oej\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZTWPTwFw==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZTWPTyWg==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZTWPT0QA==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZTWPT29g==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAA
ElAAEAAAAEAAAABAAAE4gABiZTWPT5XA==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZTWPT7tw==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZTWPT93A==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZTWPT/rQ==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZTWPUBTw==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZTWPUCsQ==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAAABJUQUJMRVRTX1JPVVRJTkdfVjEAAAALQ1FMX1ZFUlNJT04ABTMuMC4wAAtEUklWRVJfTkFNRQAVU2N5bGxhREIgR29DUUwgRHJpdmVy\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNckxhK\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZTXJMlvg==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZTXJMpiw==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZTXJMs8A==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZTXJMx6Q==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZTXJM2Kw==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZTXJM6bA==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siU
OJVPrfAAElAAEAAAAEAAAABgAAE4gABiZTXJM/CQ==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZTXJNDYA==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZTXJNJKA==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZTXJNMRA==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAAABJUQUJMRVRTX1JPVVRJTkdfVjEAAAALQ1FMX1ZFUlNJT04ABTMuMC4wAAtEUklWRVJfTkFNRQAVU2N5bGxhREIgR29DUUwgRHJpdmVyAA5EUklWRVJfVkVSU0lPTgAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNixW30\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZTYsV4Dw==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZTYsV7Zg==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZTYsV9zA==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZTYsWFYw==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZTYsWO9w==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZTYsWRiw==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZTYsWU8w==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZTYsWX5g==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA
56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZTYsWbOA==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZTYsWd0w==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNrO3sz\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZTazuEAw==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZTazuGiQ==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZTazuJJA==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZTazuL5w==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZTazuOzA==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZTazuR8g==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZTazuV2w==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZTazuYnA==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZTazuaVw==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZTazub6A==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\
n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAAAAtDUUxfVkVSU0lPTgAFMy4wLjAAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlNuYNWp\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZTbmDdoA==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZTbmDf2g==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZTbmDhhw==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZTbmDjbQ==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZTbmDleQ==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZTbmDnGw==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZTbmDpEA==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZTbmDq9w==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZTbmDtVQ==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZTbmDv6Q==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAElRBQkxFVFNfUk9VVElOR19WMQAAAAtDUUxfVkVSU0lPTgAFMy4wLjAAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRB
X01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlN2nCMJ\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZTdpwtsA==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZTdpwwcQ==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZTdpwyvA==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZTdpw1Zg==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZTdpw4AA==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZTdpw6+g==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZTdpw9Lg==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZTdpxAGQ==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZTdpxCTw==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZTdpxFIw==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAElRBQkxFVFNfUk9VVElOR19WMQAAAAtDUUxfVkVSU0lPTgAFMy4wLjAAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9
jYWwnAAEkAAATiAAGJlN+Wu8/\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZTflr6IQ==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZTflr8nA==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZTflr/AQ==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZTflsBuw==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZTflsEGA==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZTflsGFg==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZTflsICA==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZTflsKaw==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZTflsL+g==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZTflsNgg==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlSFSZPF\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZ
ST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZUhUmWTg==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZUhUmXBw==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZUhUmXlQ==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZUhUmYPg==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZUhUmY1Q==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZUhUmZZQ==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZUhUmaDw==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZUhUmang==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZUhUmbMA==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZUhUmbtQ==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlSPLhBy\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZUjy4W0A==\"}\n{\"stream_id\":384,\"data\":\"BAABgAo
AAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZUjy4YUA==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZUjy4ZWA==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZUjy4aiA==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZUjy4bZQ==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZUjy4cbQ==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZUjy4eAw==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZUjy4fTg==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZUjy4hhw==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZUjy4jJA==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAAAAtDUUxfVkVSU0lPTgAFMy4wLjAAC0RSSVZFUl9OQU1FABVTY3lsbGFEQiBHb0NRTCBEcml2ZXIADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlSSrFr8\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZUkqxdHA==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZUkqxduw==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZUkqxeRg==\"}\n{\"stream_id\":512,\"data\":\
"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZUkqxfBg==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZUkqxfoQ==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZUkqxgMA==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZUkqxgxA==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZUkqxhVw==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZUkqxh8A==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZUkqxifA==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYADkRSSVZFUl9WRVJTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAAABJUQUJMRVRTX1JPVVRJTkdfVjEAAAALQ1FMX1ZFUlNJT04ABTMuMC4wAAtEUklWRVJfTkFNRQAVU2N5bGxhREIgR29DUUwgRHJpdmVy\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlSX9lny\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZUl/ZdvQ==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZUl/ZevQ==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZUl/ZfcQ==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZUl/Zh+w==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZUl/Zi3Q==\"}\n{\"stream_id\":640,\"
data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZUl/ZnXA==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZUl/Zoaw==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZUl/ZpKw==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZUl/ZqHg==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZUl/Zrig==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlSgyBhC\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZUoMgbag==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZUoMgcRA==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZUoMgdCA==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZUoMgdwg==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZUoMgeig==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZUoMgfSQ==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZUoMghKQ==\"}\n{\"stream_id\
":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZUoMgiFw==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZUoMgjdw==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZUoMgkVg==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlSrWkg6\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZUq1pMFg==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZUq1pNEw==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZUq1pNyw==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZUq1pOpA==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZUq1pPnQ==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZUq1pQaw==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZUq1pRLA==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZUq1pR+w==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZUq1pStQ==\"}\n{\"st
ream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZUq1pTjA==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlUX0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlSxQV1z\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZUsUFgFg==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZUsUFg4w==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZUsUFhkQ==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZUsUFiWg==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZUsUFi/Q==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZUsUFjkA==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZUsUFkLA==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZUsUFktg==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZUsUFlTA==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZUsUFl1g==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYADkRSSVZFUl9WRV
JTSU9OAAAAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAAABJUQUJMRVRTX1JPVVRJTkdfVjEAAAALQ1FMX1ZFUlNJT04ABTMuMC4wAAtEUklWRVJfTkFNRQAVU2N5bGxhREIgR29DUUwgRHJpdmVy\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlS1dGUY\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZUtXRoSQ==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZUtXRo5A==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZUtXRpZw==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZUtXRqCA==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZUtXRqwQ==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZUtXRrkw==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZUtXRsRw==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZUtXRtpw==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZUtXRuXg==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZUtXRvCg==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAC0NRTF9WRVJTSU9OAAUzLjAuMAALRFJJVkVSX05BTUUAFVNjeWxsYURCIEdvQ1FMIERyaXZlcgAORFJJVkVSX1ZFUlNJT04AAAAcU0NZTExBX0xXVF9BRERfTUVUQURBVEFfTUFSSwApTFdUX09QVElNSVpBVElPTl9NRVRBX0JJVF9NQVNLPTIxNDc0ODM2NDgAF1NDWUxMQV9SQVRFX0xJTUlU
X0VSUk9SAAAAElRBQkxFVFNfUk9VVElOR19WMQAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlS9RvD0\"}\n{\"stream_id\":192,\"data\":\"BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZUvUbzow==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZUvUb0VQ==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZUvUb0/w==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZUvUb1zg==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZUvUb2Zg==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZUvUb3BA==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZUvUb3ow==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZUvUb4RQ==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZUvUb44g==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZUvUb5nw==\"}\n{\"stream_id\":1,\"data\":\"BAAAAQUAAAAA\"}\n{\"stream_id\":64,\"data\":\"BAAAQAEAAADGAAYAHFNDWUxMQV9MV1RfQUREX01FVEFEQVRBX01BUksAKUxXVF9PUFRJTUlaQVRJT05fTUVUQV9CSVRfTUFTSz0yMTQ3NDgzNjQ4ABdTQ1lMTEFfUkFURV9MSU1JVF9FUlJPUgAAABJUQUJMRVRTX1JPVVRJTkdfVjEAAAALQ1FMX1ZFUlNJT04ABTMuMC4wAAtEUklWRVJfTkFNRQAVU2N5bGxhREIgR29DUUwgRHJpdmVyAA5EUklWRVJfVkVSU0lPTgAA\"}\n{\"stream_id\":128,\"data\":\"BAAAgAcAAAA/AAAALFNFTEVDVCAqIEZST00gc3lzdGVtLmxvY2FsIFdIRVJFIGtleT0nbG9jYWwnAAEkAAATiAAGJlS/6kxJ\"}\n{\"stream_id\":192,\"data\":\"
BAAAwAsAAAAxAAMAD1RPUE9MT0dZX0NIQU5HRQANU1RBVFVTX0NIQU5HRQANU0NIRU1BX0NIQU5HRQ==\"}\n{\"stream_id\":256,\"data\":\"BAABAAkAAAA4AAAANFNFTEVDVCB2IEZST00gc2luZ2xlX2Nvbm5fYmVuY2gudGFibGUxIFdIRVJFIHBrID0gPzs=\"}\n{\"stream_id\":320,\"data\":\"BAABQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAAAAE4gABiZUv+pUrQ==\"}\n{\"stream_id\":384,\"data\":\"BAABgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAQAAE4gABiZUv+pW3A==\"}\n{\"stream_id\":448,\"data\":\"BAABwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAgAAE4gABiZUv+pYrQ==\"}\n{\"stream_id\":512,\"data\":\"BAACAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAAAwAAE4gABiZUv+pa2A==\"}\n{\"stream_id\":576,\"data\":\"BAACQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABAAAE4gABiZUv+pcoQ==\"}\n{\"stream_id\":640,\"data\":\"BAACgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABQAAE4gABiZUv+peKA==\"}\n{\"stream_id\":704,\"data\":\"BAACwAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABgAAE4gABiZUv+pfvA==\"}\n{\"stream_id\":768,\"data\":\"BAADAAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAABwAAE4gABiZUv+phMw==\"}\n{\"stream_id\":832,\"data\":\"BAADQAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACAAAE4gABiZUv+pioA==\"}\n{\"stream_id\":896,\"data\":\"BAADgAoAAAArABCqSnvTBA56Hg4siUOJVPrfAAElAAEAAAAEAAAACQAAE4gABiZUv+pjuw==\"}\n"
  },
  {
    "path": "tests/serialization/marshal_0_unset_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n)\n\nfunc TestMarshalUnsetColumn(t *testing.T) {\n\tt.Parallel()\n\n\ttype tCase struct {\n\t\ttp      gocql.TypeInfo\n\t\tnilData bool\n\t\terr     bool\n\t}\n\n\telem := gocql.NewNativeType(3, gocql.TypeSmallInt)\n\tcases := []tCase{\n\t\t{gocql.NewNativeType(4, gocql.TypeBoolean), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeTinyInt), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeSmallInt), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeInt), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeBigInt), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeCounter), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeVarint), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeFloat), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeDouble), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeDecimal), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeVarchar), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeText), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeBlob), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeAscii), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeUUID), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeTimeUUID), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeInet), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeTime), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeTimestamp), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeDate), true, false},\n\t\t{gocql.NewNativeType(4, gocql.TypeDuration), true, false},\n\n\t\t{gocql.NewCollectionType(gocql.NewNativeType(3, gocql.TypeList), nil, elem), true, false},\n\t\t{gocql.NewCollectionType(gocql.NewNativeType(3, gocql.TypeSet), nil, elem), true, false},\n\n\t\t{gocql.NewCollectionType(gocql.NewNativeType(3, gocql.TypeMap), elem, elem), true, false},\n\n\t\t{gocql.NewUDTType(3, 
\"udt1\", \"\", gocql.UDTField{Name: \"1\", Type: elem}), true, true},\n\t\t{gocql.NewTupleType(gocql.NewNativeType(3, gocql.TypeTuple), elem), true, true},\n\t}\n\n\tfor _, expected := range cases {\n\t\tdata, err := gocql.Marshal(expected.tp, gocql.UnsetValue)\n\t\tif expected.nilData && data != nil {\n\t\t\tt.Errorf(\"marshallig unsetColumn for the cqltype %s should return nil data\", expected.tp.Type())\n\t\t}\n\t\tif !expected.nilData && data == nil {\n\t\t\tt.Errorf(\"marshallig unsetColumn for the cqltype %s should return not nil data\", expected.tp.Type())\n\t\t}\n\t\tif expected.err && err == nil {\n\t\t\tt.Errorf(\"marshallig unsetColumn for the cqltype %s should return an error\", expected.tp.Type())\n\t\t}\n\t\tif !expected.err && err != nil {\n\t\t\tt.Errorf(\"marshallig unsetColumn for the cqltype %s should not return an error\", expected.tp.Type())\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_10_decimal_corrupt_test.go",
    "content": "package serialization_test\n\nimport (\n\t\"testing\"\n\n\t\"gopkg.in/inf.v0\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/decimal\"\n)\n\nfunc TestMarshalDecimalCorrupt(t *testing.T) {\n\ttype testSuite struct {\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t\tname      string\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeDecimal)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.decimal\",\n\t\t\tmarshal:   decimal.Marshal,\n\t\t\tunmarshal: decimal.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\"1s2\", \"1s\", \"-1s\", \",1\", \"0,1\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"corrupt_vals\", t, marshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x00\\x7f\"),\n\t\t\t\tValues: mod.Values{*inf.NewDec(0, 0), \"\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"corrupt_data+\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\xff\\x80\"),\n\t\t\t\tValues: mod.Values{*inf.NewDec(0, 0), \"\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"corrupt_data-\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{*inf.NewDec(0, 0), \"\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data1\", t, 
unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x00\"),\n\t\t\t\tValues: mod.Values{*inf.NewDec(0, 0), \"\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data2\", t, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_10_decimal_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math/big\"\n\t\"testing\"\n\n\t\"gopkg.in/inf.v0\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/decimal\"\n)\n\nfunc TestMarshalDecimal(t *testing.T) {\n\tt.Parallel()\n\n\ttType := gocql.NewNativeType(4, gocql.TypeDecimal)\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.decimal\",\n\t\t\tmarshal:   decimal.Marshal,\n\t\t\tunmarshal: decimal.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tgetValues := func(scale inf.Scale, unscaled ...int64) mod.Values {\n\t\tout := make(mod.Values, 2)\n\t\tswitch len(unscaled) {\n\t\tcase 0:\n\t\t\tpanic(\"unscaled should be\")\n\t\tcase 1:\n\t\t\tout[0] = *inf.NewDec(unscaled[0], scale)\n\t\t\tout[1] = fmt.Sprintf(\"%d;%d\", scale, unscaled[0])\n\t\tdefault:\n\t\t\tbg := new(big.Int)\n\t\t\tfor _, u := range unscaled {\n\t\t\t\tbg = bg.Add(bg, big.NewInt(u))\n\t\t\t}\n\t\t\tout[0] = *inf.NewDecBig(bg, scale)\n\t\t\tout[1] = fmt.Sprintf(\"%d;%s\", scale, bg.String())\n\t\t}\n\t\treturn out\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   nil,\n\t\t\t\tValues: mod.Values{(*inf.Dec)(nil), \"\"}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, 
unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   nil,\n\t\t\t\tValues: mod.Values{*inf.NewDec(0, 0), \"\"}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   make([]byte, 0),\n\t\t\t\tValues: getValues(0, 0).AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: getValues(0, 0).AddVariants(mod.All...),\n\t\t\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: getValues(0, math.MaxInt64).AddVariants(mod.All...),\n\t\t\t}.Run(\"scale0_maxInt64\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x01\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: getValues(1, math.MaxInt64).AddVariants(mod.All...),\n\t\t\t}.Run(\"scale+1_maxInt64\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\xff\\xff\\xff\\xff\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: getValues(-1, math.MaxInt64).AddVariants(mod.All...),\n\t\t\t}.Run(\"scale-1_maxInt64\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x7f\\xff\\xff\\xff\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: getValues(math.MaxInt32, math.MaxInt64).AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt32_maxInt64\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x80\\x00\\x00\\x00\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: getValues(math.MinInt32, math.MaxInt64).AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt32_maxInt64\", t, marshal, unmarshal)\n\n\t\t\tscale := inf.Scale(math.MaxInt16)\n\t\t\tt.Run(\"scaleMaxInt16\", func(t *testing.T) 
{\n\t\t\t\tt.Parallel()\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x01\"),\n\t\t\t\t\tValues: getValues(scale, 1).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, -1).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"-1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x7f\"),\n\t\t\t\t\tValues: getValues(scale, 127).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt8\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x80\"),\n\t\t\t\t\tValues: getValues(scale, -128).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt8\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\x80\"),\n\t\t\t\t\tValues: getValues(scale, 128).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt8+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\xff\\x7f\"),\n\t\t\t\t\tValues: getValues(scale, -129).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt8-1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x7f\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 32767).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt16\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x80\\x00\"),\n\t\t\t\t\tValues: getValues(scale, -32768).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt16\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\x80\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 32768).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt16+1\", t, marshal, 
unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\xff\\x7f\\xff\"),\n\t\t\t\t\tValues: getValues(scale, -32769).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt16-1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x7f\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 8388607).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt24\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x80\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, -8388608).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt24\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\x80\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 8388608).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt24+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\xff\\x7f\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, -8388609).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt24-1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x7f\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 2147483647).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt32\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x80\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, -2147483648).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt32\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\x80\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 2147483648).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt32+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   
[]byte(\"\\x00\\x00\\x7f\\xff\\xff\\x7f\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, -2147483649).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt32-1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x7f\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 549755813887).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt40\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x80\\x00\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, -549755813888).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt40\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\x80\\x00\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 549755813888).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt40+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\xff\\x7f\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, -549755813889).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt40-1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x7f\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 140737488355327).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt48\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x80\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, -140737488355328).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt48\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\x80\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 140737488355328).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt48+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   
[]byte(\"\\x00\\x00\\x7f\\xff\\xff\\x7f\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, -140737488355329).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt48-1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 36028797018963967).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt56\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x80\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, -36028797018963968).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt56\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 36028797018963968).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt56+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\xff\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, -36028797018963969).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt56-1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 9223372036854775807).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt64\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, -9223372036854775808).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt64\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 9223372036854775807, 
1).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxInt64+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\xff\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, -9223372036854775808, -1).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"minInt64-1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 255).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint8\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x01\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 256).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint8+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 65535).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint16\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x01\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 65536).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint16+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 16777215).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint24\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x01\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 16777216).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint24+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 4294967295).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint32\", t, marshal, 
unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x01\\x00\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 4294967296).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint32+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 1099511627775).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint40\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x01\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 1099511627776).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint40+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 281474976710655).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint48\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x01\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 281474976710656).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint48+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 72057594037927935).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint56\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 72057594037927936).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint56+1\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\tValues: getValues(scale, 9223372036854775807, 
9223372036854775807, 1).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint64\", t, marshal, unmarshal)\n\n\t\t\t\tserialization.PositiveSet{\n\t\t\t\t\tData:   []byte(\"\\x00\\x00\\x7f\\xff\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\t\tValues: getValues(scale, 9223372036854775807, 9223372036854775807, 2).AddVariants(mod.All...),\n\t\t\t\t}.Run(\"maxUint64+1\", t, marshal, unmarshal)\n\t\t\t})\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_11_texts_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/blob\"\n\t\"github.com/gocql/gocql/serialization/text\"\n\t\"github.com/gocql/gocql/serialization/varchar\"\n)\n\nfunc TestMarshalTexts(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := []testSuite{\n\t\t{\n\t\t\tname:      \"serialization.varchar\",\n\t\t\tmarshal:   varchar.Marshal,\n\t\t\tunmarshal: varchar.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname:      \"serialization.text\",\n\t\t\tmarshal:   text.Marshal,\n\t\t\tunmarshal: text.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname:      \"serialization.blob\",\n\t\t\tmarshal:   blob.Marshal,\n\t\t\tunmarshal: blob.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob.varchar\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(gocql.NewNativeType(4, gocql.TypeVarchar), i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(gocql.NewNativeType(4, gocql.TypeVarchar), bytes, i)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"glob.text\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(gocql.NewNativeType(4, gocql.TypeText), i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(gocql.NewNativeType(4, gocql.TypeText), bytes, i)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"glob.blob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(gocql.NewNativeType(4, gocql.TypeBlob), i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(gocql.NewNativeType(4, gocql.TypeBlob), bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, 
tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t([]byte)(nil),\n\t\t\t\t\t(*[]byte)(nil),\n\t\t\t\t\t(*string)(nil),\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   nil,\n\t\t\t\tValues: mod.Values{\"\"}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   make([]byte, 0),\n\t\t\t\tValues: mod.Values{make([]byte, 0), \"\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"$test text string$\"),\n\t\t\t\tValues: mod.Values{[]byte(\"$test text string$\"), \"$test text string$\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"text\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_12_ascii_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/ascii\"\n)\n\nfunc TestMarshalAsciiMustFail(t *testing.T) {\n\tt.Parallel()\n\n\ttType := gocql.NewNativeType(4, gocql.TypeAscii)\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.ascii\",\n\t\t\tmarshal:   ascii.Marshal,\n\t\t\tunmarshal: ascii.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte{255},\n\t\t\t\tValues: mod.Values{[]byte{}, \"\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"corrupt_data1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte{127, 255, 127},\n\t\t\t\tValues: mod.Values{[]byte{}, \"\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"corrupt_data2\", t, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_12_ascii_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/ascii\"\n)\n\nfunc TestMarshalAscii(t *testing.T) {\n\ttType := gocql.NewNativeType(4, gocql.TypeAscii)\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.int\",\n\t\t\tmarshal:   ascii.Marshal,\n\t\t\tunmarshal: ascii.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t([]byte)(nil),\n\t\t\t\t\t(*[]byte)(nil),\n\t\t\t\t\t(*string)(nil),\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   nil,\n\t\t\t\tValues: mod.Values{\"\"}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   make([]byte, 0),\n\t\t\t\tValues: mod.Values{make([]byte, 0), \"\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"test text string\"),\n\t\t\t\tValues: mod.Values{[]byte(\"test text string\"), \"test text string\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"text\", t, nil, 
unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_13_uuids_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/timeuuid\"\n\t\"github.com/gocql/gocql/serialization/uuid\"\n)\n\nfunc TestMarshalUUIDsMustFail(t *testing.T) {\n\tt.Parallel()\n\n\ttTypes := []gocql.NativeType{\n\t\tgocql.NewNativeType(4, gocql.TypeUUID),\n\t\tgocql.NewNativeType(4, gocql.TypeTimeUUID),\n\t}\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [4]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.uuid\",\n\t\t\tmarshal:   uuid.Marshal,\n\t\t\tunmarshal: uuid.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob.uuid\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tTypes[0], i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tTypes[0], bytes, i)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:      \"serialization.timeuuid\",\n\t\t\tmarshal:   timeuuid.Marshal,\n\t\t\tunmarshal: timeuuid.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname:      \"glob.timeuuid\",\n\t\t\tmarshal:   func(i any) ([]byte, error) { return gocql.Marshal(tTypes[1], i) },\n\t\t\tunmarshal: func(bytes []byte, i any) error { return gocql.Unmarshal(tTypes[1], bytes, i) },\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"b6b77c23-c776-40ff-828d-a385f3e8a2aff\",\n\t\t\t\t\t\"00000000-0000-0000-0000-0000000000000\",\n\t\t\t\t\t[]byte{182, 183, 124, 35, 199, 118, 64, 255, 130, 141, 163, 133, 243, 232, 162, 175, 175},\n\t\t\t\t\t[]byte{00, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0},\n\t\t\t\t\t[17]byte{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_vals\", t, marshal)\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"b6b77c23-c776-40ff-828d-a385f3e8a2a\",\n\t\t\t\t\t\"00000000-0000-0000-0000-00000000000\",\n\t\t\t\t\t[]byte{182, 183, 124, 35, 199, 118, 64, 255, 130, 141, 163, 133, 243, 232, 162},\n\t\t\t\t\t[]byte{00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},\n\t\t\t\t\t[15]byte{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_vals\", t, marshal)\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"b6b77c@3-c776-40ff-828d-a385f3e8a2a\",\n\t\t\t\t\t\"00000000-0000-0000-0000-0#0000000000\",\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"corrupt_vals\", t, marshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\xb6\\xb7\\x7c\\x23\\xc7\\x76\\x40\\xff\\x82\\x8d\\xa3\\x85\\xf3\\xe8\\xa2\\xaf\\xaf\"),\n\t\t\t\tValues: mod.Values{\"\", make([]byte, 0), [16]byte{}, gocql.UUID{}}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_data\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\xb6\\xb7\\x7c\\x23\\xc7\\x76\\x40\\xff\\x82\\x8d\\xa3\\x85\\xf3\\xe8\\xa2\"),\n\t\t\t\tValues: mod.Values{\"\", make([]byte, 0), [16]byte{}, gocql.UUID{}}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x00\"),\n\t\t\t\tValues: mod.Values{\"\", make([]byte, 0), [16]byte{}, gocql.UUID{}}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data2\", t, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_13_uuids_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/timeuuid\"\n\t\"github.com/gocql/gocql/serialization/uuid\"\n)\n\nfunc TestMarshalUUIDs(t *testing.T) {\n\tt.Parallel()\n\n\ttTypes := []gocql.NativeType{\n\t\tgocql.NewNativeType(4, gocql.TypeUUID),\n\t\tgocql.NewNativeType(4, gocql.TypeTimeUUID),\n\t}\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [4]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.uuid\",\n\t\t\tmarshal:   uuid.Marshal,\n\t\t\tunmarshal: uuid.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob.uuid\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tTypes[0], i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tTypes[0], bytes, i)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:      \"serialization.timeuuid\",\n\t\t\tmarshal:   timeuuid.Marshal,\n\t\t\tunmarshal: timeuuid.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname:      \"glob.timeuuid\",\n\t\t\tmarshal:   func(i any) ([]byte, error) { return gocql.Marshal(tTypes[1], i) },\n\t\t\tunmarshal: func(bytes []byte, i any) error { return gocql.Unmarshal(tTypes[1], bytes, i) },\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t([]byte)(nil), (*[]byte)(nil),\n\t\t\t\t\t\"\", (*string)(nil),\n\t\t\t\t\t(*[16]byte)(nil),\n\t\t\t\t\t(*gocql.UUID)(nil),\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, 
unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[16]byte{},\n\t\t\t\t\tgocql.UUID{},\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"00000000-0000-0000-0000-000000000000\",\n\t\t\t\t\tmake([]byte, 0),\n\t\t\t\t\t[16]byte{},\n\t\t\t\t\tgocql.UUID{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},\n\t\t\t\t\t[16]byte{},\n\t\t\t\t\tgocql.UUID{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xe9\\x39\\xf5\\x2a\\xd6\\x90\\x11\\xef\\x9c\\xd2\\x02\\x42\\xac\\x12\\x00\\x02\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"e939f52a-d690-11ef-9cd2-0242ac120002\",\n\t\t\t\t\t[]byte{233, 57, 245, 42, 214, 144, 17, 239, 156, 210, 2, 66, 172, 18, 0, 2},\n\t\t\t\t\t[16]byte{233, 57, 245, 42, 214, 144, 17, 239, 156, 210, 2, 66, 172, 18, 0, 2},\n\t\t\t\t\tgocql.UUID{233, 57, 245, 42, 214, 144, 17, 239, 156, 210, 2, 66, 172, 18, 0, 2},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"uuid\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"ffffffff-ffff-ffff-ffff-ffffffffffff\",\n\t\t\t\t\t[]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},\n\t\t\t\t\t[16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},\n\t\t\t\t\tgocql.UUID{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"max\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n\nfunc TestMarshalTimeUUID(t *testing.T) {\n\tt.Parallel()\n\n\ttType := gocql.NewNativeType(4, gocql.TypeTimeUUID)\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.timeuuid\",\n\t\t\tmarshal:   timeuuid.Marshal,\n\t\t\tunmarshal: timeuuid.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname:      \"glob.timeuuid\",\n\t\t\tmarshal:   func(i any) ([]byte, error) { return gocql.Marshal(tType, i) },\n\t\t\tunmarshal: func(bytes []byte, i any) error { return gocql.Unmarshal(tType, bytes, i) },\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"00000000-0000-0000-0000-000000000000\",\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zero\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\x1f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"ffffffff-ffff-1fff-ffff-ffffffffffff\",\n\t\t\t\t\t[]byte{255, 255, 255, 255, 255, 255, 31, 255, 255, 255, 255, 255, 255, 255, 255, 255},\n\t\t\t\t\t[16]byte{255, 255, 255, 255, 255, 255, 31, 255, 255, 255, 255, 255, 255, 255, 255, 255},\n\t\t\t\t\tgocql.UUID{255, 255, 255, 255, 255, 255, 31, 255, 255, 255, 255, 255, 255, 255, 255, 255},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"max\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_14_inet_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"net\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/inet\"\n)\n\nfunc TestMarshalsInetMustFail(t *testing.T) {\n\tt.Parallel()\n\n\ttType := gocql.NewNativeType(4, gocql.TypeInet)\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.inet\",\n\t\t\tmarshal:   inet.Marshal,\n\t\t\tunmarshal: inet.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"192.168.0.1.1\",\n\t\t\t\t\tnet.IP{192, 168, 0, 1, 1},\n\t\t\t\t\t[]byte{192, 168, 0, 1, 1},\n\t\t\t\t\t[5]byte{192, 168, 0, 1, 1},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_valsV4\", t, marshal)\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"fe80:cd00:0:cde:1257:0:211e:729cc\",\n\t\t\t\t\tnet.IP(\"\\xb6\\xb7\\x7c\\x23\\xc7\\x76\\x40\\xff\\x82\\x8d\\xa3\\x85\\xf3\\xe8\\xa2\\xaf\\xaf\"),\n\t\t\t\t\t[]byte(\"\\xb6\\xb7\\x7c\\x23\\xc7\\x76\\x40\\xff\\x82\\x8d\\xa3\\x85\\xf3\\xe8\\xa2\\xaf\\xaf\"),\n\t\t\t\t\t[17]byte{254, 128, 205, 0, 0, 0, 12, 222, 18, 87, 0, 0, 33, 30, 114, 156, 156},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_valsV6\", t, 
marshal)\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"192.168.0\",\n\t\t\t\t\tnet.IP{192, 168, 0},\n\t\t\t\t\t[]byte{192, 168, 0},\n\t\t\t\t\t[3]byte{192, 168, 0},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_valsV4\", t, marshal)\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"fe80:cd00:0:cde:1257:0:211e\",\n\t\t\t\t\tnet.IP(\"\\xb6\\xb7\\x7c\\x23\\xc7\\x76\\x40\\xff\\x82\\x8d\\xa3\\x85\\xf3\\xe8\\xa2\"),\n\t\t\t\t\t[]byte(\"\\xb6\\xb7\\x7c\\x23\\xc7\\x76\\x40\\xff\\x82\\x8d\\xa3\\x85\\xf3\\xe8\\xa2\"),\n\t\t\t\t\t[15]byte{254, 128, 205, 0, 0, 0, 12, 222, 18, 87, 0, 0, 33, 30, 114},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_valsV6\", t, marshal)\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"b6b77c@3-c776-40ff-828d-a385f3e8a2a\",\n\t\t\t\t\t\"00000000-0000-0000-0000-0#0000000000\",\n\t\t\t\t\t\"192.168.a.1\",\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"corrupt_vals\", t, marshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte{192, 168, 0, 1, 1},\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"\",\n\t\t\t\t\tnet.IP{},\n\t\t\t\t\t[]byte{},\n\t\t\t\t\t[4]byte{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_dataV4\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\xb6\\xb7\\x7c\\x23\\xc7\\x76\\x40\\xff\\x82\\x8d\\xa3\\x85\\xf3\\xe8\\xa2\\xaf\\xaf\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"\",\n\t\t\t\t\tnet.IP{},\n\t\t\t\t\t[]byte{},\n\t\t\t\t\t[16]byte{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_dataV6\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\xb6\\xb7\\x7c\\x23\\xc7\\x76\\x40\\xff\\x82\\x8d\\xa3\\x85\\xf3\\xe8\\xa2\\xaf\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[4]byte{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_dataV6Array4\", t, 
unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte{192, 168, 0},\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"\",\n\t\t\t\t\tnet.IP{},\n\t\t\t\t\t[]byte{},\n\t\t\t\t\t[4]byte{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_dataV4\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\xb6\\xb7\\x7c\\x23\\xc7\\x76\\x40\\xff\\x82\\x8d\\xa3\\x85\\xf3\\xe8\\xa2\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"\",\n\t\t\t\t\tnet.IP{},\n\t\t\t\t\t[]byte{},\n\t\t\t\t\t[16]byte{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_dataV6\", t, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_14_inet_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"net\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/inet\"\n)\n\nfunc TestMarshalsInet(t *testing.T) {\n\tt.Parallel()\n\n\ttType := gocql.NewNativeType(4, gocql.TypeInet)\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.inet\",\n\t\t\tmarshal:   inet.Marshal,\n\t\t\tunmarshal: inet.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t([]byte)(nil),\n\t\t\t\t\t(*[]byte)(nil),\n\t\t\t\t\t(*[4]byte)(nil),\n\t\t\t\t\t(*[16]byte)(nil),\n\t\t\t\t\t(net.IP)(nil),\n\t\t\t\t\t(*net.IP)(nil),\n\t\t\t\t\t\"\",\n\t\t\t\t\t(*string)(nil),\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[4]byte{},\n\t\t\t\t\t[16]byte{},\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tmake([]byte, 0),\n\t\t\t\t\t[4]byte{},\n\t\t\t\t\t[16]byte{},\n\t\t\t\t\tmake(net.IP, 
0),\n\t\t\t\t\t\"0.0.0.0\",\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte{0, 0, 0, 0},\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"0.0.0.0\",\n\t\t\t\t\t[]byte{0, 0, 0, 0},\n\t\t\t\t\tnet.IP{0, 0, 0, 0},\n\t\t\t\t\t[4]byte{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"v4zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte{0, 0, 0, 0},\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[16]byte{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"v4zerosUnmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte{192, 168, 0, 1},\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"192.168.0.1\",\n\t\t\t\t\t[]byte{192, 168, 0, 1},\n\t\t\t\t\tnet.IP{192, 168, 0, 1},\n\t\t\t\t\t[4]byte{192, 168, 0, 1},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"v4\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte{192, 168, 0, 1},\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[16]byte{192, 168, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"v4unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte{255, 255, 255, 255},\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"255.255.255.255\",\n\t\t\t\t\t[]byte{255, 255, 255, 255},\n\t\t\t\t\tnet.IP{255, 255, 255, 255},\n\t\t\t\t\t[4]byte{255, 255, 255, 255},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"v4max\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte{255, 255, 255, 255},\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[16]byte{255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"v4maxUnmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"::\",\n\t\t\t\t\t[]byte{0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},\n\t\t\t\t\tnet.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},\n\t\t\t\t\t[16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"v6zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[4]byte{0, 0, 0, 0},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"v6zerosUnmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xfe\\x80\\xcd\\x00\\x00\\x00\\x0c\\xde\\x12\\x57\\x00\\x00\\x21\\x1e\\x72\\x9c\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"fe80:cd00:0:cde:1257:0:211e:729c\",\n\t\t\t\t\t[]byte(\"\\xfe\\x80\\xcd\\x00\\x00\\x00\\x0c\\xde\\x12\\x57\\x00\\x00\\x21\\x1e\\x72\\x9c\"),\n\t\t\t\t\tnet.IP(\"\\xfe\\x80\\xcd\\x00\\x00\\x00\\x0c\\xde\\x12\\x57\\x00\\x00\\x21\\x1e\\x72\\x9c\"),\n\t\t\t\t\t[16]byte{254, 128, 205, 0, 0, 0, 12, 222, 18, 87, 0, 0, 33, 30, 114, 156},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"v6\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff\",\n\t\t\t\t\t[]byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\tnet.IP(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\t\t[16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"v6max\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_15_time_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/cqltime\"\n)\n\nfunc TestMarshalTimeCorrupt(t *testing.T) {\n\tt.Parallel()\n\n\ttType := gocql.NewNativeType(4, gocql.TypeTime)\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.cqltime\",\n\t\t\tmarshal:   cqltime.Marshal,\n\t\t\tunmarshal: cqltime.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\t// marshal, unmarshal of all supported `go types` does not return an error on all type of corruption.\n\t\t\t//brokenTypes := serialization.GetTypes(int64(0), (*int64)(nil), mod.Int64(0), (*mod.Int64)(nil), time.Duration(0), (*time.Duration)(nil))\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(86400000000000), time.Duration(86400000000000),\n\t\t\t\t\tint64(86500000000000), time.Duration(86500000000000),\n\t\t\t\t\tint64(math.MaxInt64), time.Duration(math.MaxInt64),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_vals\", t, marshal)\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(-1), time.Duration(-1),\n\t\t\t\t\tint64(math.MinInt8), time.Duration(math.MinInt8),\n\t\t\t\t\tint64(math.MinInt16), 
time.Duration(math.MinInt16),\n\t\t\t\t\tint64(math.MinInt32), time.Duration(math.MinInt32),\n\t\t\t\t\tint64(math.MinInt64), time.Duration(math.MinInt64),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_vals\", t, marshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x4e\\x94\\x91\\x4e\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Duration(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_data_len\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x4e\\x94\\x91\\x4e\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Duration(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data_len1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Duration(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data_len2\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x4e\\x94\\x91\\x4f\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Duration(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_data1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Duration(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_data2\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Duration(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), 
time.Duration(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data2\", t, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_15_time_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/cqltime\"\n)\n\nfunc TestMarshalsTime(t *testing.T) {\n\tt.Parallel()\n\n\ttType := gocql.NewNativeType(4, gocql.TypeTime)\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.cqltime\",\n\t\t\tmarshal:   cqltime.Marshal,\n\t\t\tunmarshal: cqltime.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t(*int64)(nil), (*time.Duration)(nil),\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Duration(0),\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Duration(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), 
time.Duration(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x4e\\x94\\x91\\x4e\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(86399999999999), time.Duration(86399999999999),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"max\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_16_timestamp_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/timestamp\"\n)\n\nfunc TestMarshalTimestampCorrupt(t *testing.T) {\n\tt.Parallel()\n\n\ttType := gocql.NewNativeType(4, gocql.TypeTimestamp)\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.timestamp\",\n\t\t\tmarshal:   timestamp.Marshal,\n\t\t\tunmarshal: timestamp.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\ttime.Date(292278994, 8, 17, 7, 12, 55, 808*1000000, time.UTC),\n\t\t\t\t\ttime.Date(292278994, 8, 17, 7, 12, 56, 807*1000000, time.UTC),\n\t\t\t\t\ttime.Date(292278994, 8, 17, 7, 13, 55, 807*1000000, time.UTC),\n\t\t\t\t\ttime.Date(292278994, 8, 17, 8, 12, 55, 807*1000000, time.UTC),\n\t\t\t\t\ttime.Date(292278994, 8, 18, 7, 12, 55, 807*1000000, time.UTC),\n\t\t\t\t\ttime.Date(292278994, 9, 17, 7, 12, 55, 807*1000000, time.UTC),\n\t\t\t\t\ttime.Date(292278995, 8, 17, 7, 12, 55, 807*1000000, time.UTC),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_vals\", t, marshal)\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\ttime.Date(-292275055, 5, 16, 16, 47, 4, 191*1000000, 
time.UTC),\n\t\t\t\t\ttime.Date(-292275055, 5, 16, 16, 47, 3, 192*1000000, time.UTC),\n\t\t\t\t\ttime.Date(-292275055, 5, 16, 16, 46, 4, 192*1000000, time.UTC),\n\t\t\t\t\ttime.Date(-292275055, 5, 16, 15, 47, 4, 192*1000000, time.UTC),\n\t\t\t\t\ttime.Date(-292275055, 5, 15, 16, 47, 4, 192*1000000, time.UTC),\n\t\t\t\t\ttime.Date(-292275055, 4, 16, 16, 47, 4, 192*1000000, time.UTC),\n\t\t\t\t\ttime.Date(-292275056, 5, 16, 16, 47, 4, 192*1000000, time.UTC),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_vals\", t, marshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Time{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_data\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Time{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Time{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data2\", t, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_16_timestamp_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/timestamp\"\n)\n\nfunc TestMarshalsTimestamp(t *testing.T) {\n\tt.Parallel()\n\n\ttType := gocql.NewNativeType(4, gocql.TypeTimestamp)\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.timestamp\",\n\t\t\tmarshal:   timestamp.Marshal,\n\t\t\tunmarshal: timestamp.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tzeroTimestamp := time.Unix(0, 0).UTC()\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t(*int64)(nil), (*time.Time)(nil),\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Time{},\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Time{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: 
[]byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), zeroTimestamp,\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(math.MaxInt64), time.UnixMilli(math.MaxInt64).UTC(),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"max\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(math.MinInt64), time.UnixMilli(math.MinInt64).UTC(),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"min\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_17_date_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/date\"\n)\n\nfunc TestMarshalDateCorrupt(t *testing.T) {\n\tt.Parallel()\n\n\ttType := gocql.NewNativeType(4, gocql.TypeDate)\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.date\",\n\t\t\tmarshal:   date.Marshal,\n\t\t\tunmarshal: date.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\ttime.Date(5881580, 7, 12, 0, 0, 0, 0, time.UTC).UnixMilli(),\n\t\t\t\t\ttime.Date(5881580, 8, 11, 0, 0, 0, 0, time.UTC).UnixMilli(),\n\t\t\t\t\ttime.Date(5881581, 7, 11, 0, 0, 0, 0, time.UTC).UnixMilli(),\n\t\t\t\t\ttime.Date(5883581, 12, 20, 0, 0, 0, 0, time.UTC).UnixMilli(),\n\t\t\t\t\t\"5881580-07-12\", \"5881580-08-11\", \"5881581-07-11\", \"9223372036854775807-07-12\",\n\t\t\t\t\ttime.Date(5881580, 7, 12, 0, 0, 0, 0, time.UTC).UTC(),\n\t\t\t\t\ttime.Date(5881580, 8, 11, 0, 0, 0, 0, time.UTC).UTC(),\n\t\t\t\t\ttime.Date(5881581, 7, 11, 0, 0, 0, 0, time.UTC).UTC(),\n\t\t\t\t\ttime.Date(5883581, 12, 20, 0, 0, 0, 0, time.UTC).UTC(),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_vals\", t, 
marshal)\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\ttime.Date(-5877641, 06, 22, 0, 0, 0, 0, time.UTC).UnixMilli(),\n\t\t\t\t\ttime.Date(-5877641, 05, 23, 0, 0, 0, 0, time.UTC).UnixMilli(),\n\t\t\t\t\ttime.Date(-5877642, 06, 23, 0, 0, 0, 0, time.UTC).UnixMilli(),\n\t\t\t\t\ttime.Date(-5887641, 06, 23, 0, 0, 0, 0, time.UTC).UnixMilli(),\n\t\t\t\t\t\"-5877641-06-22\", \"-5877641-05-23\", \"-5877642-06-23\", \"-9223372036854775807-07-12\",\n\t\t\t\t\ttime.Date(-5877641, 06, 22, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\ttime.Date(-5877641, 05, 23, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\ttime.Date(-5877642, 06, 23, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\ttime.Date(-5887641, 06, 23, 0, 0, 0, 0, time.UTC),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_vals\", t, marshal)\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"a1580-07-11\", \"1970-0d-11\", \"02-11\", \"1970-11\",\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"corrupt_vals\", t, marshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Time{}, \"\",\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_data1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x4e\\x94\\x91\\x4e\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Time{}, \"\",\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_data2\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Time{}, \"\",\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(0), time.Time{}, \"\",\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data2\", t, 
unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_17_date_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/date\"\n)\n\nfunc TestMarshalsDate(t *testing.T) {\n\tt.Parallel()\n\n\ttType := gocql.NewNativeType(4, gocql.TypeDate)\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.date\",\n\t\t\tmarshal:   date.Marshal,\n\t\t\tunmarshal: date.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tzeroDate := time.Date(-5877641, 06, 23, 0, 0, 0, 0, time.UTC).UTC()\n\tmiddleDate := time.UnixMilli(0).UTC()\n\tmaxDate := time.Date(5881580, 07, 11, 0, 0, 0, 0, time.UTC).UTC()\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t(*uint32)(nil), (*int32)(nil), (*int64)(nil), (*string)(nil), \"\", (*time.Time)(nil),\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(0), int32(0), zeroDate.UnixMilli(), \"\", zeroDate,\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(0), 
int32(0), zeroDate.UnixMilli(), zeroDate, \"-5877641-06-23\",\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(0), int32(0), zeroDate.UnixMilli(), zeroDate, \"-5877641-06-23\",\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(1), int32(1), zeroDate.Add(time.Hour * 24).UnixMilli(), zeroDate.Add(time.Hour * 24), \"-5877641-06-24\",\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(1 << 31), int32(math.MinInt32), middleDate.UnixMilli(), middleDate, \"1970-01-01\",\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"middle\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(math.MaxUint32), int32(-1), maxDate.UnixMilli(), maxDate, \"5881580-07-11\",\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"max\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_18_duration_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n)\n\nfunc TestMarshalDurationCorrupt(t *testing.T) {\n\tt.Parallel()\n\n\ttType := gocql.NewNativeType(4, gocql.TypeDuration)\n\n\tmarshal := func(i any) ([]byte, error) { return gocql.Marshal(tType, i) }\n\tunmarshal := func(bytes []byte, i any) error {\n\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t}\n\n\tserialization.NegativeMarshalSet{\n\t\tValues: mod.Values{\n\t\t\t\"23123113f\", \"sda\",\n\t\t\tgocql.Duration{Months: -1, Days: 1, Nanoseconds: 0},\n\t\t\tgocql.Duration{Months: -1, Days: 1, Nanoseconds: 1},\n\t\t\tgocql.Duration{Months: -1, Days: 1, Nanoseconds: -1},\n\t\t\tgocql.Duration{Months: -1, Days: -1, Nanoseconds: 1},\n\t\t\tgocql.Duration{Months: -1, Days: 0, Nanoseconds: 1},\n\t\t\tgocql.Duration{Months: 1, Days: -1, Nanoseconds: 0},\n\t\t\tgocql.Duration{Months: 1, Days: -1, Nanoseconds: 1},\n\t\t\tgocql.Duration{Months: 1, Days: -1, Nanoseconds: -1},\n\t\t\tgocql.Duration{Months: 1, Days: 1, Nanoseconds: -1},\n\t\t\tgocql.Duration{Months: 1, Days: 0, Nanoseconds: -1},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"corrupt_vals\", t, marshal)\n\n\tserialization.NegativeMarshalSet{\n\t\tValues: 
mod.Values{\n\t\t\t\"178956971y7mo306783378w1d2562047h47m16.854775807s\",\n\t\t\t\"178956970y8mo306783378w1d2562047h47m16.854775807s\",\n\t\t\t\"178956970y7mo306783379w1d2562047h47m16.854775807s\",\n\t\t\t\"178956970y7mo306783378w2d2562047h47m16.854775807s\",\n\t\t\t\"178956970y7mo306783378w1d2562048h47m16.854775807s\",\n\t\t\t\"178956970y7mo306783378w1d2562047h48m16.854775807s\",\n\t\t\t\"178956970y7mo306783378w1d2562047h47m17.854775807s\",\n\t\t\t\"178956970y7mo306783378w1d2562047h47m16.854775808s\",\n\n\t\t\t\"-178956971y8mo306783378w2d2562047h47m16.854775808s\",\n\t\t\t\"-178956970y9mo306783378w2d2562047h47m16.854775808s\",\n\t\t\t\"-178956970y8mo306783379w2d2562047h47m16.854775808s\",\n\t\t\t\"-178956970y8mo306783378w3d2562047h47m16.854775808s\",\n\t\t\t\"-178956970y8mo306783378w2d2562048h47m16.854775808s\",\n\t\t\t\"-178956970y8mo306783378w2d2562047h48m16.854775808s\",\n\t\t\t\"-178956970y8mo306783378w2d2562047h47m17.854775808s\",\n\t\t\t\"-178956970y8mo306783378w2d2562047h47m16.854775809s\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"big_vals\", t, marshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\xf1\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"big_data_month1\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\xf1\\x00\\x00\\x00\\x01\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"big_data_month2\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\xf1\\x00\\x00\\x00\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"big_data_day1\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\xf1\\x00\\x00\\x00\\x01\\x00\"),\n\t\tValues: 
mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"big_data_day2\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x01\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0),\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"big_data_nano1\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x01\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0),\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"big_data_nano2\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x01\\x00\\x41\\xfd\\xfc\\x9b\\xc5\\xc4\\x9e\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"big_data_nano3\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\xc3\\x41\\xfd\\xfc\\x9b\\xc5\\xc4\\x9e\\x00\\x01\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0),\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"big_data_nano4\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"big_data_len1\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfe\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"big_data_len2\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfd\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"big_data_len3\", t, 
unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_len1\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_len2\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\xf0\\xff\\xff\\xff\\xfe\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_len2\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\xf0\\xff\\xff\\xff\\xfe\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_len3\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_len_nanos\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\xf0\\xff\\xff\\xff\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_len_days\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\xf0\\xff\\xff\\xff\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_len_months\", t, unmarshal)\n}\n"
  },
  {
    "path": "tests/serialization/marshal_18_duration_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n)\n\nfunc TestMarshalsDuration(t *testing.T) {\n\ttType := gocql.NewNativeType(4, gocql.TypeDuration)\n\n\tconst nanoDay = 24 * 60 * 60 * 1000 * 1000 * 1000\n\n\tmarshal := func(i any) ([]byte, error) { return gocql.Marshal(tType, i) }\n\tunmarshal := func(bytes []byte, i any) error { return gocql.Unmarshal(tType, bytes, i) }\n\n\tserialization.PositiveSet{\n\t\tData: nil,\n\t\tValues: mod.Values{\n\t\t\t(*int64)(nil), (*time.Duration)(nil), (*string)(nil), \"\", (*gocql.Duration)(nil),\n\t\t}.AddVariants(mod.CustomType),\n\t\tBrokenUnmarshalTypes: serialization.GetTypes(int64(0)),\n\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: nil,\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), gocql.Duration{},\n\t\t}.AddVariants(mod.CustomType),\n\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: make([]byte, 0),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"0s\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tint64(0), time.Duration(0), \"0s\", gocql.Duration{},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t// sets for months\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x02\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 1, Days: 0, Nanoseconds: 0},\n\t\t\t\"1mo\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"months1\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x01\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: -1, Days: 
0, Nanoseconds: 0},\n\t\t\t\"-1mo\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"months-1\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x80\\xfe\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: math.MaxInt8, Days: 0, Nanoseconds: 0},\n\t\t\t\"10y7mo\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"monthsMaxInt8\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x80\\xff\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: math.MinInt8, Days: 0, Nanoseconds: 0},\n\t\t\t\"-10y8mo\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"monthsMinInt8\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x81\\xfe\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: math.MaxUint8, Days: 0, Nanoseconds: 0},\n\t\t\t\"21y3mo\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"monthsMaxUint8\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x81\\xfd\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: -math.MaxUint8, Days: 0, Nanoseconds: 0},\n\t\t\t\"-21y3mo\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"monthsMinUint8\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\xc0\\xff\\xfe\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: math.MaxInt16, Days: 0, Nanoseconds: 0},\n\t\t\t\"2730y7mo\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"monthsMaxInt16\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\xc0\\xff\\xff\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: math.MinInt16, Days: 0, Nanoseconds: 0},\n\t\t\t\"-2730y8mo\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"monthsMinInt16\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\xc1\\xff\\xfe\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: math.MaxUint16, Days: 0, Nanoseconds: 
0},\n\t\t\t\"5461y3mo\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"monthsMaxUint16\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\xc1\\xff\\xfd\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: -math.MaxUint16, Days: 0, Nanoseconds: 0},\n\t\t\t\"-5461y3mo\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"monthsMinUint16\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\xf0\\xff\\xff\\xff\\xfe\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: math.MaxInt32, Days: 0, Nanoseconds: 0},\n\t\t\t\"178956970y7mo\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"monthsMaxInt32\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\xf0\\xff\\xff\\xff\\xff\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: math.MinInt32, Days: 0, Nanoseconds: 0},\n\t\t\t\"-178956970y8mo\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"monthsMinInt32\", t, marshal, unmarshal)\n\n\t// sets for days\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x02\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 0, Days: 1, Nanoseconds: 0},\n\t\t\t\"1d\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"days1\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x01\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 0, Days: -1, Nanoseconds: 0},\n\t\t\t\"-1d\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"days-1\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x80\\xfe\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 0, Days: math.MaxInt8, Nanoseconds: 0},\n\t\t\t\"18w1d\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"daysMaxInt8\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x80\\xff\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 0, Days: math.MinInt8, Nanoseconds: 
0},\n\t\t\t\"-18w2d\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"daysMinInt8\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x81\\xfe\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 0, Days: math.MaxUint8, Nanoseconds: 0},\n\t\t\t\"36w3d\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"daysMaxUint8\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x81\\xfd\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 0, Days: -math.MaxUint8, Nanoseconds: 0},\n\t\t\t\"-36w3d\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"daysMinUint8\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\xc0\\xff\\xfe\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 0, Days: math.MaxInt16, Nanoseconds: 0},\n\t\t\t\"4680w7d\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"daysMaxInt16\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\xc0\\xff\\xff\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 0, Days: math.MinInt16, Nanoseconds: 0},\n\t\t\t\"-4681w1d\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"daysMinInt16\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\xc1\\xff\\xfe\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 0, Days: math.MaxUint16, Nanoseconds: 0},\n\t\t\t\"9362w1d\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"daysMaxUint16\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\xc1\\xff\\xfd\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 0, Days: -math.MaxUint16, Nanoseconds: 0},\n\t\t\t\"-9362w1d\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"daysMinUint16\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\xf0\\xff\\xff\\xff\\xfe\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 0, Days: math.MaxInt32, Nanoseconds: 
0},\n\t\t\t\"306783378w1d\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"daysMaxInt32\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\xf0\\xff\\xff\\xff\\xff\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 0, Days: math.MinInt32, Nanoseconds: 0},\n\t\t\t\"-306783378w2d\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"daysMinInt32\", t, marshal, unmarshal)\n\n\t//sets for nanoseconds\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\x02\"),\n\t\tValues: mod.Values{\n\t\t\tint64(1), time.Duration(1), time.Duration(1).String(),\n\t\t\tgocql.Duration{Months: 0, Days: 0, Nanoseconds: 1},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanos1\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\x01\"),\n\t\tValues: mod.Values{\n\t\t\tint64(-1), time.Duration(-1), time.Duration(-1).String(),\n\t\t\tgocql.Duration{Months: 0, Days: 0, Nanoseconds: -1},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanos-1\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\x80\\xfe\"),\n\t\tValues: mod.Values{\n\t\t\tint64(math.MaxInt8), time.Duration(math.MaxInt8), time.Duration(math.MaxInt8).String(),\n\t\t\tgocql.Duration{Months: 0, Days: 0, Nanoseconds: math.MaxInt8},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanosMaxInt8\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\x80\\xff\"),\n\t\tValues: mod.Values{\n\t\t\tint64(math.MinInt8), time.Duration(math.MinInt8), time.Duration(math.MinInt8).String(),\n\t\t\tgocql.Duration{Months: 0, Days: 0, Nanoseconds: math.MinInt8},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanosMinInt8\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\x81\\xfe\"),\n\t\tValues: mod.Values{\n\t\t\tint64(math.MaxUint8), time.Duration(math.MaxUint8), time.Duration(math.MaxUint8).String(),\n\t\t\tgocql.Duration{Months: 0, Days: 0, Nanoseconds: 
math.MaxUint8},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanosMaxUint8\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\x81\\xfd\"),\n\t\tValues: mod.Values{\n\t\t\tint64(-math.MaxUint8), time.Duration(-math.MaxUint8), time.Duration(-math.MaxUint8).String(),\n\t\t\tgocql.Duration{Months: 0, Days: 0, Nanoseconds: -math.MaxUint8},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanosMinUint8\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\xc0\\xff\\xfe\"),\n\t\tValues: mod.Values{\n\t\t\tint64(math.MaxInt16), time.Duration(math.MaxInt16), \"32.767µs\",\n\t\t\tgocql.Duration{Months: 0, Days: 0, Nanoseconds: math.MaxInt16},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanosMaxInt16\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\xc0\\xff\\xff\"),\n\t\tValues: mod.Values{\n\t\t\tint64(math.MinInt16), time.Duration(math.MinInt16), time.Duration(math.MinInt16).String(),\n\t\t\tgocql.Duration{Months: 0, Days: 0, Nanoseconds: math.MinInt16},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanosMinInt16\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\xc1\\xff\\xfe\"),\n\t\tValues: mod.Values{\n\t\t\tint64(math.MaxUint16), time.Duration(math.MaxUint16), time.Duration(math.MaxUint16).String(),\n\t\t\tgocql.Duration{Months: 0, Days: 0, Nanoseconds: math.MaxUint16},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanosMaxUint16\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\xc1\\xff\\xfd\"),\n\t\tValues: mod.Values{\n\t\t\tint64(-math.MaxUint16), time.Duration(-math.MaxUint16), time.Duration(-math.MaxUint16).String(),\n\t\t\tgocql.Duration{Months: 0, Days: 0, Nanoseconds: -math.MaxUint16},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanosMinUint16\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\xf0\\xff\\xff\\xff\\xfe\"),\n\t\tValues: 
mod.Values{\n\t\t\tint64(math.MaxInt32), time.Duration(math.MaxInt32), time.Duration(math.MaxInt32).String(),\n\t\t\tgocql.Duration{Months: 0, Days: 0, Nanoseconds: math.MaxInt32},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanosMaxInt32\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\xf0\\xff\\xff\\xff\\xff\"),\n\t\tValues: mod.Values{\n\t\t\tint64(math.MinInt32), time.Duration(math.MinInt32), time.Duration(math.MinInt32).String(),\n\t\t\tgocql.Duration{Months: 0, Days: 0, Nanoseconds: math.MinInt32},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanosMinInt32\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfe\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 0, Days: 0, Nanoseconds: math.MaxInt64},\n\t\t\t\"2562047h47m16.854775807s\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanosMaxInt64\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Months: 0, Days: 0, Nanoseconds: math.MinInt64},\n\t\t\t\"-2562047h47m16.854775808s\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanosMinInt64\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\xc3\\x41\\xfe\\xfc\\x9b\\xc5\\xc4\\x9d\\xff\\xfe\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Days: 106751, Months: 0, Nanoseconds: 85636854775807},\n\t\t\tint64(math.MaxInt64), time.Duration(math.MaxInt64),\n\t\t\t\"15250w1d23h47m16.854775807s\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanosMax\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\xc3\\x41\\xfd\\xfc\\x9b\\xc5\\xc4\\x9d\\xff\\xff\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Days: -106751, Months: 0, Nanoseconds: -85636854775808},\n\t\t\tint64(math.MinInt64), 
time.Duration(math.MinInt64),\n\t\t\t\"-15250w1d23h47m16.854775808s\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"nanosMin\", t, marshal, unmarshal)\n\n\t// sets for full range\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x02\\x02\\x02\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Days: 1, Months: 1, Nanoseconds: 1},\n\t\t\t\"1mo1d1ns\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"111\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x01\\x01\\x01\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Days: -1, Months: -1, Nanoseconds: -1},\n\t\t\t\"-1mo1d1ns\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"-111\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\xf0\\xff\\xff\\xff\\xfe\\xf0\\xff\\xff\\xff\\xfe\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfe\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Days: math.MaxInt32, Months: math.MaxInt32, Nanoseconds: math.MaxInt64},\n\t\t\t\"178956970y7mo306783378w1d2562047h47m16.854775807s\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"max\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\xf0\\xff\\xff\\xff\\xff\\xf0\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\tValues: mod.Values{\n\t\t\tgocql.Duration{Days: math.MinInt32, Months: math.MinInt32, Nanoseconds: math.MinInt64},\n\t\t\t\"-178956970y8mo306783378w2d2562047h47m16.854775808s\",\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"min\", t, marshal, unmarshal)\n}\n"
  },
  {
    "path": "tests/serialization/marshal_19_list_set_v3_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n)\n\nfunc TestMarshalSetListV3Corrupt(t *testing.T) {\n\tt.Parallel()\n\n\telem := gocql.NewNativeType(3, gocql.TypeSmallInt)\n\ttTypes := []gocql.TypeInfo{\n\t\tgocql.NewCollectionType(gocql.NewNativeType(3, gocql.TypeList), nil, elem),\n\t\tgocql.NewCollectionType(gocql.NewNativeType(3, gocql.TypeSet), nil, elem),\n\t}\n\n\t// unmarshal data than bigger the normal data, does not return error.\n\tbrokenBigData := serialization.GetTypes(mod.Values{\n\t\t[]int16{}, []*int16{},\n\t\t[]mod.Int16{}, []*mod.Int16{},\n\t\t[1]int16{}, [1]*int16{},\n\t\t[1]mod.Int16{}, [1]*mod.Int16{},\n\t}.AddVariants(mod.All...)...)\n\n\tbrokenBigDataSlices := serialization.GetTypes(mod.Values{\n\t\t[]int16{}, []*int16{},\n\t\t[]mod.Int16{}, []*mod.Int16{},\n\t}.AddVariants(mod.All...)...)\n\n\trefInt32 := func(v int32) *int32 { return &v }\n\trefModInt32 := func(v mod.Int32) *mod.Int32 { return &v }\n\n\tfor _, tType := range tTypes {\n\t\tmarshal := func(i any) ([]byte, error) { return gocql.Marshal(tType, i) }\n\t\tunmarshal := func(bytes []byte, i any) error {\n\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t}\n\n\t\tt.Run(tType.Type().String(), func(t *testing.T) {\n\n\t\t\tval := int32(math.MaxInt16 + 1)\n\t\t\tvalc := mod.Int32(val)\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]int32{val}, []*int32{refInt32(val)},\n\t\t\t\t\t[1]int32{val}, [1]*int32{refInt32(val)},\n\t\t\t\t\t[]mod.Int32{valc}, []*mod.Int32{refModInt32(valc)},\n\t\t\t\t\t[1]mod.Int32{valc}, [1]*mod.Int32{refModInt32(valc)},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_vals\", t, marshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: 
[]byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\xff\\xff\\x01\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]int16{}, []*int16{},\n\t\t\t\t\t[]mod.Int16{}, []*mod.Int16{},\n\t\t\t\t\t[1]int16{}, [1]*int16{},\n\t\t\t\t\t[1]mod.Int16{}, [1]*mod.Int16{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t\tBrokenTypes: brokenBigData,\n\t\t\t}.Run(\"big_data_elem1+\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]int16{}, []*int16{},\n\t\t\t\t\t[]mod.Int16{}, []*mod.Int16{},\n\t\t\t\t\t[1]int16{}, [1]*int16{},\n\t\t\t\t\t[1]mod.Int16{}, [1]*mod.Int16{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t\tBrokenTypes: brokenBigData,\n\t\t\t}.Run(\"big_data_zeroElem1+\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x01\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]int16{}, []*int16{},\n\t\t\t\t\t[]mod.Int16{}, []*mod.Int16{},\n\t\t\t\t\t[1]int16{}, [1]*int16{},\n\t\t\t\t\t[1]mod.Int16{}, [1]*mod.Int16{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t\tBrokenTypes: brokenBigDataSlices,\n\t\t\t}.Run(\"big_data_elem0+\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]int16{}, []*int16{},\n\t\t\t\t\t[]mod.Int16{}, []*mod.Int16{},\n\t\t\t\t\t[1]int16{}, [1]*int16{},\n\t\t\t\t\t[1]mod.Int16{}, [1]*mod.Int16{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data_elem_value-\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]int16{}, []*int16{},\n\t\t\t\t\t[]mod.Int16{}, []*mod.Int16{},\n\t\t\t\t\t[1]int16{}, [1]*int16{},\n\t\t\t\t\t[1]mod.Int16{}, [1]*mod.Int16{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data_elem_value--\", t, 
unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]int16{}, []*int16{},\n\t\t\t\t\t[]mod.Int16{}, []*mod.Int16{},\n\t\t\t\t\t[1]int16{}, [1]*int16{},\n\t\t\t\t\t[1]mod.Int16{}, [1]*mod.Int16{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data_elem_len-\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]int16{}, []*int16{},\n\t\t\t\t\t[]mod.Int16{}, []*mod.Int16{},\n\t\t\t\t\t[1]int16{}, [1]*int16{},\n\t\t\t\t\t[1]mod.Int16{}, [1]*mod.Int16{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data_elem-\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]int16{}, []*int16{},\n\t\t\t\t\t[]mod.Int16{}, []*mod.Int16{},\n\t\t\t\t\t[1]int16{}, [1]*int16{},\n\t\t\t\t\t[1]mod.Int16{}, [1]*mod.Int16{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data_elems-\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[1]int16{}, [1]*int16{},\n\t\t\t\t\t[1]mod.Int16{}, [1]*mod.Int16{},\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"nil_data_to_array\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[1]int16{}, [1]*int16{},\n\t\t\t\t\t[1]mod.Int16{}, [1]*mod.Int16{},\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"zero_data_to_array\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[1]int16{}, [1]*int16{},\n\t\t\t\t\t[1]mod.Int16{}, [1]*mod.Int16{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zero_elems_to_array\", t, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_19_list_set_v3_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n)\n\nfunc TestMarshalSetListV3(t *testing.T) {\n\tt.Parallel()\n\n\telem := gocql.NewNativeType(3, gocql.TypeSmallInt)\n\n\ttTypes := []gocql.TypeInfo{\n\t\tgocql.NewCollectionType(gocql.NewNativeType(3, gocql.TypeList), nil, elem),\n\t\tgocql.NewCollectionType(gocql.NewNativeType(3, gocql.TypeSet), nil, elem),\n\t}\n\n\t// unmarshal `zero` data return an error\n\tbrokenZeroDataUnmarshal := serialization.GetTypes(mod.Values{\n\t\t[]int16{}, []*int16{},\n\t\t[]mod.Int16{}, []*mod.Int16{},\n\t\t&[]int16{}, &[]*int16{},\n\t\t&[]mod.Int16{}, &[]*mod.Int16{},\n\t\t(*[1]int16)(nil), (*[1]*int16)(nil),\n\t\t(*[1]mod.Int16)(nil), (*[1]*mod.Int16)(nil),\n\t}.AddVariants(mod.CustomType)...)\n\n\trefInt16 := func(v int16) *int16 { return &v }\n\trefModInt16 := func(v mod.Int16) *mod.Int16 { return &v }\n\n\tfor _, tType := range tTypes {\n\t\tmarshal := func(i any) ([]byte, error) { return gocql.Marshal(tType, i) }\n\t\tunmarshal := func(bytes []byte, i any) error {\n\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t}\n\n\t\tt.Run(tType.Type().String(), func(t *testing.T) {\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t([]int16)(nil), ([]*int16)(nil),\n\t\t\t\t\t([]mod.Int16)(nil), ([]*mod.Int16)(nil),\n\t\t\t\t\t(*[]int16)(nil), (*[]*int16)(nil),\n\t\t\t\t\t(*[]mod.Int16)(nil), (*[]*mod.Int16)(nil),\n\t\t\t\t\t(*[1]int16)(nil), (*[1]*int16)(nil),\n\t\t\t\t\t(*[1]mod.Int16)(nil), (*[1]*mod.Int16)(nil),\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]int16{}, []*int16{},\n\t\t\t\t\t[]mod.Int16{}, 
[]*mod.Int16{},\n\t\t\t\t\t&[]int16{}, &[]*int16{},\n\t\t\t\t\t&[]mod.Int16{}, &[]*mod.Int16{},\n\t\t\t\t\t(*[1]int16)(nil), (*[1]*int16)(nil),\n\t\t\t\t\t(*[1]mod.Int16)(nil), (*[1]*mod.Int16)(nil),\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t\tBrokenUnmarshalTypes: brokenZeroDataUnmarshal,\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]int16{}, []*int16{},\n\t\t\t\t\t[]mod.Int16{}, []*mod.Int16{},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zero elems\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]int16{0}, []*int16{refInt16(0)},\n\t\t\t\t\t[]mod.Int16{0}, []*mod.Int16{refModInt16(0)},\n\t\t\t\t\t[1]int16{0}, [1]*int16{refInt16(0)},\n\t\t\t\t\t[1]mod.Int16{0}, [1]*mod.Int16{refModInt16(0)},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]{0}\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]int16{0}, []*int16{refInt16(0)},\n\t\t\t\t\t[]mod.Int16{0}, []*mod.Int16{refModInt16(0)},\n\t\t\t\t\t[1]int16{0}, [1]*int16{refInt16(0)},\n\t\t\t\t\t[1]mod.Int16{0}, [1]*mod.Int16{refModInt16(0)},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]{zero elem}unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x7f\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t[]int16{32767}, []*int16{refInt16(32767)},\n\t\t\t\t\t[]mod.Int16{32767}, []*mod.Int16{refModInt16(32767)},\n\t\t\t\t\t[1]int16{32767}, [1]*int16{refInt16(32767)},\n\t\t\t\t\t[1]mod.Int16{32767}, [1]*mod.Int16{refModInt16(32767)},\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]{max}\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_1_boolean_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gocql/gocql/serialization/boolean\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n)\n\nfunc TestMarshalBooleanCorrupt(t *testing.T) {\n\tt.Parallel()\n\n\ttType := gocql.NewNativeType(4, gocql.TypeBoolean)\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.boolean\",\n\t\t\tmarshal:   boolean.Marshal,\n\t\t\tunmarshal: boolean.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tfalse,\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_data\", t, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_1_boolean_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/boolean\"\n)\n\nfunc TestMarshalBoolean(t *testing.T) {\n\tt.Parallel()\n\n\ttType := gocql.NewNativeType(4, gocql.TypeBoolean)\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.boolean\",\n\t\t\tmarshal:   boolean.Marshal,\n\t\t\tunmarshal: boolean.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   nil,\n\t\t\t\tValues: mod.Values{(*bool)(nil)}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   nil,\n\t\t\t\tValues: mod.Values{false}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   make([]byte, 0),\n\t\t\t\tValues: mod.Values{false}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x00\"),\n\t\t\t\tValues: mod.Values{false}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x01\"),\n\t\t\t\tValues: 
mod.Values{true}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[1]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\xff\"),\n\t\t\t\tValues: mod.Values{true}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[255]\", t, nil, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_20_map_v3_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n)\n\nfunc TestMarshalMapV3Corrupt(t *testing.T) {\n\tt.Parallel()\n\n\telem := gocql.NewNativeType(3, gocql.TypeSmallInt)\n\ttType := gocql.NewCollectionType(gocql.NewNativeType(3, gocql.TypeMap), elem, elem)\n\n\t//unmarshal data than bigger the normal data, does not return error.\n\tbrokenBigData := serialization.GetTypes(mod.Values{\n\t\tmake(map[int16]int16), make(map[int16]*int16),\n\t\tmake(map[mod.Int16]mod.Int16), make(map[mod.Int16]*mod.Int16),\n\t}.AddVariants(mod.All...)...)\n\n\trefInt32 := func(v int32) *int32 { return &v }\n\trefModInt32 := func(v mod.Int32) *mod.Int32 { return &v }\n\n\tmarshal := func(i any) ([]byte, error) { return gocql.Marshal(tType, i) }\n\tunmarshal := func(bytes []byte, i any) error {\n\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t}\n\n\tval := int32(math.MaxInt16 + 1)\n\tvalc := mod.Int32(val)\n\tserialization.NegativeMarshalSet{\n\t\tValues: mod.Values{\n\t\t\tmap[int32]int32{val: val}, map[int32]int32{val: 0}, map[int32]int32{0: val},\n\t\t\tmap[int32]*int32{val: refInt32(val)}, map[int32]*int32{val: refInt32(0)}, map[int32]*int32{0: refInt32(val)},\n\t\t\tmap[mod.Int32]mod.Int32{valc: valc}, map[mod.Int32]mod.Int32{valc: 0}, map[mod.Int32]mod.Int32{0: valc},\n\t\t\tmap[mod.Int32]*mod.Int32{valc: refModInt32(valc)}, map[mod.Int32]*mod.Int32{valc: refModInt32(0)}, map[mod.Int32]*mod.Int32{0: refModInt32(valc)},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"big_vals\", t, marshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\xff\\xff\\x00\\x00\\x00\\x02\\xff\\xff\\x01\"),\n\t\tValues: mod.Values{\n\t\t\tmake(map[int16]int16), make(map[int16]*int16),\n\t\t\tmake(map[mod.Int16]mod.Int16), 
make(map[mod.Int16]*mod.Int16),\n\t\t}.AddVariants(mod.All...),\n\t\tBrokenTypes: brokenBigData,\n\t}.Run(\"big_data_elem1+\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xff\"),\n\t\tValues: mod.Values{\n\t\t\tmake(map[int16]int16), make(map[int16]*int16),\n\t\t\tmake(map[mod.Int16]mod.Int16), make(map[mod.Int16]*mod.Int16),\n\t\t}.AddVariants(mod.All...),\n\t\tBrokenTypes: brokenBigData,\n\t}.Run(\"big_data_zeroElem1+\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x01\"),\n\t\tValues: mod.Values{\n\t\t\tmake(map[int16]int16), make(map[int16]*int16),\n\t\t\tmake(map[mod.Int16]mod.Int16), make(map[mod.Int16]*mod.Int16),\n\t\t}.AddVariants(mod.All...),\n\t\tBrokenTypes: brokenBigData,\n\t}.Run(\"big_data_elems0+\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\xff\\xff\\x00\\x00\\x00\\x02\\xff\"),\n\t\tValues: mod.Values{\n\t\t\tmake(map[int16]int16), make(map[int16]*int16),\n\t\t\tmake(map[mod.Int16]mod.Int16), make(map[mod.Int16]*mod.Int16),\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_val_value-\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\xff\\xff\\x00\\x00\\x00\\x02\"),\n\t\tValues: mod.Values{\n\t\t\tmake(map[int16]int16), make(map[int16]*int16),\n\t\t\tmake(map[mod.Int16]mod.Int16), make(map[mod.Int16]*mod.Int16),\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_val_len\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\xff\\xff\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tmake(map[int16]int16), make(map[int16]*int16),\n\t\t\tmake(map[mod.Int16]mod.Int16), make(map[mod.Int16]*mod.Int16),\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_val_len-\", t, 
unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\xff\\xff\"),\n\t\tValues: mod.Values{\n\t\t\tmake(map[int16]int16), make(map[int16]*int16),\n\t\t\tmake(map[mod.Int16]mod.Int16), make(map[mod.Int16]*mod.Int16),\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_val-\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\xff\"),\n\t\tValues: mod.Values{\n\t\t\tmake(map[int16]int16), make(map[int16]*int16),\n\t\t\tmake(map[mod.Int16]mod.Int16), make(map[mod.Int16]*mod.Int16),\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_key_value-\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\"),\n\t\tValues: mod.Values{\n\t\t\tmake(map[int16]int16), make(map[int16]*int16),\n\t\t\tmake(map[mod.Int16]mod.Int16), make(map[mod.Int16]*mod.Int16),\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_key_len\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tmake(map[int16]int16), make(map[int16]*int16),\n\t\t\tmake(map[mod.Int16]mod.Int16), make(map[mod.Int16]*mod.Int16),\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_key_len-\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x01\"),\n\t\tValues: mod.Values{\n\t\t\tmake(map[int16]int16), make(map[int16]*int16),\n\t\t\tmake(map[mod.Int16]mod.Int16), make(map[mod.Int16]*mod.Int16),\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_pair-\", t, unmarshal)\n\n\tserialization.NegativeUnmarshalSet{\n\t\tData: []byte(\"\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tmake(map[int16]int16), make(map[int16]*int16),\n\t\t\tmake(map[mod.Int16]mod.Int16), make(map[mod.Int16]*mod.Int16),\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"small_data_elems-\", t, unmarshal)\n}\n"
  },
  {
    "path": "tests/serialization/marshal_20_map_v3_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n)\n\nfunc TestMarshalMapV3(t *testing.T) {\n\tt.Parallel()\n\n\telem := gocql.NewNativeType(3, gocql.TypeSmallInt)\n\ttType := gocql.NewCollectionType(gocql.NewNativeType(3, gocql.TypeMap), elem, elem)\n\n\trefInt16 := func(v int16) *int16 { return &v }\n\trefModInt16 := func(v mod.Int16) *mod.Int16 { return &v }\n\n\tmarshal := func(i any) ([]byte, error) { return gocql.Marshal(tType, i) }\n\tunmarshal := func(bytes []byte, i any) error {\n\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t}\n\n\tserialization.PositiveSet{\n\t\tData: nil,\n\t\tValues: mod.Values{\n\t\t\t(map[int16]int16)(nil), (map[int16]*int16)(nil),\n\t\t\t(map[mod.Int16]mod.Int16)(nil), (map[mod.Int16]*mod.Int16)(nil),\n\t\t\t(*map[int16]int16)(nil), (*map[int16]*int16)(nil),\n\t\t\t(*map[mod.Int16]mod.Int16)(nil), (*map[mod.Int16]*mod.Int16)(nil),\n\t\t}.AddVariants(mod.CustomType),\n\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tmake(map[int16]int16), make(map[int16]*int16),\n\t\t\tmake(map[mod.Int16]mod.Int16), make(map[mod.Int16]*mod.Int16),\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"zero elems\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tmap[int16]int16{0: 0}, map[int16]*int16{0: refInt16(0)},\n\t\t\tmap[mod.Int16]mod.Int16{0: 0}, map[mod.Int16]*mod.Int16{0: refModInt16(0)},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"[]{zero elem}unmarshal\", t, nil, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: 
[]byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\"),\n\t\tValues: mod.Values{\n\t\t\tmap[int16]int16{0: 0}, map[int16]*int16{0: refInt16(0)},\n\t\t\tmap[mod.Int16]mod.Int16{0: 0}, map[mod.Int16]*mod.Int16{0: refModInt16(0)},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"[]{0:0}\", t, marshal, unmarshal)\n\n\tserialization.PositiveSet{\n\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x7f\\xff\\x00\\x00\\x00\\x02\\x7f\\xff\"),\n\t\tValues: mod.Values{\n\t\t\tmap[int16]int16{32767: 32767}, map[int16]*int16{32767: refInt16(32767)},\n\t\t\tmap[mod.Int16]mod.Int16{32767: 32767}, map[mod.Int16]*mod.Int16{32767: refModInt16(32767)},\n\t\t}.AddVariants(mod.All...),\n\t}.Run(\"[]{max:max}\", t, marshal, unmarshal)\n}\n"
  },
  {
    "path": "tests/serialization/marshal_2_tinyint_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/tinyint\"\n)\n\nfunc TestMarshalTinyintCorrupt(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeTinyInt)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.tinyint\",\n\t\t\tmarshal:   tinyint.Marshal,\n\t\t\tunmarshal: tinyint.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(128), int32(128), int64(128), int(128),\n\t\t\t\t\t\"128\", *big.NewInt(128),\n\t\t\t\t\tint16(-129), int32(-129), int64(-129), int(-129),\n\t\t\t\t\t\"-129\", *big.NewInt(-129),\n\t\t\t\t\tuint16(256), uint32(256), uint64(256), uint(256),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_vals\", t, marshal)\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\"1s2\", \"1s\", \"-1s\", \".1\", \",1\", \"0.1\", \"0,1\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"corrupt_vals\", t, marshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), 
uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_data\", t, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_2_tinyint_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/tinyint\"\n)\n\nfunc TestMarshalTinyint(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeTinyInt)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.tinyint\",\n\t\t\tmarshal:   tinyint.Marshal,\n\t\t\tunmarshal: tinyint.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t(*int8)(nil), (*int16)(nil), (*int32)(nil), (*int64)(nil), (*int)(nil),\n\t\t\t\t\t(*uint8)(nil), (*uint16)(nil), (*uint32)(nil), (*uint64)(nil), (*uint)(nil),\n\t\t\t\t\t(*string)(nil), (*big.Int)(nil), string(\"\"),\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"\", big.Int{},\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tValues: 
mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"0\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"0\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x01\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(1), int16(1), int32(1), int64(1), int(1),\n\t\t\t\t\tuint8(1), uint16(1), uint32(1), uint64(1), uint(1),\n\t\t\t\t\t\"1\", *big.NewInt(1)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\xff\"),\n\t\t\t\tValues: mod.Values{int8(-1), int16(-1), int32(-1), int64(-1), int(-1), \"-1\", *big.NewInt(-1)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x7f\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(127), int16(127), int32(127), int64(127), int(127),\n\t\t\t\t\tuint8(127), uint16(127), uint32(127), uint64(127), uint(127),\n\t\t\t\t\t\"127\", *big.NewInt(127)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"127\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x80\"),\n\t\t\t\tValues: mod.Values{int8(-128), int16(-128), int32(-128), int64(-128), int(-128), \"-128\", *big.NewInt(-128)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"-128\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\xff\"),\n\t\t\t\tValues: mod.Values{uint8(255), uint16(255), uint32(255), uint64(255), uint(255)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"255\", t, marshal, 
unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_3_smallint_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/smallint\"\n)\n\nfunc TestMarshalSmallintCorrupt(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeSmallInt)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.smallint\",\n\t\t\tmarshal:   smallint.Marshal,\n\t\t\tunmarshal: smallint.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(32768), int64(32768), int(32768),\n\t\t\t\t\t\"32768\", *big.NewInt(32768),\n\t\t\t\t\tint32(-32769), int64(-32769), int(-32769),\n\t\t\t\t\t\"-32769\", *big.NewInt(-32769),\n\t\t\t\t\tuint32(65536), uint64(65536), uint(65536),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_vals\", t, marshal)\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\"1s2\", \"1s\", \"-1s\", \".1\", \",1\", \"0.1\", \"0,1\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"corrupt_vals\", t, marshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), 
uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_data\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x80\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x00\\x80\"),\n\t\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_int8_128\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x7f\\xff\"),\n\t\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_int8_32767\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\xff\\x7f\"),\n\t\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_int8_-129\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x7f\\xff\"),\n\t\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_int8_-32768\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x01\\x00\"),\n\t\t\t\tValues: mod.Values{uint8(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_uint_256\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{uint8(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_uint_65535\", t, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_3_smallint_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/smallint\"\n)\n\nfunc TestMarshalSmallint(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeSmallInt)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.smallint\",\n\t\t\tmarshal:   smallint.Marshal,\n\t\t\tunmarshal: smallint.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t(*int8)(nil), (*int16)(nil), (*int32)(nil), (*int64)(nil), (*int)(nil),\n\t\t\t\t\t(*uint8)(nil), (*uint16)(nil), (*uint32)(nil), (*uint64)(nil), (*uint)(nil),\n\t\t\t\t\t(*string)(nil), (*big.Int)(nil), \"\",\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"\", big.Int{},\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tValues: 
mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"0\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"0\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x01\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(1), int16(1), int32(1), int64(1), int(1),\n\t\t\t\t\tuint8(1), uint16(1), uint32(1), uint64(1), uint(1),\n\t\t\t\t\t\"1\", *big.NewInt(1),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(-1), int16(-1), int32(-1), int64(-1), int(-1),\n\t\t\t\t\t\"-1\", *big.NewInt(-1),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x7f\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(127), int16(127), int32(127), int64(127), int(127),\n\t\t\t\t\tuint16(127), uint32(127), uint64(127), uint(127), uint(127),\n\t\t\t\t\t\"127\", *big.NewInt(127),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\x80\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(-128), int16(-128), int32(-128), int64(-128), int(-128),\n\t\t\t\t\t\"-128\", *big.NewInt(-128),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x80\"),\n\t\t\t\tValues: 
mod.Values{\n\t\t\t\t\tint16(128), int32(128), int64(128), int(128),\n\t\t\t\t\tuint16(128), uint32(128), uint64(128), uint(128), uint(128),\n\t\t\t\t\t\"128\", *big.NewInt(128)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt8+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\x7f\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(-129), int32(-129), int64(-129), int(-129),\n\t\t\t\t\t\"-129\", *big.NewInt(-129),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt8-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x7f\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(32767), int32(32767), int64(32767), int(32767),\n\t\t\t\t\tuint16(32767), uint32(32767), uint64(32767), uint(32767),\n\t\t\t\t\t\"32767\", *big.NewInt(32767)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt16\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(-32768), int32(-32768), int64(-32768), int(-32768),\n\t\t\t\t\t\"-32768\", *big.NewInt(-32768),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt16\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint8(255), uint16(255), uint32(255), uint64(255), uint(255),\n\t\t\t\t\tint16(255), int32(255), int64(255), int(255),\n\t\t\t\t\t\"255\", *big.NewInt(255),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x01\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint16(256), uint32(256), uint64(256), uint(256),\n\t\t\t\t\tint16(256), int32(256), int64(256), int(256),\n\t\t\t\t\t\"256\", *big.NewInt(256),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint8+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: 
[]byte(\"\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint16(65535), uint32(65535), uint64(65535), uint(65535),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint16\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_4_int_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/cqlint\"\n)\n\nfunc TestMarshalIntCorrupt(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeInt)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.int\",\n\t\t\tmarshal:   cqlint.Marshal,\n\t\t\tunmarshal: cqlint.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(2147483648), int(2147483648),\n\t\t\t\t\t\"2147483648\", *big.NewInt(2147483648),\n\t\t\t\t\tint64(-2147483649), int(-2147483649),\n\t\t\t\t\t\"-2147483649\", *big.NewInt(-2147483649),\n\t\t\t\t\tuint64(4294967296), uint(4294967296),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_vals\", t, marshal)\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\"1s2\", \"1s\", \"-1s\", \".1\", \",1\", \"0.1\", \"0,1\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"corrupt_vals\", t, marshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), 
uint64(0), uint(0),\n\t\t\t\t\t\"\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_data\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x80\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data2\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x80\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{int8(0), int16(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_types_int_2147483648\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x7f\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{int8(0), int16(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_types_int_-2147483647\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x00\\x00\\x80\\x00\"),\n\t\t\t\tValues: mod.Values{int8(0), int16(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_types_int_32768\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\xff\\xff\\x7f\\xff\"),\n\t\t\t\tValues: mod.Values{int8(0), int16(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_types_int_-32769\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x80\"),\n\t\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_int8_128\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   
[]byte(\"\\xff\\xff\\xff\\x7f\"),\n\t\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_int8_-129\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{uint8(0), uint16(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_types_uint_4294967295\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x00\\x01\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{uint8(0), uint16(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_types_uint_65536\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x00\\x00\\x01\\x00\"),\n\t\t\t\tValues: mod.Values{uint8(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_uint_256\", t, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_4_int_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/cqlint\"\n)\n\nfunc TestMarshalInt(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeInt)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.int\",\n\t\t\tmarshal:   cqlint.Marshal,\n\t\t\tunmarshal: cqlint.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t(*int8)(nil), (*int16)(nil), (*int32)(nil), (*int64)(nil), (*int)(nil),\n\t\t\t\t\t(*uint8)(nil), (*uint16)(nil), (*uint32)(nil), (*uint64)(nil), (*uint)(nil),\n\t\t\t\t\t(*string)(nil), (*big.Int)(nil), \"\",\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"\", big.Int{},\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tValues: 
mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"0\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"0\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(1), int16(1), int32(1), int64(1), int(1),\n\t\t\t\t\tuint8(1), uint16(1), uint32(1), uint64(1), uint(1),\n\t\t\t\t\t\"1\", *big.NewInt(1),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(-1), int32(-1), int64(-1), int(-1),\n\t\t\t\t\t\"-1\", *big.NewInt(-1),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x7f\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(127), int16(127), int32(127), int64(127), int(127),\n\t\t\t\t\tuint8(127), uint16(127), uint32(127), uint64(127), uint(127),\n\t\t\t\t\t\"127\", *big.NewInt(127),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\x80\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(-128), int16(-128), int32(-128), int64(-128), int(-128),\n\t\t\t\t\t\"-128\", *big.NewInt(-128),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: 
[]byte(\"\\x00\\x00\\x00\\x80\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(128), int32(128), int64(128), int(128),\n\t\t\t\t\tuint16(128), uint32(128), uint64(128), uint(128),\n\t\t\t\t\t\"128\", *big.NewInt(128),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt8+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\x7f\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(-129), int32(-129), int64(-129), int(-129),\n\t\t\t\t\t\"-129\", *big.NewInt(-129),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt8-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x7f\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(32767), int32(32767), int64(32767), int(32767),\n\t\t\t\t\tuint16(32767), uint32(32767), uint64(32767), uint(32767),\n\t\t\t\t\t\"32767\", *big.NewInt(32767),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt16\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\x80\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(-32768), int32(-32768), int64(-32768), int(-32768),\n\t\t\t\t\t\"-32768\", *big.NewInt(-32768),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt16\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x80\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(32768), int64(32768), int(32768),\n\t\t\t\t\tuint32(32768), uint64(32768), uint(32768),\n\t\t\t\t\t\"32768\", *big.NewInt(32768),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt16+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\x7f\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(-32769), int64(-32769), int(-32769),\n\t\t\t\t\t\"-32769\", *big.NewInt(-32769),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt16-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: 
[]byte(\"\\x7f\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(2147483647), int64(2147483647), int(2147483647),\n\t\t\t\t\tuint32(2147483647), uint64(2147483647), uint(2147483647),\n\t\t\t\t\t\"2147483647\", *big.NewInt(2147483647),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt32\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(-2147483648), int64(-2147483648), int(-2147483648),\n\t\t\t\t\t\"-2147483648\", *big.NewInt(-2147483648),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt32\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint8(255), uint16(255), uint32(255), uint64(255), uint(255),\n\t\t\t\t\tint16(255), int32(255), int64(255), int(255),\n\t\t\t\t\t\"255\", *big.NewInt(255),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x01\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint16(256), uint32(256), uint64(256), uint(256),\n\t\t\t\t\tint16(256), int32(256), int64(256), int(256),\n\t\t\t\t\t\"256\", *big.NewInt(256),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint8+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint16(65535), uint32(65535), uint64(65535), uint(65535),\n\t\t\t\t\tint32(65535), int64(65535), int(65535),\n\t\t\t\t\t\"65535\", *big.NewInt(65535),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint16\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x01\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(65536), uint64(65536), uint(65536),\n\t\t\t\t\tint32(65536), int64(65536), int(65536),\n\t\t\t\t\t\"65536\", 
*big.NewInt(65536),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint16+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(4294967295), uint64(4294967295), uint(4294967295),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint32\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_5_bigint_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/bigint\"\n)\n\nfunc TestMarshalBigIntCorrupt(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeBigInt)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.bigint\",\n\t\t\tmarshal:   bigint.Marshal,\n\t\t\tunmarshal: bigint.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tserialization.NegativeMarshalSet{\n\t\t\tValues: mod.Values{\n\t\t\t\t\"9223372036854775808\",\n\t\t\t\t\"-9223372036854775809\",\n\t\t\t\t*big.NewInt(0).Add(big.NewInt(9223372036854775807), big.NewInt(1)),\n\t\t\t\t*big.NewInt(0).Add(big.NewInt(-9223372036854775808), big.NewInt(-1)),\n\t\t\t}.AddVariants(mod.All...),\n\t\t}.Run(\"big_vals\", t, marshal)\n\n\t\tserialization.NegativeMarshalSet{\n\t\t\tValues: mod.Values{\"1s2\", \"1s\", \"-1s\", \".1\", \",1\", \"0.1\", \"0,1\"}.AddVariants(mod.All...),\n\t\t}.Run(\"corrupt_vals\", t, marshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\tValues: mod.Values{\n\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\"\", 
*big.NewInt(0),\n\t\t\t}.AddVariants(mod.All...),\n\t\t}.Run(\"big_data\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\tValues: mod.Values{\n\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\"\", *big.NewInt(0),\n\t\t\t}.AddVariants(mod.All...),\n\t\t}.Run(\"small_data\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData: []byte(\"\\x80\"),\n\t\t\tValues: mod.Values{\n\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\"\", *big.NewInt(0),\n\t\t\t}.AddVariants(mod.All...),\n\t\t}.Run(\"small_data2\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\"),\n\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_int8_128\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x7f\"),\n\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_int8_-129\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\"),\n\t\t\tValues: mod.Values{int8(0), int16(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_int_32768\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\x7f\\xff\"),\n\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_int8_-32769\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\"),\n\t\t\tValues: mod.Values{int8(0), int16(0), int32(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_int_2147483647\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   
[]byte(\"\\xff\\xff\\xff\\x7f\\xff\\xff\\xff\\xff\"),\n\t\t\tValues: mod.Values{int8(0), int16(0), int32(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_int_-2147483648\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x00\"),\n\t\t\tValues: mod.Values{uint8(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_uint8_256\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\"),\n\t\t\tValues: mod.Values{uint8(0), uint16(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_uint_65536\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\"),\n\t\t\tValues: mod.Values{uint8(0), uint16(0), uint32(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_uint_4294967296\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\tValues: mod.Values{uint8(0), uint16(0), uint32(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_uint_max\", t, unmarshal)\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_5_bigint_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/bigint\"\n)\n\nfunc TestMarshalBigInt(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\ttType := gocql.NewNativeType(4, gocql.TypeBigInt)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.bigint\",\n\t\t\tmarshal:   bigint.Marshal,\n\t\t\tunmarshal: bigint.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t(*int8)(nil), (*int16)(nil), (*int32)(nil), (*int64)(nil), (*int)(nil),\n\t\t\t\t\t(*uint8)(nil), (*uint16)(nil), (*uint32)(nil), (*uint64)(nil), (*uint)(nil),\n\t\t\t\t\t(*string)(nil), (*big.Int)(nil), \"\",\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"\", big.Int{},\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tValues: 
mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"0\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"0\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(1), int16(1), int32(1), int64(1), int(1),\n\t\t\t\t\tuint8(1), uint16(1), uint32(1), uint64(1), uint(1),\n\t\t\t\t\t\"1\", *big.NewInt(1),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(-1), int16(-1), int32(-1), int64(-1), int(-1),\n\t\t\t\t\t\"-1\", *big.NewInt(-1),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x7f\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(127), int16(127), int32(127), int64(127), int(127),\n\t\t\t\t\tuint8(127), uint16(127), uint32(127), uint64(127), uint(127),\n\t\t\t\t\t\"127\", *big.NewInt(127),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x80\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(-128), int16(-128), int32(-128), int64(-128), int(-128),\n\t\t\t\t\t\"-128\", 
*big.NewInt(-128),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(128), int32(128), int64(128), int(128),\n\t\t\t\t\tuint8(128), uint16(128), uint32(128), uint64(128), uint(128),\n\t\t\t\t\t\"128\", *big.NewInt(128),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt8+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x7f\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(-129), int32(-129), int64(-129), int(-129),\n\t\t\t\t\t\"-129\", *big.NewInt(-129),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt8-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x7f\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(32767), int32(32767), int64(32767), int(32767),\n\t\t\t\t\tuint16(32767), uint32(32767), uint64(32767), uint(32767),\n\t\t\t\t\t\"32767\", *big.NewInt(32767),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt16\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\x80\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(-32768), int32(-32768), int64(-32768), int(-32768),\n\t\t\t\t\t\"-32768\", *big.NewInt(-32768),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt16\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(32768), int64(32768), int(32768),\n\t\t\t\t\tuint16(32768), uint32(32768), uint64(32768), uint(32768),\n\t\t\t\t\t\"32768\", *big.NewInt(32768),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt16+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: 
[]byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\x7f\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(-32769), int64(-32769), int(-32769),\n\t\t\t\t\t\"-32769\", *big.NewInt(-32769),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt16-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x7f\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(2147483647), int64(2147483647), int(2147483647),\n\t\t\t\t\tuint32(2147483647), uint64(2147483647), uint(2147483647),\n\t\t\t\t\t\"2147483647\", *big.NewInt(2147483647),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt32\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\x80\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(-2147483648), int64(-2147483648), int(-2147483648),\n\t\t\t\t\t\"-2147483648\", *big.NewInt(-2147483648),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt32\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(2147483648), int(2147483648),\n\t\t\t\t\tuint32(2147483648), uint64(2147483648), uint(2147483648),\n\t\t\t\t\t\"2147483648\", *big.NewInt(2147483648),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt32+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\x7f\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(-2147483649), int(-2147483649),\n\t\t\t\t\t\"-2147483649\", *big.NewInt(-2147483649),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt32-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(9223372036854775807), int(9223372036854775807),\n\t\t\t\t\tuint64(9223372036854775807), 
uint(9223372036854775807),\n\t\t\t\t\t\"9223372036854775807\", *big.NewInt(9223372036854775807),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt64\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(-9223372036854775808), int(-9223372036854775808),\n\t\t\t\t\t\"-9223372036854775808\", *big.NewInt(-9223372036854775808),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt64\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint8(255), uint16(255), uint32(255), uint64(255), uint(255),\n\t\t\t\t\tint16(255), int32(255), int64(255), int(255),\n\t\t\t\t\t\"255\", *big.NewInt(255),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint16(256), uint32(256), uint64(256), uint(256),\n\t\t\t\t\tint16(256), int32(256), int64(256), int(256),\n\t\t\t\t\t\"256\", *big.NewInt(256),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint8+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint16(65535), uint32(65535), uint64(65535), uint(65535),\n\t\t\t\t\tint32(65535), int64(65535), int(65535),\n\t\t\t\t\t\"65535\", *big.NewInt(65535),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint16\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(65536), uint64(65536), uint(65536),\n\t\t\t\t\tint32(65536), int64(65536), int(65536),\n\t\t\t\t\t\"65536\", 
*big.NewInt(65536),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint16+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(4294967295), uint64(4294967295), uint(4294967295),\n\t\t\t\t\tint64(4294967295), int(4294967295),\n\t\t\t\t\t\"4294967295\", *big.NewInt(4294967295),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint32\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint64(4294967296), uint(4294967296),\n\t\t\t\t\tint64(4294967296), int(4294967296),\n\t\t\t\t\t\"4294967296\", *big.NewInt(4294967296),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint32+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint64(18446744073709551615), uint(18446744073709551615),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint64\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_6_counter_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/counter\"\n)\n\nfunc TestMarshalCounterCorrupt(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeCounter)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.counter\",\n\t\t\tmarshal:   counter.Marshal,\n\t\t\tunmarshal: counter.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tserialization.NegativeMarshalSet{\n\t\t\tValues: mod.Values{\n\t\t\t\t\"9223372036854775808\",\n\t\t\t\t\"-9223372036854775809\",\n\t\t\t\t*big.NewInt(0).Add(big.NewInt(9223372036854775807), big.NewInt(1)),\n\t\t\t\t*big.NewInt(0).Add(big.NewInt(-9223372036854775808), big.NewInt(-1)),\n\t\t\t}.AddVariants(mod.All...),\n\t\t}.Run(\"big_vals\", t, marshal)\n\n\t\tserialization.NegativeMarshalSet{\n\t\t\tValues: mod.Values{\"1s2\", \"1s\", \"-1s\", \".1\", \",1\", \"0.1\", \"0,1\"}.AddVariants(mod.All...),\n\t\t}.Run(\"corrupt_vals\", t, marshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\tValues: mod.Values{\n\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\"\", 
*big.NewInt(0),\n\t\t\t}.AddVariants(mod.All...),\n\t\t}.Run(\"big_data\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\tValues: mod.Values{\n\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\"\", *big.NewInt(0),\n\t\t\t}.AddVariants(mod.All...),\n\t\t}.Run(\"small_data\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData: []byte(\"\\x80\"),\n\t\t\tValues: mod.Values{\n\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\"\", *big.NewInt(0),\n\t\t\t}.AddVariants(mod.All...),\n\t\t}.Run(\"small_data2\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\"),\n\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_int8_128\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x7f\"),\n\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_int8_-129\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\"),\n\t\t\tValues: mod.Values{int8(0), int16(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_int_32768\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\x7f\\xff\"),\n\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_int8_-32769\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\"),\n\t\t\tValues: mod.Values{int8(0), int16(0), int32(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_int_2147483647\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   
[]byte(\"\\xff\\xff\\xff\\x7f\\xff\\xff\\xff\\xff\"),\n\t\t\tValues: mod.Values{int8(0), int16(0), int32(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_int_-2147483648\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x00\"),\n\t\t\tValues: mod.Values{uint8(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_uint8_256\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\"),\n\t\t\tValues: mod.Values{uint8(0), uint16(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_uint_65536\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\"),\n\t\t\tValues: mod.Values{uint8(0), uint16(0), uint32(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_uint_4294967296\", t, unmarshal)\n\n\t\tserialization.NegativeUnmarshalSet{\n\t\t\tData:   []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\tValues: mod.Values{uint8(0), uint16(0), uint32(0)}.AddVariants(mod.All...),\n\t\t}.Run(\"small_type_uint_max\", t, unmarshal)\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_6_counter_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/counter\"\n)\n\nfunc TestMarshalCounter(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\ttType := gocql.NewNativeType(4, gocql.TypeCounter)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.counter\",\n\t\t\tmarshal:   counter.Marshal,\n\t\t\tunmarshal: counter.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t(*int8)(nil), (*int16)(nil), (*int32)(nil), (*int64)(nil), (*int)(nil),\n\t\t\t\t\t(*uint8)(nil), (*uint16)(nil), (*uint32)(nil), (*uint64)(nil), (*uint)(nil),\n\t\t\t\t\t(*string)(nil), (*big.Int)(nil), \"\",\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"\", big.Int{},\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tValues: 
mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"0\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"0\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(1), int16(1), int32(1), int64(1), int(1),\n\t\t\t\t\tuint8(1), uint16(1), uint32(1), uint64(1), uint(1),\n\t\t\t\t\t\"1\", *big.NewInt(1),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(-1), int16(-1), int32(-1), int64(-1), int(-1),\n\t\t\t\t\t\"-1\", *big.NewInt(-1),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x7f\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(127), int16(127), int32(127), int64(127), int(127),\n\t\t\t\t\tuint8(127), uint16(127), uint32(127), uint64(127), uint(127),\n\t\t\t\t\t\"127\", *big.NewInt(127),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x80\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(-128), int16(-128), int32(-128), int64(-128), int(-128),\n\t\t\t\t\t\"-128\", 
*big.NewInt(-128),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(128), int32(128), int64(128), int(128),\n\t\t\t\t\tuint8(128), uint16(128), uint32(128), uint64(128), uint(128),\n\t\t\t\t\t\"128\", *big.NewInt(128),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt8+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x7f\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(-129), int32(-129), int64(-129), int(-129),\n\t\t\t\t\t\"-129\", *big.NewInt(-129),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt8-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x7f\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(32767), int32(32767), int64(32767), int(32767),\n\t\t\t\t\tuint16(32767), uint32(32767), uint64(32767), uint(32767),\n\t\t\t\t\t\"32767\", *big.NewInt(32767),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt16\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\x80\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(-32768), int32(-32768), int64(-32768), int(-32768),\n\t\t\t\t\t\"-32768\", *big.NewInt(-32768),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt16\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(32768), int64(32768), int(32768),\n\t\t\t\t\tuint16(32768), uint32(32768), uint64(32768), uint(32768),\n\t\t\t\t\t\"32768\", *big.NewInt(32768),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt16+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: 
[]byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\x7f\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(-32769), int64(-32769), int(-32769),\n\t\t\t\t\t\"-32769\", *big.NewInt(-32769),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt16-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x7f\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(2147483647), int64(2147483647), int(2147483647),\n\t\t\t\t\tuint32(2147483647), uint64(2147483647), uint(2147483647),\n\t\t\t\t\t\"2147483647\", *big.NewInt(2147483647),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt32\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\x80\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(-2147483648), int64(-2147483648), int(-2147483648),\n\t\t\t\t\t\"-2147483648\", *big.NewInt(-2147483648),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt32\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(2147483648), int(2147483648),\n\t\t\t\t\tuint32(2147483648), uint64(2147483648), uint(2147483648),\n\t\t\t\t\t\"2147483648\", *big.NewInt(2147483648),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt32+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\x7f\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(-2147483649), int(-2147483649),\n\t\t\t\t\t\"-2147483649\", *big.NewInt(-2147483649),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt32-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(9223372036854775807), int(9223372036854775807),\n\t\t\t\t\tuint64(9223372036854775807), 
uint(9223372036854775807),\n\t\t\t\t\t\"9223372036854775807\", *big.NewInt(9223372036854775807),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt64\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(-9223372036854775808), int(-9223372036854775808),\n\t\t\t\t\t\"-9223372036854775808\", *big.NewInt(-9223372036854775808),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt64\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint8(255), uint16(255), uint32(255), uint64(255), uint(255),\n\t\t\t\t\tint16(255), int32(255), int64(255), int(255),\n\t\t\t\t\t\"255\", *big.NewInt(255),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint16(256), uint32(256), uint64(256), uint(256),\n\t\t\t\t\tint16(256), int32(256), int64(256), int(256),\n\t\t\t\t\t\"256\", *big.NewInt(256),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint8+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint16(65535), uint32(65535), uint64(65535), uint(65535),\n\t\t\t\t\tint32(65535), int64(65535), int(65535),\n\t\t\t\t\t\"65535\", *big.NewInt(65535),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint16\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(65536), uint64(65536), uint(65536),\n\t\t\t\t\tint32(65536), int64(65536), int(65536),\n\t\t\t\t\t\"65536\", 
*big.NewInt(65536),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint16+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x00\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(4294967295), uint64(4294967295), uint(4294967295),\n\t\t\t\t\tint64(4294967295), int(4294967295),\n\t\t\t\t\t\"4294967295\", *big.NewInt(4294967295),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint32\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint64(4294967296), uint(4294967296),\n\t\t\t\t\tint64(4294967296), int(4294967296),\n\t\t\t\t\t\"4294967296\", *big.NewInt(4294967296),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint32+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint64(18446744073709551615), uint(18446744073709551615),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint64\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_7_varint_corrupt_test.go",
    "content": "package serialization_test\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/varint\"\n)\n\nfunc TestMarshalVarIntCorrupt(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t\tname      string\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeVarint)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.varint\",\n\t\t\tmarshal:   varint.Marshal,\n\t\t\tunmarshal: varint.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.NegativeMarshalSet{\n\t\t\t\tValues: mod.Values{\"1s2\", \"1s\", \"-1s\", \".1\", \",1\", \"0.1\", \"0,1\"}.AddVariants(mod.All...),\n\t\t\t}.Run(\"corrupt_vals\", t, marshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\x00\\x7f\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"corrupt_data+\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData: []byte(\"\\xff\\x80\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"\", 
*big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"corrupt_data-\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x00\\x80\"),\n\t\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_maxInt8+1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\xff\\x7f\"),\n\t\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_minInt8-1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x00\\x80\\x00\"),\n\t\t\t\tValues: mod.Values{int8(0), int16(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_maxInt16+1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\xff\\x7f\\xff\"),\n\t\t\t\tValues: mod.Values{int8(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_minInt16-1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x00\\x80\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{int8(0), int16(0), int32(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_maxInt32+1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\xff\\x7f\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{int8(0), int16(0), int32(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_minInt32-1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{int8(0), int16(0), int32(0), int64(0), int(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_maxInt64+1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\xff\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{int8(0), int16(0), int32(0), int64(0), int(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_minInt64-1\", t, 
unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x01\\x00\"),\n\t\t\t\tValues: mod.Values{uint8(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_maxUint8+1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x01\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{uint8(0), uint16(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_maxUint16+1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x01\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{uint8(0), uint16(0), uint32(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_maxUint32+1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{uint8(0), uint16(0), uint32(0), uint64(0), uint(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_type_maxUint64+1\", t, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_7_varint_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/varint\"\n)\n\nfunc TestMarshalVarIntNew(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeVarint)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.varint\",\n\t\t\tmarshal:   varint.Marshal,\n\t\t\tunmarshal: varint.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t(*int8)(nil), (*int16)(nil), (*int32)(nil), (*int64)(nil), (*int)(nil),\n\t\t\t\t\t(*uint8)(nil), (*uint16)(nil), (*uint32)(nil), (*uint64)(nil), (*uint)(nil),\n\t\t\t\t\t(*string)(nil), (*big.Int)(nil), \"\",\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: nil,\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"\", big.Int{},\n\t\t\t\t}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: make([]byte, 0),\n\t\t\t\tValues: 
mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"0\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(0), int16(0), int32(0), int64(0), int(0),\n\t\t\t\t\tuint8(0), uint16(0), uint32(0), uint64(0), uint(0),\n\t\t\t\t\t\"0\", *big.NewInt(0),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x01\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(1), int16(1), int32(1), int64(1), int(1),\n\t\t\t\t\tuint8(1), uint16(1), uint32(1), uint64(1), uint(1),\n\t\t\t\t\t\"1\", *big.NewInt(1),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(-1), int16(-1), int32(-1), int64(-1), int(-1),\n\t\t\t\t\t\"-1\", *big.NewInt(-1),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x7f\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(127), int16(127), int32(127), int64(127), int(127),\n\t\t\t\t\tuint8(127), uint16(127), uint32(127), uint64(127), uint(127),\n\t\t\t\t\t\"127\", *big.NewInt(127),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x80\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint8(-128), int16(-128), int32(-128), int64(-128), int(-128),\n\t\t\t\t\t\"-128\", *big.NewInt(-128),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x80\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(128), int32(128), 
int64(128), int(128),\n\t\t\t\t\tuint8(128), uint16(128), uint32(128), uint64(128), uint(128),\n\t\t\t\t\t\"128\", *big.NewInt(128),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt8+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\x7f\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(-129), int32(-129), int64(-129), int(-129),\n\t\t\t\t\t\"-129\", *big.NewInt(-129),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt8-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x7f\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(32767), int32(32767), int64(32767), int(32767),\n\t\t\t\t\tuint16(32767), uint32(32767), uint64(32767), uint(32767),\n\t\t\t\t\t\"32767\", *big.NewInt(32767),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt16\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint16(-32768), int32(-32768), int64(-32768), int(-32768),\n\t\t\t\t\t\"-32768\", *big.NewInt(-32768),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt16\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x80\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(32768), int64(32768), int(32768),\n\t\t\t\t\tuint16(32768), uint32(32768), uint64(32768), uint(32768),\n\t\t\t\t\t\"32768\", *big.NewInt(32768),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt16+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\x7f\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(-32769), int64(-32769), int(-32769),\n\t\t\t\t\t\"-32769\", *big.NewInt(-32769),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt16-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(8388607), int64(8388607), 
int(8388607),\n\t\t\t\t\tuint32(8388607), uint64(8388607), uint(8388607),\n\t\t\t\t\t\"8388607\", *big.NewInt(8388607),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt24\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(-8388608), int64(-8388608), int(-8388608),\n\t\t\t\t\t\"-8388608\", *big.NewInt(-8388608),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt24\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x80\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(8388608), int(8388608),\n\t\t\t\t\tuint32(8388608), uint64(8388608), uint(8388608),\n\t\t\t\t\t\"8388608\", *big.NewInt(8388608),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt24+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\x7f\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(-8388609), int(-8388609),\n\t\t\t\t\t\"-8388609\", *big.NewInt(-8388609),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt24-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(2147483647), int64(2147483647), int(2147483647),\n\t\t\t\t\tuint32(2147483647), uint64(2147483647), uint(2147483647),\n\t\t\t\t\t\"2147483647\", *big.NewInt(2147483647),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt32\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint32(-2147483648), int64(-2147483648), int(-2147483648),\n\t\t\t\t\t\"-2147483648\", *big.NewInt(-2147483648),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt32\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x80\\x00\\x00\\x00\"),\n\t\t\t\tValues: 
mod.Values{\n\t\t\t\t\tint64(2147483648), int(2147483648),\n\t\t\t\t\tuint32(2147483648), uint64(2147483648), uint(2147483648),\n\t\t\t\t\t\"2147483648\", *big.NewInt(2147483648),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt32+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\x7f\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(-2147483649), int(-2147483649),\n\t\t\t\t\t\"-2147483649\", *big.NewInt(-2147483649),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt32-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(549755813887), int(549755813887),\n\t\t\t\t\tuint64(549755813887), uint(549755813887),\n\t\t\t\t\t\"549755813887\", *big.NewInt(549755813887),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt40\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(-549755813888), int(-549755813888),\n\t\t\t\t\t\"-549755813888\", *big.NewInt(-549755813888),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt40\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x80\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(549755813888), int(549755813888),\n\t\t\t\t\tuint64(549755813888), uint(549755813888),\n\t\t\t\t\t\"549755813888\", *big.NewInt(549755813888),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt40+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\x7f\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(-549755813889), int(-549755813889),\n\t\t\t\t\t\"-549755813889\", *big.NewInt(-549755813889),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt40-1\", t, marshal, 
unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(140737488355327), int(140737488355327),\n\t\t\t\t\tuint64(140737488355327), uint(140737488355327),\n\t\t\t\t\t\"140737488355327\", *big.NewInt(140737488355327),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt48\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(-140737488355328), int(-140737488355328),\n\t\t\t\t\t\"-140737488355328\", *big.NewInt(-140737488355328),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt48\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x80\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(140737488355328), int(140737488355328),\n\t\t\t\t\tuint64(140737488355328), uint(140737488355328),\n\t\t\t\t\t\"140737488355328\", *big.NewInt(140737488355328),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt48+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\x7f\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(-140737488355329), int(-140737488355329),\n\t\t\t\t\t\"-140737488355329\", *big.NewInt(-140737488355329),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt48-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(36028797018963967), int(36028797018963967),\n\t\t\t\t\tuint64(36028797018963967), uint(36028797018963967),\n\t\t\t\t\t\"36028797018963967\", *big.NewInt(36028797018963967),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt56\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: 
mod.Values{\n\t\t\t\t\tint64(-36028797018963968), int(-36028797018963968),\n\t\t\t\t\t\"-36028797018963968\", *big.NewInt(-36028797018963968),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt56\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(36028797018963968), int(36028797018963968),\n\t\t\t\t\tuint64(36028797018963968), uint(36028797018963968),\n\t\t\t\t\t\"36028797018963968\", *big.NewInt(36028797018963968),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt56+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(-36028797018963969), int(-36028797018963969),\n\t\t\t\t\t\"-36028797018963969\", *big.NewInt(-36028797018963969),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt56-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(9223372036854775807), int(9223372036854775807),\n\t\t\t\t\tuint64(9223372036854775807), uint(9223372036854775807),\n\t\t\t\t\t\"9223372036854775807\", *big.NewInt(9223372036854775807),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt64\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tint64(-9223372036854775808), int(-9223372036854775808),\n\t\t\t\t\t\"-9223372036854775808\", *big.NewInt(-9223372036854775808),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt64\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"9223372036854775808\", *big.NewInt(0).Add(big.NewInt(1), 
big.NewInt(9223372036854775807)),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxInt64+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\xff\\x7f\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"-9223372036854775809\", *big.NewInt(0).Add(big.NewInt(-1), big.NewInt(-9223372036854775808)),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"minInt64-1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint8(255), uint16(255), uint32(255), uint64(255), uint(255),\n\t\t\t\t\tint16(255), int32(255), int64(255), int(255),\n\t\t\t\t\t\"255\", *big.NewInt(255),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint8\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x01\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint16(256), uint32(256), uint64(256), uint(256),\n\t\t\t\t\tint16(256), int32(256), int64(256), int(256),\n\t\t\t\t\t\"256\", *big.NewInt(256),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint8+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint16(65535), uint32(65535), uint64(65535), uint(65535),\n\t\t\t\t\tint32(65535), int64(65535), int(65535),\n\t\t\t\t\t\"65535\", *big.NewInt(65535),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint16\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x01\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(65536), uint64(65536), uint(65536),\n\t\t\t\t\tint32(65536), int64(65536), int(65536),\n\t\t\t\t\t\"65536\", *big.NewInt(65536),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint16+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\xff\\xff\\xff\"),\n\t\t\t\tValues: 
mod.Values{\n\t\t\t\t\tuint32(16777215), uint64(16777215), uint(16777215),\n\t\t\t\t\tint32(16777215), int64(16777215), int(16777215),\n\t\t\t\t\t\"16777215\", *big.NewInt(16777215),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint24\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x01\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(16777216), uint64(16777216), uint(16777216),\n\t\t\t\t\tint32(16777216), int64(16777216), int(16777216),\n\t\t\t\t\t\"16777216\", *big.NewInt(16777216),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint24+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint32(4294967295), uint64(4294967295), uint(4294967295),\n\t\t\t\t\tint64(4294967295), int(4294967295),\n\t\t\t\t\t\"4294967295\", *big.NewInt(4294967295),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint32\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x01\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint64(4294967296), uint(4294967296),\n\t\t\t\t\tint64(4294967296), int(4294967296),\n\t\t\t\t\t\"4294967296\", *big.NewInt(4294967296),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint32+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint64(1099511627775), uint(1099511627775),\n\t\t\t\t\tint64(1099511627775), int(1099511627775),\n\t\t\t\t\t\"1099511627775\", *big.NewInt(1099511627775),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint40\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x01\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint64(1099511627776), uint(1099511627776),\n\t\t\t\t\tint64(1099511627776), 
int(1099511627776),\n\t\t\t\t\t\"1099511627776\", *big.NewInt(1099511627776),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint40+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint64(281474976710655), uint(281474976710655),\n\t\t\t\t\tint64(281474976710655), int(281474976710655),\n\t\t\t\t\t\"281474976710655\", *big.NewInt(281474976710655),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint48\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x01\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint64(281474976710656), uint(281474976710656),\n\t\t\t\t\tint64(281474976710656), int(281474976710656),\n\t\t\t\t\t\"281474976710656\", *big.NewInt(281474976710656),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint48+1\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint64(72057594037927935), uint(72057594037927935),\n\t\t\t\t\tint64(72057594037927935), int(72057594037927935),\n\t\t\t\t\t\"72057594037927935\", *big.NewInt(72057594037927935),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint56\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint64(72057594037927936), uint(72057594037927936),\n\t\t\t\t\tint64(72057594037927936), int(72057594037927936),\n\t\t\t\t\t\"72057594037927936\", *big.NewInt(72057594037927936),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint56+1\", t, marshal, unmarshal)\n\n\t\t\tbigMaxUint64 := new(big.Int)\n\t\t\tbigMaxUint64.SetString(\"18446744073709551615\", 10)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: 
[]byte(\"\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\tuint64(18446744073709551615), uint(18446744073709551615),\n\t\t\t\t\t\"18446744073709551615\", *bigMaxUint64,\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint64\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData: []byte(\"\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{\n\t\t\t\t\t\"18446744073709551616\", *big.NewInt(0).Add(bigMaxUint64, big.NewInt(1)),\n\t\t\t\t}.AddVariants(mod.All...),\n\t\t\t}.Run(\"maxUint64+1\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_8_float_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/float\"\n)\n\nfunc TestMarshalFloatCorrupt(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeFloat)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.float\",\n\t\t\tmarshal:   float.Marshal,\n\t\t\tunmarshal: float.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x80\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{float32(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_data\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x80\"),\n\t\t\t\tValues: mod.Values{float32(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x80\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{float32(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data2\", t, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_8_float_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/float\"\n)\n\nfunc TestMarshalFloat(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeFloat)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.float\",\n\t\t\tmarshal:   float.Marshal,\n\t\t\tunmarshal: float.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   nil,\n\t\t\t\tValues: mod.Values{(*float32)(nil)}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   nil,\n\t\t\t\tValues: mod.Values{float32(0)}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   make([]byte, 0),\n\t\t\t\tValues: mod.Values{float32(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{float32(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   
[]byte(\"\\x7f\\x7f\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{float32(math.MaxFloat32)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"max\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\xff\\x7f\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{float32(-math.MaxFloat32)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"min\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x01\"),\n\t\t\t\tValues: mod.Values{float32(math.SmallestNonzeroFloat32)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"smallest_pos\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x80\\x00\\x00\\x01\"),\n\t\t\t\tValues: mod.Values{float32(-math.SmallestNonzeroFloat32)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"smallest_neg\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x7f\\x80\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{float32(math.Inf(1))}.AddVariants(mod.All...),\n\t\t\t}.Run(\"inf+\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\xff\\x80\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{float32(math.Inf(-1))}.AddVariants(mod.All...),\n\t\t\t}.Run(\"inf-\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x7f\\xc0\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{float32(math.NaN())}.AddVariants(mod.All...),\n\t\t\t}.Run(\"nan\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x40\\x49\\x0f\\xdb\"),\n\t\t\t\tValues: mod.Values{float32(3.14159265)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"pi\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_9_double_corrupt_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/double\"\n)\n\nfunc TestMarshalDoubleCorrupt(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeDouble)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.double\",\n\t\t\tmarshal:   double.Marshal,\n\t\t\tunmarshal: double.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{float64(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"big_data\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x80\"),\n\t\t\t\tValues: mod.Values{float64(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data1\", t, unmarshal)\n\n\t\t\tserialization.NegativeUnmarshalSet{\n\t\t\t\tData:   []byte(\"\\x80\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{float64(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"small_data2\", t, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tests/serialization/marshal_9_duble_test.go",
    "content": "//go:build unit\n// +build unit\n\npackage serialization_test\n\nimport (\n\t\"math\"\n\t\"testing\"\n\n\t\"github.com/gocql/gocql\"\n\t\"github.com/gocql/gocql/internal/tests/serialization\"\n\t\"github.com/gocql/gocql/internal/tests/serialization/mod\"\n\t\"github.com/gocql/gocql/serialization/double\"\n)\n\nfunc TestMarshalDouble(t *testing.T) {\n\tt.Parallel()\n\n\ttype testSuite struct {\n\t\tname      string\n\t\tmarshal   func(any) ([]byte, error)\n\t\tunmarshal func(bytes []byte, i any) error\n\t}\n\n\ttType := gocql.NewNativeType(4, gocql.TypeDouble)\n\n\ttestSuites := [2]testSuite{\n\t\t{\n\t\t\tname:      \"serialization.double\",\n\t\t\tmarshal:   double.Marshal,\n\t\t\tunmarshal: double.Unmarshal,\n\t\t},\n\t\t{\n\t\t\tname: \"glob\",\n\t\t\tmarshal: func(i any) ([]byte, error) {\n\t\t\t\treturn gocql.Marshal(tType, i)\n\t\t\t},\n\t\t\tunmarshal: func(bytes []byte, i any) error {\n\t\t\t\treturn gocql.Unmarshal(tType, bytes, i)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tSuite := range testSuites {\n\t\tmarshal := tSuite.marshal\n\t\tunmarshal := tSuite.unmarshal\n\n\t\tt.Run(tSuite.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   nil,\n\t\t\t\tValues: mod.Values{(*float64)(nil)}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]nullable\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   nil,\n\t\t\t\tValues: mod.Values{float64(0)}.AddVariants(mod.CustomType),\n\t\t\t}.Run(\"[nil]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   make([]byte, 0),\n\t\t\t\tValues: mod.Values{float64(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"[]unmarshal\", t, nil, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{float64(0)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"zeros\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   
[]byte(\"\\x7f\\xef\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{float64(math.MaxFloat64)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"max\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\xff\\xef\\xff\\xff\\xff\\xff\\xff\\xff\"),\n\t\t\t\tValues: mod.Values{float64(-math.MaxFloat64)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"min\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\"),\n\t\t\t\tValues: mod.Values{float64(math.SmallestNonzeroFloat64)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"smallest_pos\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x01\"),\n\t\t\t\tValues: mod.Values{float64(-math.SmallestNonzeroFloat64)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"smallest_neg\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x7f\\xf0\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{float64(math.Inf(1))}.AddVariants(mod.All...),\n\t\t\t}.Run(\"inf+\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\xff\\xf0\\x00\\x00\\x00\\x00\\x00\\x00\"),\n\t\t\t\tValues: mod.Values{float64(math.Inf(-1))}.AddVariants(mod.All...),\n\t\t\t}.Run(\"inf-\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x7f\\xf8\\x00\\x00\\x00\\x00\\x00\\x01\"),\n\t\t\t\tValues: mod.Values{float64(math.NaN())}.AddVariants(mod.All...),\n\t\t\t}.Run(\"nan\", t, marshal, unmarshal)\n\n\t\t\tserialization.PositiveSet{\n\t\t\t\tData:   []byte(\"\\x40\\x09\\x21\\xfb\\x53\\xc8\\xd4\\xf1\"),\n\t\t\t\tValues: mod.Values{float64(3.14159265)}.AddVariants(mod.All...),\n\t\t\t}.Run(\"pi\", t, marshal, unmarshal)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "token.go",
    "content": "// Copyright (c) 2015 The gocql Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"bytes\"\n\t\"crypto/md5\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/gocql/gocql/internal/murmur\"\n)\n\n// a token partitioner\ntype Partitioner interface {\n\tName() string\n\tHash([]byte) Token\n\tParseString(string) Token\n}\n\n// a Token\ntype Token interface {\n\tfmt.Stringer\n\tLess(Token) bool\n}\n\n// murmur3 partitioner\ntype murmur3Partitioner struct{}\n\nfunc (p murmur3Partitioner) Name() string {\n\treturn \"Murmur3Partitioner\"\n}\n\nfunc (p murmur3Partitioner) Hash(partitionKey []byte) Token {\n\th1 := murmur.Murmur3H1(partitionKey)\n\treturn int64Token(h1)\n}\n\n// murmur3 little-endian, 128-bit hash, but returns only h1\nfunc (p 
murmur3Partitioner) ParseString(str string) Token {\n\treturn parseInt64Token(str)\n}\n\n// int64 token\ntype int64Token int64\n\nfunc parseInt64Token(str string) int64Token {\n\tval, _ := strconv.ParseInt(str, 10, 64)\n\treturn int64Token(val)\n}\n\nfunc (m int64Token) String() string {\n\treturn strconv.FormatInt(int64(m), 10)\n}\n\nfunc (m int64Token) Less(token Token) bool {\n\treturn m < token.(int64Token)\n}\n\n// order preserving partitioner and token\ntype orderedPartitioner struct{}\ntype orderedToken string\n\nfunc (p orderedPartitioner) Name() string {\n\treturn \"OrderedPartitioner\"\n}\n\nfunc (p orderedPartitioner) Hash(partitionKey []byte) Token {\n\t// the partition key is the token\n\treturn orderedToken(partitionKey)\n}\n\nfunc (p orderedPartitioner) ParseString(str string) Token {\n\treturn orderedToken(str)\n}\n\nfunc (o orderedToken) String() string {\n\treturn string(o)\n}\n\nfunc (o orderedToken) Less(token Token) bool {\n\treturn o < token.(orderedToken)\n}\n\n// random partitioner and token\ntype randomPartitioner struct{}\ntype randomToken big.Int\n\nfunc (r randomPartitioner) Name() string {\n\treturn \"RandomPartitioner\"\n}\n\n// 2 ** 128\nvar maxHashInt, _ = new(big.Int).SetString(\"340282366920938463463374607431768211456\", 10)\n\nfunc (p randomPartitioner) Hash(partitionKey []byte) Token {\n\tsum := md5.Sum(partitionKey)\n\tval := new(big.Int)\n\tval.SetBytes(sum[:])\n\tif sum[0] > 127 {\n\t\tval.Sub(val, maxHashInt)\n\t\tval.Abs(val)\n\t}\n\n\treturn (*randomToken)(val)\n}\n\nfunc (p randomPartitioner) ParseString(str string) Token {\n\tval := new(big.Int)\n\tval.SetString(str, 10)\n\treturn (*randomToken)(val)\n}\n\nfunc (r *randomToken) String() string {\n\treturn (*big.Int)(r).String()\n}\n\nfunc (r *randomToken) Less(token Token) bool {\n\treturn -1 == (*big.Int)(r).Cmp((*big.Int)(token.(*randomToken)))\n}\n\ntype hostToken struct {\n\ttoken Token\n\thost  *HostInfo\n}\n\nfunc (ht hostToken) String() string {\n\treturn 
fmt.Sprintf(\"{token=%v host=%v}\", ht.token, ht.host.HostID())\n}\n\n// a data structure for organizing the relationship between tokens and hosts\ntype tokenRing struct {\n\tpartitioner Partitioner\n\n\t// tokens map token range to primary replica.\n\t// The elements in tokens are sorted by token ascending.\n\t// The range for a given item in tokens starts after preceding range and ends with the token specified in\n\t// token. The end token is part of the range.\n\t// The lowest (i.e. index 0) range wraps around the ring (its preceding range is the one with largest index).\n\ttokens []hostToken\n\n\thosts []*HostInfo\n}\n\nfunc newTokenRing(partitioner string, hosts []*HostInfo) (*tokenRing, error) {\n\ttokenRing := &tokenRing{\n\t\thosts: hosts,\n\t}\n\n\tif strings.HasSuffix(partitioner, \"Murmur3Partitioner\") {\n\t\ttokenRing.partitioner = murmur3Partitioner{}\n\t} else if strings.HasSuffix(partitioner, \"OrderedPartitioner\") {\n\t\ttokenRing.partitioner = orderedPartitioner{}\n\t} else if strings.HasSuffix(partitioner, \"RandomPartitioner\") {\n\t\ttokenRing.partitioner = randomPartitioner{}\n\t} else {\n\t\treturn nil, fmt.Errorf(\"unsupported partitioner '%s'\", partitioner)\n\t}\n\n\tfor _, host := range hosts {\n\t\tfor _, strToken := range host.Tokens() {\n\t\t\ttoken := tokenRing.partitioner.ParseString(strToken)\n\t\t\ttokenRing.tokens = append(tokenRing.tokens, hostToken{token: token, host: host})\n\t\t}\n\t}\n\n\tsort.Sort(tokenRing)\n\n\treturn tokenRing, nil\n}\n\nfunc (t *tokenRing) Len() int {\n\treturn len(t.tokens)\n}\n\nfunc (t *tokenRing) Less(i, j int) bool {\n\treturn t.tokens[i].token.Less(t.tokens[j].token)\n}\n\nfunc (t *tokenRing) Swap(i, j int) {\n\tt.tokens[i], t.tokens[j] = t.tokens[j], t.tokens[i]\n}\n\nfunc (t *tokenRing) String() string {\n\tbuf := &bytes.Buffer{}\n\tbuf.WriteString(\"TokenRing(\")\n\tif t.partitioner != nil {\n\t\tbuf.WriteString(t.partitioner.Name())\n\t}\n\tbuf.WriteString(\"){\")\n\tsep := \"\"\n\tfor i, th 
:= range t.tokens {\n\t\tbuf.WriteString(sep)\n\t\tsep = \",\"\n\t\tbuf.WriteString(\"\\n\\t[\")\n\t\tbuf.WriteString(strconv.Itoa(i))\n\t\tbuf.WriteString(\"]\")\n\t\tbuf.WriteString(th.token.String())\n\t\tbuf.WriteString(\":\")\n\t\tbuf.WriteString(th.host.ConnectAddress().String())\n\t}\n\tbuf.WriteString(\"\\n}\")\n\treturn string(buf.Bytes())\n}\n\n// GetHostForPartitionKey finds host information for given partition key.\n//\n// It returns two tokens. First is token that exactly corresponds to the partition key (and could be used to\n// determine shard, for example), second token is the endToken that corresponds to the host.\nfunc (t *tokenRing) GetHostForPartitionKey(partitionKey []byte) (host *HostInfo, token Token, endToken Token) {\n\tif t == nil {\n\t\treturn nil, nil, nil\n\t}\n\n\ttoken = t.partitioner.Hash(partitionKey)\n\thost, endToken = t.GetHostForToken(token)\n\treturn host, token, endToken\n}\n\nfunc (t *tokenRing) GetHostForToken(token Token) (host *HostInfo, endToken Token) {\n\tif t == nil || len(t.tokens) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// find the primary replica\n\tp := sort.Search(len(t.tokens), func(i int) bool {\n\t\treturn !t.tokens[i].token.Less(token)\n\t})\n\n\tif p == len(t.tokens) {\n\t\t// wrap around to the first in the ring\n\t\tp = 0\n\t}\n\n\tv := t.tokens[p]\n\treturn v.host, v.token\n}\n"
  },
  {
    "path": "token_test.go",
    "content": "// Copyright (c) 2015 The gocql Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"net\"\n\t\"sort\"\n\t\"strconv\"\n\t\"testing\"\n)\n\n// Tests of the murmur3Patitioner\nfunc TestMurmur3Partitioner(t *testing.T) {\n\tt.Parallel()\n\n\ttoken := murmur3Partitioner{}.ParseString(\"-1053604476080545076\")\n\n\tif \"-1053604476080545076\" != token.String() {\n\t\tt.Errorf(\"Expected '-1053604476080545076' but was '%s'\", token)\n\t}\n\n\t// at least verify that the partitioner\n\t// doesn't return nil\n\tpk, _ := marshalInt(1)\n\ttoken = murmur3Partitioner{}.Hash(pk)\n\tif token == nil {\n\t\tt.Fatal(\"token was nil\")\n\t}\n}\n\n// Tests of the int64Token\nfunc TestInt64Token(t *testing.T) 
{\n\tt.Parallel()\n\n\tif int64Token(42).Less(int64Token(42)) {\n\t\tt.Errorf(\"Expected Less to return false, but was true\")\n\t}\n\tif !int64Token(-42).Less(int64Token(42)) {\n\t\tt.Errorf(\"Expected Less to return true, but was false\")\n\t}\n\tif int64Token(42).Less(int64Token(-42)) {\n\t\tt.Errorf(\"Expected Less to return false, but was true\")\n\t}\n}\n\n// Tests of the orderedPartitioner\nfunc TestOrderedPartitioner(t *testing.T) {\n\tt.Parallel()\n\n\t// at least verify that the partitioner\n\t// doesn't return nil\n\tp := orderedPartitioner{}\n\tpk, _ := marshalInt(1)\n\ttoken := p.Hash(pk)\n\tif token == nil {\n\t\tt.Fatal(\"token was nil\")\n\t}\n\n\tstr := token.String()\n\tparsedToken := p.ParseString(str)\n\n\tif !bytes.Equal([]byte(token.(orderedToken)), []byte(parsedToken.(orderedToken))) {\n\t\tt.Errorf(\"Failed to convert to and from a string %s expected %x but was %x\",\n\t\t\tstr,\n\t\t\t[]byte(token.(orderedToken)),\n\t\t\t[]byte(parsedToken.(orderedToken)),\n\t\t)\n\t}\n}\n\n// Tests of the orderedToken\nfunc TestOrderedToken(t *testing.T) {\n\tt.Parallel()\n\n\tif orderedToken([]byte{0, 0, 4, 2}).Less(orderedToken([]byte{0, 0, 4, 2})) {\n\t\tt.Errorf(\"Expected Less to return false, but was true\")\n\t}\n\tif !orderedToken([]byte{0, 0, 3}).Less(orderedToken([]byte{0, 0, 4, 2})) {\n\t\tt.Errorf(\"Expected Less to return true, but was false\")\n\t}\n\tif orderedToken([]byte{0, 0, 4, 2}).Less(orderedToken([]byte{0, 0, 3})) {\n\t\tt.Errorf(\"Expected Less to return false, but was true\")\n\t}\n}\n\n// Tests of the randomPartitioner\nfunc TestRandomPartitioner(t *testing.T) {\n\tt.Parallel()\n\n\t// at least verify that the partitioner\n\t// doesn't return nil\n\tp := randomPartitioner{}\n\tpk, _ := marshalInt(1)\n\ttoken := p.Hash(pk)\n\tif token == nil {\n\t\tt.Fatal(\"token was nil\")\n\t}\n\n\tstr := token.String()\n\tparsedToken := p.ParseString(str)\n\n\tif (*big.Int)(token.(*randomToken)).Cmp((*big.Int)(parsedToken.(*randomToken))) != 0 
{\n\t\tt.Errorf(\"Failed to convert to and from a string %s expected %v but was %v\",\n\t\t\tstr,\n\t\t\ttoken,\n\t\t\tparsedToken,\n\t\t)\n\t}\n}\n\nfunc TestRandomPartitionerMatchesReference(t *testing.T) {\n\tt.Parallel()\n\n\t// example taken from datastax python driver\n\t//    >>> from cassandra.metadata import MD5Token\n\t//    >>> MD5Token.hash_fn(\"test\")\n\t//    12707736894140473154801792860916528374L\n\tvar p randomPartitioner\n\texpect := \"12707736894140473154801792860916528374\"\n\tactual := p.Hash([]byte(\"test\")).String()\n\tif actual != expect {\n\t\tt.Errorf(\"expected random partitioner to generate tokens in the same way as the reference\"+\n\t\t\t\" python client. Expected %s, but got %s\", expect, actual)\n\t}\n}\n\n// Tests of the randomToken\nfunc TestRandomToken(t *testing.T) {\n\tt.Parallel()\n\n\tif ((*randomToken)(big.NewInt(42))).Less((*randomToken)(big.NewInt(42))) {\n\t\tt.Errorf(\"Expected Less to return false, but was true\")\n\t}\n\tif !((*randomToken)(big.NewInt(41))).Less((*randomToken)(big.NewInt(42))) {\n\t\tt.Errorf(\"Expected Less to return true, but was false\")\n\t}\n\tif ((*randomToken)(big.NewInt(42))).Less((*randomToken)(big.NewInt(41))) {\n\t\tt.Errorf(\"Expected Less to return false, but was true\")\n\t}\n}\n\ntype intToken int\n\nfunc (i intToken) String() string        { return strconv.Itoa(int(i)) }\nfunc (i intToken) Less(token Token) bool { return i < token.(intToken) }\n\n// Test of the token ring implementation based on example at the start of this\n// page of documentation:\n// http://www.datastax.com/docs/0.8/cluster_architecture/partitioning\nfunc TestTokenRing_Int(t *testing.T) {\n\tt.Parallel()\n\n\thost0 := &HostInfo{}\n\thost25 := &HostInfo{}\n\thost50 := &HostInfo{}\n\thost75 := &HostInfo{}\n\tring := &tokenRing{\n\t\tpartitioner: nil,\n\t\t// these tokens and hosts are out of order to test sorting\n\t\ttokens: []hostToken{\n\t\t\t{intToken(0), host0},\n\t\t\t{intToken(50), 
host50},\n\t\t\t{intToken(75), host75},\n\t\t\t{intToken(25), host25},\n\t\t},\n\t}\n\n\tsort.Sort(ring)\n\n\tif host, endToken := ring.GetHostForToken(intToken(0)); host != host0 || endToken != intToken(0) {\n\t\tt.Error(\"Expected host 0 for token 0\")\n\t}\n\tif host, endToken := ring.GetHostForToken(intToken(1)); host != host25 || endToken != intToken(25) {\n\t\tt.Error(\"Expected host 25 for token 1\")\n\t}\n\tif host, endToken := ring.GetHostForToken(intToken(24)); host != host25 || endToken != intToken(25) {\n\t\tt.Error(\"Expected host 25 for token 24\")\n\t}\n\tif host, endToken := ring.GetHostForToken(intToken(25)); host != host25 || endToken != intToken(25) {\n\t\tt.Error(\"Expected host 25 for token 25\")\n\t}\n\tif host, endToken := ring.GetHostForToken(intToken(26)); host != host50 || endToken != intToken(50) {\n\t\tt.Error(\"Expected host 50 for token 26\")\n\t}\n\tif host, endToken := ring.GetHostForToken(intToken(49)); host != host50 || endToken != intToken(50) {\n\t\tt.Error(\"Expected host 50 for token 49\")\n\t}\n\tif host, endToken := ring.GetHostForToken(intToken(50)); host != host50 || endToken != intToken(50) {\n\t\tt.Error(\"Expected host 50 for token 50\")\n\t}\n\tif host, endToken := ring.GetHostForToken(intToken(51)); host != host75 || endToken != intToken(75) {\n\t\tt.Error(\"Expected host 75 for token 51\")\n\t}\n\tif host, endToken := ring.GetHostForToken(intToken(74)); host != host75 || endToken != intToken(75) {\n\t\tt.Error(\"Expected host 75 for token 74\")\n\t}\n\tif host, endToken := ring.GetHostForToken(intToken(75)); host != host75 || endToken != intToken(75) {\n\t\tt.Error(\"Expected host 75 for token 75\")\n\t}\n\tif host, endToken := ring.GetHostForToken(intToken(76)); host != host0 || endToken != intToken(0) {\n\t\tt.Error(\"Expected host 0 for token 76\")\n\t}\n\tif host, endToken := ring.GetHostForToken(intToken(99)); host != host0 || endToken != intToken(0) {\n\t\tt.Error(\"Expected host 0 for token 99\")\n\t}\n\tif 
host, endToken := ring.GetHostForToken(intToken(100)); host != host0 || endToken != intToken(0) {\n\t\tt.Error(\"Expected host 0 for token 100\")\n\t}\n}\n\n// Test for the behavior of a nil pointer to tokenRing\nfunc TestTokenRing_Nil(t *testing.T) {\n\tt.Parallel()\n\n\tvar ring *tokenRing = nil\n\n\tif host, endToken := ring.GetHostForToken(nil); host != nil || endToken != nil {\n\t\tt.Error(\"Expected nil for nil token ring\")\n\t}\n\tif host, token, endToken := ring.GetHostForPartitionKey(nil); host != nil || token != nil || endToken != nil {\n\t\tt.Error(\"Expected nil for nil token ring\")\n\t}\n}\n\n// Test of the recognition of the partitioner class\nfunc TestTokenRing_UnknownPartition(t *testing.T) {\n\tt.Parallel()\n\n\t_, err := newTokenRing(\"UnknownPartitioner\", nil)\n\tif err == nil {\n\t\tt.Error(\"Expected error for unknown partitioner value, but was nil\")\n\t}\n}\n\nfunc hostsForTests(n int) []*HostInfo {\n\thosts := make([]*HostInfo, n)\n\tfor i := 0; i < n; i++ {\n\t\thost := &HostInfo{\n\t\t\tconnectAddress: net.IPv4(1, 1, 1, byte(n)),\n\t\t\ttokens:         []string{fmt.Sprintf(\"%d\", n)},\n\t\t}\n\n\t\thosts[i] = host\n\t}\n\treturn hosts\n}\n\n// Test of the tokenRing with the Murmur3Partitioner\nfunc TestTokenRing_Murmur3(t *testing.T) {\n\tt.Parallel()\n\n\t// Note, strings are parsed directly to int64, they are not murmur3 hashed\n\thosts := hostsForTests(4)\n\tring, err := newTokenRing(\"Murmur3Partitioner\", hosts)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create token ring due to error: %v\", err)\n\t}\n\n\tp := murmur3Partitioner{}\n\n\tfor _, host := range hosts {\n\t\tactual, _ := ring.GetHostForToken(p.ParseString(host.tokens[0]))\n\t\tif !actual.ConnectAddress().Equal(host.ConnectAddress()) {\n\t\t\tt.Errorf(\"Expected address %v for token %q, but was %v\", host.ConnectAddress(),\n\t\t\t\thost.tokens[0], actual.ConnectAddress())\n\t\t}\n\t}\n\n\tactual, _ := ring.GetHostForToken(p.ParseString(\"12\"))\n\tif 
!actual.ConnectAddress().Equal(hosts[1].ConnectAddress()) {\n\t\tt.Errorf(\"Expected address 1 for token \\\"12\\\", but was %s\", actual.ConnectAddress())\n\t}\n\n\tactual, _ = ring.GetHostForToken(p.ParseString(\"24324545443332\"))\n\tif !actual.ConnectAddress().Equal(hosts[0].ConnectAddress()) {\n\t\tt.Errorf(\"Expected address 0 for token \\\"24324545443332\\\", but was %s\", actual.ConnectAddress())\n\t}\n}\n\n// Test of the tokenRing with the OrderedPartitioner\nfunc TestTokenRing_Ordered(t *testing.T) {\n\tt.Parallel()\n\n\t// Tokens here more or less are similar layout to the int tokens above due\n\t// to each numeric character translating to a consistently offset byte.\n\thosts := hostsForTests(4)\n\tring, err := newTokenRing(\"OrderedPartitioner\", hosts)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create token ring due to error: %v\", err)\n\t}\n\n\tp := orderedPartitioner{}\n\n\tvar actual *HostInfo\n\tfor _, host := range hosts {\n\t\tactual, _ := ring.GetHostForToken(p.ParseString(host.tokens[0]))\n\t\tif !actual.ConnectAddress().Equal(host.ConnectAddress()) {\n\t\t\tt.Errorf(\"Expected address %v for token %q, but was %v\", host.ConnectAddress(),\n\t\t\t\thost.tokens[0], actual.ConnectAddress())\n\t\t}\n\t}\n\n\tactual, _ = ring.GetHostForToken(p.ParseString(\"12\"))\n\tif !actual.peer.Equal(hosts[1].peer) {\n\t\tt.Errorf(\"Expected address 1 for token \\\"12\\\", but was %s\", actual.ConnectAddress())\n\t}\n\n\tactual, _ = ring.GetHostForToken(p.ParseString(\"24324545443332\"))\n\tif !actual.ConnectAddress().Equal(hosts[1].ConnectAddress()) {\n\t\tt.Errorf(\"Expected address 1 for token \\\"24324545443332\\\", but was %s\", actual.ConnectAddress())\n\t}\n}\n\n// Test of the tokenRing with the RandomPartitioner\nfunc TestTokenRing_Random(t *testing.T) {\n\tt.Parallel()\n\n\t// String tokens are parsed into big.Int in base 10\n\thosts := hostsForTests(4)\n\tring, err := newTokenRing(\"RandomPartitioner\", hosts)\n\tif err != nil 
{\n\t\tt.Fatalf(\"Failed to create token ring due to error: %v\", err)\n\t}\n\n\tp := randomPartitioner{}\n\n\tvar actual *HostInfo\n\tfor _, host := range hosts {\n\t\tactual, _ := ring.GetHostForToken(p.ParseString(host.tokens[0]))\n\t\tif !actual.ConnectAddress().Equal(host.ConnectAddress()) {\n\t\t\tt.Errorf(\"Expected address %v for token %q, but was %v\", host.ConnectAddress(),\n\t\t\t\thost.tokens[0], actual.ConnectAddress())\n\t\t}\n\t}\n\n\tactual, _ = ring.GetHostForToken(p.ParseString(\"12\"))\n\tif !actual.peer.Equal(hosts[1].peer) {\n\t\tt.Errorf(\"Expected address 1 for token \\\"12\\\", but was %s\", actual.ConnectAddress())\n\t}\n\n\tactual, _ = ring.GetHostForToken(p.ParseString(\"24324545443332\"))\n\tif !actual.ConnectAddress().Equal(hosts[0].ConnectAddress()) {\n\t\tt.Errorf(\"Expected address 0 for token \\\"24324545443332\\\", but was %s\", actual.ConnectAddress())\n\t}\n}\n"
  },
  {
    "path": "topology.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype hostTokens struct {\n\t// token is end (inclusive) of token range these hosts belong to\n\ttoken Token\n\thosts []*HostInfo\n}\n\n// tokenRingReplicas maps token ranges to list of replicas.\n// The elements in tokenRingReplicas are sorted by token ascending.\n// The range for a given item in tokenRingReplicas starts after preceding range and ends with the token specified in\n// token. The end token is part of the range.\n// The lowest (i.e. 
index 0) range wraps around the ring (its preceding range is the one with largest index).\ntype tokenRingReplicas []hostTokens\n\nfunc (h tokenRingReplicas) Less(i, j int) bool { return h[i].token.Less(h[j].token) }\nfunc (h tokenRingReplicas) Len() int           { return len(h) }\nfunc (h tokenRingReplicas) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }\n\nfunc (h tokenRingReplicas) replicasFor(t Token) *hostTokens {\n\tif len(h) == 0 {\n\t\treturn nil\n\t}\n\n\tp := sort.Search(len(h), func(i int) bool {\n\t\treturn !h[i].token.Less(t)\n\t})\n\n\tif p >= len(h) {\n\t\t// rollover\n\t\tp = 0\n\t}\n\n\treturn &h[p]\n}\n\ntype placementStrategy interface {\n\treplicaMap(tokenRing *tokenRing) tokenRingReplicas\n\treplicationFactor(dc string) int\n}\n\nfunc getReplicationFactorFromOpts(val any) (int, error) {\n\tswitch v := val.(type) {\n\tcase int:\n\t\tif v < 0 {\n\t\t\treturn 0, fmt.Errorf(\"invalid replication_factor %d\", v)\n\t\t}\n\t\treturn v, nil\n\tcase string:\n\t\tn, err := strconv.Atoi(v)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"invalid replication_factor %q: %v\", v, err)\n\t\t} else if n < 0 {\n\t\t\treturn 0, fmt.Errorf(\"invalid replication_factor %d\", n)\n\t\t}\n\t\treturn n, nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"unknown replication_factor type %T\", v)\n\t}\n}\n\nfunc getStrategy(ks *KeyspaceMetadata, logger StdLogger) placementStrategy {\n\tswitch {\n\tcase strings.Contains(ks.StrategyClass, \"SimpleStrategy\"):\n\t\trf, err := getReplicationFactorFromOpts(ks.StrategyOptions[\"replication_factor\"])\n\t\tif err != nil {\n\t\t\tlogger.Printf(\"parse rf for keyspace %q: %v\", ks.Name, err)\n\t\t\treturn nil\n\t\t}\n\t\treturn &simpleStrategy{rf: rf}\n\tcase strings.Contains(ks.StrategyClass, \"NetworkTopologyStrategy\"):\n\t\tdcs := make(map[string]int)\n\t\tfor dc, rf := range ks.StrategyOptions {\n\t\t\tif dc == \"class\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\trf, err := getReplicationFactorFromOpts(rf)\n\t\t\tif err != nil 
{\n\t\t\t\tlogger.Printf(\"parse rf for keyspace %q, dc %q: %v\", ks.Name, dc, err)\n\t\t\t\t// skip DC if the rf is invalid/unsupported, so that we can at least work with other working DCs.\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdcs[dc] = rf\n\t\t}\n\t\treturn &networkTopology{dcs: dcs}\n\tcase strings.Contains(ks.StrategyClass, \"LocalStrategy\"):\n\t\treturn nil\n\tdefault:\n\t\tlogger.Printf(\"parse rf for keyspace %q: unsupported strategy class: %v\", ks.Name, ks.StrategyClass)\n\t\treturn nil\n\t}\n}\n\ntype simpleStrategy struct {\n\trf int\n}\n\nfunc (s *simpleStrategy) replicationFactor(dc string) int {\n\treturn s.rf\n}\n\nfunc (s *simpleStrategy) replicaMap(tokenRing *tokenRing) tokenRingReplicas {\n\ttokens := tokenRing.tokens\n\tring := make(tokenRingReplicas, len(tokens))\n\n\tfor i, th := range tokens {\n\t\treplicas := make([]*HostInfo, 0, s.rf)\n\t\tseen := make(map[*HostInfo]bool)\n\n\t\tfor j := 0; j < len(tokens) && len(replicas) < s.rf; j++ {\n\t\t\th := tokens[(i+j)%len(tokens)]\n\t\t\tif !seen[h.host] {\n\t\t\t\treplicas = append(replicas, h.host)\n\t\t\t\tseen[h.host] = true\n\t\t\t}\n\t\t}\n\n\t\tring[i] = hostTokens{token: th.token, hosts: replicas}\n\t}\n\n\tsort.Sort(ring)\n\n\treturn ring\n}\n\ntype networkTopology struct {\n\tdcs map[string]int\n}\n\nfunc (n *networkTopology) replicationFactor(dc string) int {\n\treturn n.dcs[dc]\n}\n\nfunc (n *networkTopology) haveRF(replicaCounts map[string]int) bool {\n\tif len(replicaCounts) != len(n.dcs) {\n\t\treturn false\n\t}\n\n\tfor dc, rf := range n.dcs {\n\t\tif rf != replicaCounts[dc] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc (n *networkTopology) replicaMap(tokenRing *tokenRing) tokenRingReplicas {\n\tdcRacks := make(map[string]map[string]struct{}, len(n.dcs))\n\t// skipped hosts in a dc\n\tskipped := make(map[string][]*HostInfo, len(n.dcs))\n\t// number of replicas per dc\n\treplicasInDC := make(map[string]int, len(n.dcs))\n\t// dc -> racks\n\tseenDCRacks := 
make(map[string]map[string]struct{}, len(n.dcs))\n\n\tfor _, h := range tokenRing.hosts {\n\t\tdc := h.DataCenter()\n\t\track := h.Rack()\n\n\t\tracks, ok := dcRacks[dc]\n\t\tif !ok {\n\t\t\tracks = make(map[string]struct{})\n\t\t\tdcRacks[dc] = racks\n\t\t}\n\t\tracks[rack] = struct{}{}\n\t}\n\n\tfor dc, racks := range dcRacks {\n\t\treplicasInDC[dc] = 0\n\t\tseenDCRacks[dc] = make(map[string]struct{}, len(racks))\n\t}\n\n\ttokens := tokenRing.tokens\n\treplicaRing := make(tokenRingReplicas, 0, len(tokens))\n\n\tvar totalRF int\n\tfor _, rf := range n.dcs {\n\t\ttotalRF += rf\n\t}\n\n\tfor i, th := range tokenRing.tokens {\n\t\tif rf := n.dcs[th.host.DataCenter()]; rf == 0 {\n\t\t\t// skip this token since no replica in this datacenter.\n\t\t\tcontinue\n\t\t}\n\n\t\tfor k, v := range skipped {\n\t\t\tskipped[k] = v[:0]\n\t\t}\n\n\t\tfor dc := range n.dcs {\n\t\t\treplicasInDC[dc] = 0\n\t\t\tfor rack := range seenDCRacks[dc] {\n\t\t\t\tdelete(seenDCRacks[dc], rack)\n\t\t\t}\n\t\t}\n\n\t\treplicas := make([]*HostInfo, 0, totalRF)\n\t\tfor j := 0; j < len(tokens) && (len(replicas) < totalRF && !n.haveRF(replicasInDC)); j++ {\n\t\t\t// TODO: ensure we dont add the same host twice\n\t\t\tp := i + j\n\t\t\tif p >= len(tokens) {\n\t\t\t\tp -= len(tokens)\n\t\t\t}\n\t\t\th := tokens[p].host\n\n\t\t\tdc := h.DataCenter()\n\t\t\track := h.Rack()\n\n\t\t\trf := n.dcs[dc]\n\t\t\tif rf == 0 {\n\t\t\t\t// skip this DC, dont know about it or replication factor is zero\n\t\t\t\tcontinue\n\t\t\t} else if replicasInDC[dc] >= rf {\n\t\t\t\tif replicasInDC[dc] > rf {\n\t\t\t\t\tpanic(fmt.Sprintf(\"replica overflow. 
rf=%d have=%d in dc %q\", rf, replicasInDC[dc], dc))\n\t\t\t\t}\n\n\t\t\t\t// have enough replicas in this DC\n\t\t\t\tcontinue\n\t\t\t} else if _, ok := dcRacks[dc][rack]; !ok {\n\t\t\t\t// dont know about this rack\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tracks := seenDCRacks[dc]\n\t\t\tif _, ok := racks[rack]; ok && len(racks) == len(dcRacks[dc]) {\n\t\t\t\t// we have been through all the racks and dont have RF yet, add this\n\t\t\t\treplicas = append(replicas, h)\n\t\t\t\treplicasInDC[dc]++\n\t\t\t} else if !ok {\n\t\t\t\tif racks == nil {\n\t\t\t\t\tracks = make(map[string]struct{}, 1)\n\t\t\t\t\tseenDCRacks[dc] = racks\n\t\t\t\t}\n\n\t\t\t\t// new rack\n\t\t\t\tracks[rack] = struct{}{}\n\t\t\t\treplicas = append(replicas, h)\n\t\t\t\tr := replicasInDC[dc] + 1\n\n\t\t\t\tif len(racks) == len(dcRacks[dc]) {\n\t\t\t\t\t// if we have been through all the racks, drain the rest of the skipped\n\t\t\t\t\t// hosts until we have RF. The next iteration will skip in the block\n\t\t\t\t\t// above\n\t\t\t\t\tskippedHosts := skipped[dc]\n\t\t\t\t\tvar k int\n\t\t\t\t\tfor ; k < len(skippedHosts) && r+k < rf; k++ {\n\t\t\t\t\t\tsh := skippedHosts[k]\n\t\t\t\t\t\treplicas = append(replicas, sh)\n\t\t\t\t\t}\n\t\t\t\t\tr += k\n\t\t\t\t\tskipped[dc] = skippedHosts[k:]\n\t\t\t\t}\n\t\t\t\treplicasInDC[dc] = r\n\t\t\t} else {\n\t\t\t\t// already seen this rack, keep hold of this host incase\n\t\t\t\t// we dont get enough for rf\n\t\t\t\tskipped[dc] = append(skipped[dc], h)\n\t\t\t}\n\t\t}\n\n\t\tif len(replicas) == 0 {\n\t\t\tpanic(fmt.Sprintf(\"no replicas for token: %v\", th.token))\n\t\t} else if !replicas[0].Equal(th.host) {\n\t\t\tpanic(fmt.Sprintf(\"first replica is not the primary replica for the token: expected %v got %v\", replicas[0].ConnectAddress(), th.host.ConnectAddress()))\n\t\t}\n\n\t\treplicaRing = append(replicaRing, hostTokens{token: th.token, hosts: replicas})\n\t}\n\n\tdcsWithReplicas := 0\n\tfor _, dc := range n.dcs {\n\t\tif dc > 0 
{\n\t\t\tdcsWithReplicas++\n\t\t}\n\t}\n\n\tif dcsWithReplicas == len(dcRacks) && len(replicaRing) != len(tokens) {\n\t\tpanic(fmt.Sprintf(\"token map different size to token ring: got %d expected %d\", len(replicaRing), len(tokens)))\n\t}\n\n\treturn replicaRing\n}\n"
  },
  {
    "path": "topology_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc TestPlacementStrategy_SimpleStrategy(t *testing.T) {\n\tt.Parallel()\n\n\thost0 := &HostInfo{hostId: tUUID(0)}\n\thost25 := &HostInfo{hostId: tUUID(25)}\n\thost50 := &HostInfo{hostId: tUUID(50)}\n\thost75 := &HostInfo{hostId: tUUID(75)}\n\n\ttokens := []hostToken{\n\t\t{intToken(0), host0},\n\t\t{intToken(25), host25},\n\t\t{intToken(50), host50},\n\t\t{intToken(75), host75},\n\t}\n\n\thosts := []*HostInfo{host0, host25, host50, host75}\n\n\tstrat := &simpleStrategy{rf: 2}\n\ttokenReplicas := strat.replicaMap(&tokenRing{hosts: hosts, tokens: tokens})\n\tif len(tokenReplicas) != len(tokens) {\n\t\tt.Fatalf(\"expected replica map to have %d items but has %d\", len(tokens), len(tokenReplicas))\n\t}\n\n\tfor _, replicas := range tokenReplicas {\n\t\tif len(replicas.hosts) != strat.rf 
{\n\t\t\tt.Errorf(\"expected to have %d replicas got %d for token=%v\", strat.rf, len(replicas.hosts), replicas.token)\n\t\t}\n\t}\n\n\tfor i, token := range tokens {\n\t\tht := tokenReplicas.replicasFor(token.token)\n\t\tif ht.token != token.token {\n\t\t\tt.Errorf(\"token %v not in replica map: %v\", token, ht.hosts)\n\t\t}\n\n\t\tfor j, replica := range ht.hosts {\n\t\t\texp := tokens[(i+j)%len(tokens)].host\n\t\t\tif exp != replica {\n\t\t\t\tt.Errorf(\"expected host %v to be a replica of %v got %v\", exp.hostId, token, replica.hostId)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestPlacementStrategy_NetworkStrategy(t *testing.T) {\n\tt.Parallel()\n\n\tconst (\n\t\ttotalDCs   = 3\n\t\tracksPerDC = 3\n\t\thostsPerDC = 5\n\t)\n\n\ttests := []struct {\n\t\tname                   string\n\t\tstrat                  *networkTopology\n\t\texpectedReplicaMapSize int\n\t}{\n\t\t{\n\t\t\tname: \"full\",\n\t\t\tstrat: &networkTopology{\n\t\t\t\tdcs: map[string]int{\n\t\t\t\t\t\"dc1\": 1,\n\t\t\t\t\t\"dc2\": 2,\n\t\t\t\t\t\"dc3\": 3,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedReplicaMapSize: hostsPerDC * totalDCs,\n\t\t},\n\t\t{\n\t\t\tname: \"missing\",\n\t\t\tstrat: &networkTopology{\n\t\t\t\tdcs: map[string]int{\n\t\t\t\t\t\"dc2\": 2,\n\t\t\t\t\t\"dc3\": 3,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedReplicaMapSize: hostsPerDC * 2,\n\t\t},\n\t\t{\n\t\t\tname: \"zero\",\n\t\t\tstrat: &networkTopology{\n\t\t\t\tdcs: map[string]int{\n\t\t\t\t\t\"dc1\": 0,\n\t\t\t\t\t\"dc2\": 2,\n\t\t\t\t\t\"dc3\": 3,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedReplicaMapSize: hostsPerDC * 2,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\ttest := test\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tvar (\n\t\t\t\thosts  []*HostInfo\n\t\t\t\ttokens []hostToken\n\t\t\t)\n\t\t\tdcRing := make(map[string][]hostToken, totalDCs)\n\t\t\thostIdx := 0\n\t\t\tfor i := 0; i < totalDCs; i++ {\n\t\t\t\tvar dcTokens []hostToken\n\t\t\t\tdc := fmt.Sprintf(\"dc%d\", i+1)\n\n\t\t\t\tfor j := 0; j < hostsPerDC; j++ {\n\t\t\t\t\track 
:= fmt.Sprintf(\"rack%d\", (j%racksPerDC)+1)\n\t\t\t\t\ttokenStr := fmt.Sprintf(\"%s:%s:%d\", dc, rack, j)\n\n\t\t\t\t\th := &HostInfo{hostId: tUUID(hostIdx), dataCenter: dc, rack: rack}\n\t\t\t\t\thostIdx++\n\n\t\t\t\t\ttoken := hostToken{\n\t\t\t\t\t\ttoken: orderedToken(tokenStr),\n\t\t\t\t\t\thost:  h,\n\t\t\t\t\t}\n\n\t\t\t\t\ttokens = append(tokens, token)\n\t\t\t\t\tdcTokens = append(dcTokens, token)\n\n\t\t\t\t\thosts = append(hosts, h)\n\t\t\t\t}\n\n\t\t\t\tsort.Sort(&tokenRing{tokens: dcTokens})\n\t\t\t\tdcRing[dc] = dcTokens\n\t\t\t}\n\n\t\t\tif len(tokens) != hostsPerDC*totalDCs {\n\t\t\t\tt.Fatalf(\"expected %d tokens in the ring got %d\", hostsPerDC*totalDCs, len(tokens))\n\t\t\t}\n\t\t\tsort.Sort(&tokenRing{tokens: tokens})\n\n\t\t\tvar expReplicas int\n\t\t\tfor _, rf := range test.strat.dcs {\n\t\t\t\texpReplicas += rf\n\t\t\t}\n\n\t\t\ttokenReplicas := test.strat.replicaMap(&tokenRing{hosts: hosts, tokens: tokens})\n\t\t\tif len(tokenReplicas) != test.expectedReplicaMapSize {\n\t\t\t\tt.Fatalf(\"expected replica map to have %d items but has %d\", test.expectedReplicaMapSize,\n\t\t\t\t\tlen(tokenReplicas))\n\t\t\t}\n\t\t\tif !sort.IsSorted(tokenReplicas) {\n\t\t\t\tt.Fatal(\"replica map was not sorted by token\")\n\t\t\t}\n\n\t\t\tfor token, replicas := range tokenReplicas {\n\t\t\t\tif len(replicas.hosts) != expReplicas {\n\t\t\t\t\tt.Fatalf(\"expected to have %d replicas got %d for token=%v\", expReplicas, len(replicas.hosts), token)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor dc, rf := range test.strat.dcs {\n\t\t\t\tif rf == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdcTokens := dcRing[dc]\n\t\t\t\tfor i, th := range dcTokens {\n\t\t\t\t\ttoken := th.token\n\t\t\t\t\tallReplicas := tokenReplicas.replicasFor(token)\n\t\t\t\t\tif allReplicas.token != token {\n\t\t\t\t\t\tt.Fatalf(\"token %v not in replica map\", token)\n\t\t\t\t\t}\n\n\t\t\t\t\tvar replicas []*HostInfo\n\t\t\t\t\tfor _, replica := range allReplicas.hosts {\n\t\t\t\t\t\tif 
replica.dataCenter == dc {\n\t\t\t\t\t\t\treplicas = append(replicas, replica)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(replicas) != rf {\n\t\t\t\t\t\tt.Fatalf(\"expected %d replicas in dc %q got %d\", rf, dc, len(replicas))\n\t\t\t\t\t}\n\n\t\t\t\t\tvar lastRack string\n\t\t\t\t\tfor j, replica := range replicas {\n\t\t\t\t\t\t// expected is in the next rack\n\t\t\t\t\t\tvar exp *HostInfo\n\t\t\t\t\t\tif lastRack == \"\" {\n\t\t\t\t\t\t\t// primary, first replica\n\t\t\t\t\t\t\texp = dcTokens[(i+j)%len(dcTokens)].host\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tfor k := 0; k < len(dcTokens); k++ {\n\t\t\t\t\t\t\t\t// walk around the ring from i + j to find the next host the\n\t\t\t\t\t\t\t\t// next rack\n\t\t\t\t\t\t\t\tp := (i + j + k) % len(dcTokens)\n\t\t\t\t\t\t\t\th := dcTokens[p].host\n\t\t\t\t\t\t\t\tif h.rack != lastRack {\n\t\t\t\t\t\t\t\t\texp = h\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif exp.rack == lastRack {\n\t\t\t\t\t\t\t\tt.Fatal(\"no more racks\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlastRack = replica.rack\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "tracer.go",
    "content": "package gocql\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n)\n\n// Tracer is the interface implemented by query tracers. Tracers have the\n// ability to obtain a detailed event log of all events that happened during\n// the execution of a query from Cassandra. Gathering this information might\n// be essential for debugging and optimizing queries, but this feature should\n// not be used on production systems with very high load.\ntype Tracer interface {\n\tTrace(traceId []byte)\n}\n\ntype TraceWriter struct {\n\tsession *Session\n\tw       io.Writer\n\tmu      sync.Mutex\n\n\tmaxAttempts   int\n\tsleepInterval time.Duration\n}\n\n// NewTraceWriter returns a simple Tracer implementation that outputs\n// the event log in a textual format.\nfunc NewTraceWriter(session *Session, w io.Writer) *TraceWriter {\n\treturn &TraceWriter{session: session, w: w, maxAttempts: 5, sleepInterval: 3 * time.Millisecond}\n}\n\nfunc (t *TraceWriter) SetMaxAttempts(maxAttempts int) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tt.maxAttempts = maxAttempts\n}\n\nfunc (t *TraceWriter) SetSleepInterval(sleepInterval time.Duration) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tt.sleepInterval = sleepInterval\n}\n\nfunc (t *TraceWriter) Trace(traceId []byte) {\n\tvar (\n\t\ttimestamp time.Time\n\t\tactivity  string\n\t\tsource    string\n\t\telapsed   int\n\t\tthread    string\n\t)\n\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tfetchAttempts := 1\n\tif t.maxAttempts > 0 {\n\t\tfetchAttempts = t.maxAttempts\n\t}\n\n\tisDone := false\n\tfor i := 0; i < fetchAttempts; i++ {\n\t\tvar duration int\n\n\t\titer := t.session.control.querySystem(`SELECT duration\n\t\t\tFROM system_traces.sessions\n\t\t\tWHERE session_id = ?`, traceId)\n\t\titer.Scan(&duration)\n\t\tif duration > 0 {\n\t\t\tisDone = true\n\t\t}\n\n\t\tif err := iter.Close(); err != nil {\n\t\t\tfmt.Fprintln(t.w, \"Error:\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif isDone || i == fetchAttempts-1 
{\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(t.sleepInterval)\n\t}\n\tif !isDone {\n\t\tfmt.Fprintln(t.w, \"Error: failed to wait tracing to complete. !!! Tracing is incomplete !!!\")\n\t}\n\n\tvar (\n\t\tcoordinator string\n\t\tduration    int\n\t)\n\n\titer := t.session.control.querySystem(`SELECT coordinator, duration\n\t\tFROM system_traces.sessions\n\t\tWHERE session_id = ?`, traceId)\n\n\titer.Scan(&coordinator, &duration)\n\tif err := iter.Close(); err != nil {\n\t\tfmt.Fprintln(t.w, \"Error:\", err)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(t.w, \"Tracing session %016x (coordinator: %s, duration: %v):\\n\",\n\t\ttraceId, coordinator, time.Duration(duration)*time.Microsecond)\n\n\titer = t.session.control.querySystem(`SELECT event_id, activity, source, source_elapsed, thread\n\t\t\tFROM system_traces.events\n\t\t\tWHERE session_id = ?`, traceId)\n\n\tfor iter.Scan(&timestamp, &activity, &source, &elapsed, &thread) {\n\t\tfmt.Fprintf(t.w, \"%s: %s [%s] (source: %s, elapsed: %d)\\n\",\n\t\t\ttimestamp.Format(\"2006/01/02 15:04:05.999999\"), activity, thread, source, elapsed)\n\t}\n\n\tif err := iter.Close(); err != nil {\n\t\tfmt.Fprintln(t.w, \"Error:\", err)\n\t}\n}\n\ntype TracerEnhanced struct {\n\tsession  *Session\n\ttraceIDs [][]byte\n\tmu       sync.Mutex\n}\n\nfunc NewTracer(session *Session) *TracerEnhanced {\n\treturn &TracerEnhanced{session: session}\n}\n\nfunc (t *TracerEnhanced) Trace(traceId []byte) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tt.traceIDs = append(t.traceIDs, traceId)\n}\n\nfunc (t *TracerEnhanced) AllTraceIDs() [][]byte {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\treturn t.traceIDs\n}\n\nfunc (t *TracerEnhanced) IsReady(traceId []byte) (bool, error) {\n\tisDone := false\n\tvar duration int\n\n\titer := t.session.control.querySystem(`SELECT duration\n\t\tFROM system_traces.sessions\n\t\tWHERE session_id = ?`, traceId)\n\titer.Scan(&duration)\n\tif duration > 0 {\n\t\tisDone = true\n\t}\n\n\tif err := iter.Close(); err != nil {\n\t\treturn false, 
err\n\t}\n\n\tif isDone {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\nfunc (t *TracerEnhanced) GetCoordinatorTime(traceId []byte) (string, time.Duration, error) {\n\tvar (\n\t\tcoordinator string\n\t\tduration    int\n\t)\n\n\titer := t.session.control.querySystem(`SELECT coordinator, duration\n\t\tFROM system_traces.sessions\n\t\tWHERE session_id = ?`, traceId)\n\n\titer.Scan(&coordinator, &duration)\n\tif err := iter.Close(); err != nil {\n\t\treturn coordinator, time.Duration(duration) * time.Microsecond, err\n\t}\n\n\treturn coordinator, time.Duration(duration) * time.Microsecond, nil\n}\n\ntype TraceEntry struct {\n\tTimestamp time.Time\n\tActivity  string\n\tSource    string\n\tThread    string\n\tElapsed   int\n}\n\nfunc (t *TracerEnhanced) GetActivities(traceId []byte) ([]TraceEntry, error) {\n\titer := t.session.control.querySystem(`SELECT event_id, activity, source, source_elapsed, thread\n\t\tFROM system_traces.events\n\t\tWHERE session_id = ?`, traceId)\n\n\tvar (\n\t\ttimestamp time.Time\n\t\tactivity  string\n\t\tsource    string\n\t\telapsed   int\n\t\tthread    string\n\t)\n\n\tvar activities []TraceEntry\n\n\tfor iter.Scan(&timestamp, &activity, &source, &elapsed, &thread) {\n\t\tactivities = append(activities, TraceEntry{Timestamp: timestamp, Activity: activity, Source: source, Elapsed: elapsed, Thread: thread})\n\t}\n\n\tif err := iter.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn activities, nil\n}\n"
  },
  {
    "path": "tracer_test.go",
    "content": "//go:build integration\n// +build integration\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestTracingNewAPI(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\tif err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (id int primary key)`, table)); err != nil {\n\t\tt.Fatal(\"create:\", err)\n\t}\n\n\ttrace := NewTracer(session)\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id) VALUES (?)`, table), 42).Trace(trace).Exec(); err != nil {\n\t\tt.Fatal(\"insert:\", err)\n\t}\n\n\tvar value int\n\tif err := session.Query(fmt.Sprintf(`SELECT id FROM %s WHERE id = ?`, table), 42).Trace(trace).Scan(&value); err != nil {\n\t\tt.Fatal(\"select:\", err)\n\t} else if value != 42 {\n\t\tt.Fatalf(\"value: expected %d, got %d\", 42, value)\n\t}\n\n\tfor _, traceID := range trace.AllTraceIDs() {\n\t\tvar (\n\t\t\tisReady bool\n\t\t\terr     error\n\t\t)\n\t\tfor !isReady {\n\t\t\tisReady, err = trace.IsReady(traceID)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Error: \", err)\n\t\t\t}\n\t\t}\n\t\tactivities, err := trace.GetActivities(traceID)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tcoordinator, _, err := trace.GetCoordinatorTime(traceID)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif len(activities) == 0 {\n\t\t\tt.Fatal(\"Failed to obtain any tracing for traceID: \", traceID)\n\t\t} else if coordinator == \"\" {\n\t\t\tt.Fatal(\"Failed to obtain coordinator for traceID: \", traceID)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "tuple_test.go",
    "content": "//go:build integration\n// +build integration\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestTupleSimple(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\tid int,\n\t\tcoord frozen<tuple<int, int>>,\n\n\t\tprimary key(id))`, table))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, coord) VALUES(?, (?, ?))\", table), 1, 100, -100).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar (\n\t\tid    int\n\t\tcoord struct {\n\t\t\tx int\n\t\t\ty int\n\t\t}\n\t)\n\n\titer := session.Query(fmt.Sprintf(\"SELECT id, coord FROM %s WHERE id=?\", table), 1)\n\tif err := iter.Scan(&id, &coord.x, &coord.y); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif id != 1 
{\n\t\tt.Errorf(\"expected to get id=1 got: %v\", id)\n\t} else if coord.x != 100 {\n\t\tt.Errorf(\"expected to get coord.x=100 got: %v\", coord.x)\n\t} else if coord.y != -100 {\n\t\tt.Errorf(\"expected to get coord.y=-100 got: %v\", coord.y)\n\t}\n\n}\n\nfunc TestTuple_NullTuple(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\tid int,\n\t\tcoord frozen<tuple<int, int>>,\n\n\t\tprimary key(id))`, table))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tconst id = 1\n\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, coord) VALUES(?, (?, ?))\", table), id, nil, nil).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tx := new(int)\n\ty := new(int)\n\titer := session.Query(fmt.Sprintf(\"SELECT coord FROM %s WHERE id=?\", table), id)\n\tif err := iter.Scan(&x, &y); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif x != nil {\n\t\tt.Fatalf(\"should be nil got %+#v\", x)\n\t} else if y != nil {\n\t\tt.Fatalf(\"should be nil got %+#v\", y)\n\t}\n\n}\n\nfunc TestTuple_TupleNotSet(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\tid int,\n\t\tcoord frozen<tuple<int, int>>,\n\n\t\tprimary key(id))`, table))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tconst id = 1\n\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id,coord) VALUES(?, (?,?))\", table), id, 1, 2).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id) VALUES(?)\", table), id+1).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tx := new(int)\n\ty := new(int)\n\titer := session.Query(fmt.Sprintf(\"SELECT coord FROM %s WHERE id=?\", table), id)\n\tif err := iter.Scan(x, y); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif *x != 1 {\n\t\tt.Fatalf(\"x should be %d got 
%+#v, value=%d\", 1, x, *x)\n\t}\n\tif *y != 2 {\n\t\tt.Fatalf(\"y should be %d got %+#v, value=%d\", 2, y, *y)\n\t}\n\n\t// Check if the supplied targets are reset to nil\n\titer = session.Query(fmt.Sprintf(\"SELECT coord FROM %s WHERE id=?\", table), id+1)\n\tif err := iter.Scan(x, y); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif *x != 0 {\n\t\tt.Fatalf(\"x should be %d got %+#v, value=%d\", 0, x, *x)\n\t}\n\tif *y != 0 {\n\t\tt.Fatalf(\"y should be %d got %+#v, value=%d\", 0, y, *y)\n\t}\n}\n\nfunc TestTupleMapScan(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\tid int,\n\t\tval frozen<tuple<int, int>>,\n\n\t\tprimary key(id))`, table))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id, val) VALUES (1, (1, 2));`, table)).Exec(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tm := make(map[string]any)\n\terr = session.Query(fmt.Sprintf(`SELECT * FROM %s`, table)).MapScan(m)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif m[\"val[0]\"] != 1 {\n\t\tt.Fatalf(\"expacted val[0] to be %d but was %d\", 1, m[\"val[0]\"])\n\t}\n\tif m[\"val[1]\"] != 2 {\n\t\tt.Fatalf(\"expacted val[1] to be %d but was %d\", 2, m[\"val[1]\"])\n\t}\n}\n\nfunc TestTupleMapScanNil(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\t\tid int,\n\t\t\tval frozen<tuple<int, int>>,\n\n\t\t\tprimary key(id))`, table))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id, val) VALUES (?,(?,?));`, table), 1, nil, nil).Exec(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tm := make(map[string]any)\n\terr = session.Query(fmt.Sprintf(`SELECT * FROM %s`, table)).MapScan(m)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif 
m[\"val[0]\"] != 0 {\n\t\tt.Fatalf(\"expacted val[0] to be %d but was %d\", 0, m[\"val[0]\"])\n\t}\n\tif m[\"val[1]\"] != 0 {\n\t\tt.Fatalf(\"expacted val[1] to be %d but was %d\", 0, m[\"val[1]\"])\n\t}\n}\n\nfunc TestTupleMapScanNotSet(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\t\tid int,\n\t\t\tval frozen<tuple<int, int>>,\n\n\t\t\tprimary key(id))`, table))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id) VALUES (?);`, table), 1).Exec(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tm := make(map[string]any)\n\terr = session.Query(fmt.Sprintf(`SELECT * FROM %s`, table)).MapScan(m)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif m[\"val[0]\"] != 0 {\n\t\tt.Fatalf(\"expacted val[0] to be %d but was %d\", 0, m[\"val[0]\"])\n\t}\n\tif m[\"val[1]\"] != 0 {\n\t\tt.Fatalf(\"expacted val[1] to be %d but was %d\", 0, m[\"val[1]\"])\n\t}\n}\n\nfunc TestTupleLastFieldEmpty(t *testing.T) {\n\tt.Parallel()\n\n\t// Regression test - empty value used to be treated as NULL value in the last tuple field\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\t\tid int,\n\t\t\tval frozen<tuple<text, text>>,\n\n\t\t\tprimary key(id))`, table))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id, val) VALUES (?,(?,?));`, table), 1, \"abc\", \"\").Exec(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar e1, e2 *string\n\tif err := session.Query(fmt.Sprintf(\"SELECT val FROM %s WHERE id = ?\", table), 1).Scan(&e1, &e2); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif e1 == nil {\n\t\tt.Fatal(\"expected e1 not to be nil\")\n\t}\n\tif *e1 != \"abc\" {\n\t\tt.Fatalf(\"expected e1 to be equal to \\\"abc\\\", but is %v\", *e2)\n\t}\n\tif 
e2 == nil {\n\t\tt.Fatal(\"expected e2 not to be nil\")\n\t}\n\tif *e2 != \"\" {\n\t\tt.Fatalf(\"expected e2 to be an empty string, but is %v\", *e2)\n\t}\n}\n\nfunc TestTuple_NestedCollection(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\tid int,\n\t\tval list<frozen<tuple<int, text>>>,\n\n\t\tprimary key(id))`, table))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttype typ struct {\n\t\tA int\n\t\tB string\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tval  any\n\t}{\n\t\t{name: \"slice\", val: [][]any{{1, \"2\"}, {3, \"4\"}}},\n\t\t{name: \"array\", val: [][2]any{{1, \"2\"}, {3, \"4\"}}},\n\t\t{name: \"struct\", val: []typ{{1, \"2\"}, {3, \"4\"}}},\n\t}\n\n\tfor i, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id, val) VALUES (?, ?);`, table), i, test.val).Exec(); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\trv := reflect.ValueOf(test.val)\n\t\t\tres := reflect.New(rv.Type()).Elem().Addr().Interface()\n\n\t\t\terr = session.Query(fmt.Sprintf(`SELECT val FROM %s WHERE id=?`, table), i).Scan(res)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tresVal := reflect.ValueOf(res).Elem().Interface()\n\t\t\tif !reflect.DeepEqual(test.val, resVal) {\n\t\t\t\tt.Fatalf(\"unmarshaled value not equal to the original value: expected %#v, got %#v\", test.val, resVal)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestTuple_NullableNestedCollection(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\tid int,\n\t\tval list<frozen<tuple<text, text>>>,\n\n\t\tprimary key(id))`, table))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttype typ struct {\n\t\tA *string\n\t\tB *string\n\t}\n\n\tptrStr 
:= func(s string) *string {\n\t\tret := new(string)\n\t\t*ret = s\n\t\treturn ret\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tval  any\n\t}{\n\t\t{name: \"slice\", val: [][]*string{{ptrStr(\"1\"), nil}, {nil, ptrStr(\"2\")}, {ptrStr(\"3\"), ptrStr(\"\")}}},\n\t\t{name: \"array\", val: [][2]*string{{ptrStr(\"1\"), nil}, {nil, ptrStr(\"2\")}, {ptrStr(\"3\"), ptrStr(\"\")}}},\n\t\t{name: \"struct\", val: []typ{{ptrStr(\"1\"), nil}, {nil, ptrStr(\"2\")}, {ptrStr(\"3\"), ptrStr(\"\")}}},\n\t}\n\n\tfor i, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tif err := session.Query(fmt.Sprintf(`INSERT INTO %s (id, val) VALUES (?, ?);`, table), i, test.val).Exec(); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\trv := reflect.ValueOf(test.val)\n\t\t\tres := reflect.New(rv.Type()).Interface()\n\n\t\t\terr = session.Query(fmt.Sprintf(`SELECT val FROM %s WHERE id=?`, table), i).Scan(res)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tresVal := reflect.ValueOf(res).Elem().Interface()\n\t\t\tif !reflect.DeepEqual(test.val, resVal) {\n\t\t\t\tt.Fatalf(\"unmarshaled value not equal to the original value: expected %#v, got %#v\", test.val, resVal)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "udt_test.go",
    "content": "//go:build integration\n// +build integration\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\ntype position struct {\n\tLat     int    `cql:\"lat\"`\n\tLon     int    `cql:\"lon\"`\n\tPadding string `json:\"padding\"`\n}\n\n// NOTE: due to current implementation details it is not currently possible to use\n// a pointer receiver type for the UDTMarshaler interface to handle UDT's\nfunc (p position) MarshalUDT(name string, info TypeInfo) ([]byte, error) {\n\tswitch name {\n\tcase \"lat\":\n\t\treturn Marshal(info, p.Lat)\n\tcase \"lon\":\n\t\treturn Marshal(info, p.Lon)\n\tcase \"padding\":\n\t\treturn Marshal(info, p.Padding)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown column for position: %q\", name)\n\t}\n}\n\nfunc (p *position) UnmarshalUDT(name string, info TypeInfo, data []byte) error {\n\tswitch name {\n\tcase \"lat\":\n\t\treturn Unmarshal(info, 
data, &p.Lat)\n\tcase \"lon\":\n\t\treturn Unmarshal(info, data, &p.Lon)\n\tcase \"padding\":\n\t\treturn Unmarshal(info, data, &p.Padding)\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown column for position: %q\", name)\n\t}\n}\n\nfunc TestUDT_Marshaler(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\ttypeName := testTypeName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TYPE gocql_test.%s(\n\t\tlat int,\n\t\tlon int,\n\t\tpadding text);`, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\tid int,\n\t\tname text,\n\t\tloc frozen<%s>,\n\n\t\tprimary key(id)\n\t);`, table, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tconst (\n\t\texpLat = -1\n\t\texpLon = 2\n\t)\n\tpad := strings.Repeat(\"X\", 1000)\n\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, name, loc) VALUES(?, ?, ?)\", table), 1, \"test\", &position{expLat, expLon, pad}).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpos := &position{}\n\n\terr = session.Query(fmt.Sprintf(\"SELECT loc FROM %s WHERE id = ?\", table), 1).Scan(pos)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif pos.Lat != expLat {\n\t\tt.Errorf(\"expeceted lat to be be %d got %d\", expLat, pos.Lat)\n\t}\n\tif pos.Lon != expLon {\n\t\tt.Errorf(\"expeceted lon to be be %d got %d\", expLon, pos.Lon)\n\t}\n\tif pos.Padding != pad {\n\t\tt.Errorf(\"expected to get padding %q got %q\\n\", pad, pos.Padding)\n\t}\n}\n\nfunc TestUDT_Reflect(t *testing.T) {\n\tt.Parallel()\n\n\t// Uses reflection instead of implementing the marshaling type\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\ttypeName := testTypeName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TYPE gocql_test.%s(\n\t\tname text,\n\t\towner text);`, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = createTable(session, 
fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\tposition int,\n\t\thorse frozen<%s>,\n\n\t\tprimary key(position)\n\t);`, table, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttype horse struct {\n\t\tName  string `cql:\"name\"`\n\t\tOwner string `cql:\"owner\"`\n\t}\n\n\tinsertedHorse := &horse{\n\t\tName:  \"pony\",\n\t\tOwner: \"jim\",\n\t}\n\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(position, horse) VALUES(?, ?)\", table), 1, insertedHorse).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tretrievedHorse := &horse{}\n\terr = session.Query(fmt.Sprintf(\"SELECT horse FROM %s WHERE position = ?\", table), 1).Scan(retrievedHorse)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif *retrievedHorse != *insertedHorse {\n\t\tt.Fatalf(\"expected to get %+v got %+v\", insertedHorse, retrievedHorse)\n\t}\n}\n\nfunc TestUDT_NullObject(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\ttypeName := testTypeName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TYPE gocql_test.%s(\n\t\tname text,\n\t\towner text);`, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\tid uuid,\n\t\tudt_col frozen<%s>,\n\n\t\tprimary key(id)\n\t);`, table, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttype col struct {\n\t\tName  string `cql:\"name\"`\n\t\tOwner string `cql:\"owner\"`\n\t}\n\n\tid := TimeUUID()\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id) VALUES(?)\", table), id).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treadCol := &col{\n\t\tName:  \"temp\",\n\t\tOwner: \"temp\",\n\t}\n\n\terr = session.Query(fmt.Sprintf(\"SELECT udt_col FROM %s WHERE id = ?\", table), id).Scan(readCol)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif readCol.Name != \"\" {\n\t\tt.Errorf(\"expected empty string to be returned for null udt: got %q\", readCol.Name)\n\t}\n\tif readCol.Owner != \"\" 
{\n\t\tt.Errorf(\"expected empty string to be returned for null udt: got %q\", readCol.Owner)\n\t}\n}\n\nfunc TestMapScanUDT(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\ttypeName := testTypeName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TYPE gocql_test.%s (\n\t\tcreated_timestamp timestamp,\n\t\tmessage text\n\t);`, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (\n\t\tid uuid PRIMARY KEY,\n\t\ttype int,\n\t\tlog_entries list<frozen <%s>>\n\t);`, table, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tentry := []struct {\n\t\tCreatedTimestamp time.Time `cql:\"created_timestamp\"`\n\t\tMessage          string    `cql:\"message\"`\n\t}{\n\t\t{\n\t\t\tCreatedTimestamp: time.Now().Truncate(time.Millisecond),\n\t\t\tMessage:          \"test time now\",\n\t\t},\n\t}\n\n\tid, _ := RandomUUID()\n\tconst typ = 1\n\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, type, log_entries) VALUES (?, ?, ?)\", table), id, typ, entry).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trawResult := map[string]any{}\n\terr = session.Query(fmt.Sprintf(`SELECT * FROM %s WHERE id = ?`, table), id).MapScan(rawResult)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tlogEntries, ok := rawResult[\"log_entries\"].([]map[string]any)\n\tif !ok {\n\t\tt.Fatal(\"log_entries not in scanned map\")\n\t}\n\n\tif len(logEntries) != 1 {\n\t\tt.Fatalf(\"expected to get 1 log_entry got %d\", len(logEntries))\n\t}\n\n\tlogEntry := logEntries[0]\n\n\ttimestamp, ok := logEntry[\"created_timestamp\"]\n\tif !ok {\n\t\tt.Error(\"created_timestamp not unmarshalled into map\")\n\t} else {\n\t\tif ts, ok := timestamp.(time.Time); ok {\n\t\t\tif !ts.In(time.UTC).Equal(entry[0].CreatedTimestamp.In(time.UTC)) {\n\t\t\t\tt.Errorf(\"created_timestamp not equal to stored: got %v expected %v\", ts.In(time.UTC), 
entry[0].CreatedTimestamp.In(time.UTC))\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"created_timestamp was not time.Time got: %T\", timestamp)\n\t\t}\n\t}\n\n\tmessage, ok := logEntry[\"message\"]\n\tif !ok {\n\t\tt.Error(\"message not unmarshalled into map\")\n\t} else {\n\t\tif ts, ok := message.(string); ok {\n\t\t\tif ts != message {\n\t\t\t\tt.Errorf(\"message not equal to stored: got %v expected %v\", ts, entry[0].Message)\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"message was not string got: %T\", message)\n\t\t}\n\t}\n}\n\nfunc TestUDT_MissingField(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\ttypeName := testTypeName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TYPE gocql_test.%s(\n\t\tname text,\n\t\towner text);`, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\tid uuid,\n\t\tudt_col frozen<%s>,\n\n\t\tprimary key(id)\n\t);`, table, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttype col struct {\n\t\tName string `cql:\"name\"`\n\t}\n\n\twriteCol := &col{\n\t\tName: \"test\",\n\t}\n\n\tid := TimeUUID()\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, udt_col) VALUES(?, ?)\", table), id, writeCol).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treadCol := &col{}\n\terr = session.Query(fmt.Sprintf(\"SELECT udt_col FROM %s WHERE id = ?\", table), id).Scan(readCol)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif readCol.Name != writeCol.Name {\n\t\tt.Errorf(\"expected %q: got %q\", writeCol.Name, readCol.Name)\n\t}\n}\n\nfunc TestUDT_EmptyCollections(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\ttypeName := testTypeName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TYPE gocql_test.%s(\n\t\ta list<text>,\n\t\tb map<text, text>,\n\t\tc set<text>\n\t);`, typeName))\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\n\terr = createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\tid uuid,\n\t\tudt_col frozen<%s>,\n\n\t\tprimary key(id)\n\t);`, table, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttype udt struct {\n\t\tA []string          `cql:\"a\"`\n\t\tB map[string]string `cql:\"b\"`\n\t\tC []string          `cql:\"c\"`\n\t}\n\n\tid := TimeUUID()\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, udt_col) VALUES(?, ?)\", table), id, &udt{}).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar val udt\n\terr = session.Query(fmt.Sprintf(\"SELECT udt_col FROM %s WHERE id=?\", table), id).Scan(&val)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif val.A != nil {\n\t\tt.Errorf(\"expected to get nil got %#+v\", val.A)\n\t}\n\tif val.B != nil {\n\t\tt.Errorf(\"expected to get nil got %#+v\", val.B)\n\t}\n\tif val.C != nil {\n\t\tt.Errorf(\"expected to get nil got %#+v\", val.C)\n\t}\n}\n\nfunc TestUDT_UpdateField(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\ttypeName := testTypeName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TYPE gocql_test.%s(\n\t\tname text,\n\t\towner text);`, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\tid uuid,\n\t\tudt_col frozen<%s>,\n\n\t\tprimary key(id)\n\t);`, table, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttype col struct {\n\t\tName  string `cql:\"name\"`\n\t\tOwner string `cql:\"owner\"`\n\t\tData  string `cql:\"data\"`\n\t}\n\n\twriteCol := &col{\n\t\tName:  \"test-name\",\n\t\tOwner: \"test-owner\",\n\t}\n\n\tid := TimeUUID()\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, udt_col) VALUES(?, ?)\", table), id, writeCol).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := createTable(session, fmt.Sprintf(`ALTER TYPE gocql_test.%s ADD data text;`, typeName)); err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\n\treadCol := &col{}\n\terr = session.Query(fmt.Sprintf(\"SELECT udt_col FROM %s WHERE id = ?\", table), id).Scan(readCol)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif *readCol != *writeCol {\n\t\tt.Errorf(\"expected %+v: got %+v\", *writeCol, *readCol)\n\t}\n}\n\nfunc TestUDT_ScanNullUDT(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\ttable := testTableName(t)\n\ttypeName := testTypeName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TYPE gocql_test.%s(\n\t\tlat int,\n\t\tlon int,\n\t\tpadding text);`, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\tid int,\n\t\tname text,\n\t\tloc frozen<%s>,\n\t\tprimary key(id)\n\t);`, table, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, name) VALUES(?, ?)\", table), 1, \"test\").Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpos := &position{}\n\n\terr = session.Query(fmt.Sprintf(\"SELECT loc FROM %s WHERE id = ?\", table), 1).Scan(pos)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n"
  },
  {
    "path": "uuid.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2012, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\n// The uuid package can be used to generate and parse universally unique\n// identifiers, a standardized format in the form of a 128 bit number.\n//\n// http://tools.ietf.org/html/rfc4122\n\nimport (\n\t\"crypto/rand\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"time\"\n)\n\ntype UUID [16]byte\n\nvar hardwareAddr []byte\nvar clockSeq uint32\n\nconst (\n\tVariantNCSCompat = 0\n\tVariantIETF      = 2\n\tVariantMicrosoft = 6\n\tVariantFuture    = 7\n)\n\nfunc init() {\n\tif interfaces, err := net.Interfaces(); err == nil {\n\t\tfor _, i := range interfaces {\n\t\t\tif i.Flags&net.FlagLoopback == 0 && len(i.HardwareAddr) > 0 {\n\t\t\t\thardwareAddr = i.HardwareAddr\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif hardwareAddr == nil {\n\t\t// If we failed to obtain the MAC address of the current computer,\n\t\t// we will use a randomly generated 6 
byte sequence instead and set\n\t\t// the multicast bit as recommended in RFC 4122.\n\t\thardwareAddr = make([]byte, 6)\n\t\t_, err := io.ReadFull(rand.Reader, hardwareAddr)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\thardwareAddr[0] = hardwareAddr[0] | 0x01\n\t}\n\n\t// initialize the clock sequence with a random number\n\tvar clockSeqRand [2]byte\n\tio.ReadFull(rand.Reader, clockSeqRand[:])\n\tclockSeq = uint32(clockSeqRand[1])<<8 | uint32(clockSeqRand[0])\n}\n\n// ParseUUID parses a 32 digit hexadecimal number (that might contain hypens)\n// representing an UUID.\nfunc ParseUUID(input string) (UUID, error) {\n\tvar u UUID\n\tj := 0\n\tfor _, r := range input {\n\t\tswitch {\n\t\tcase r == '-' && j&1 == 0:\n\t\t\tcontinue\n\t\tcase r >= '0' && r <= '9' && j < 32:\n\t\t\tu[j/2] |= byte(r-'0') << uint(4-j&1*4)\n\t\tcase r >= 'a' && r <= 'f' && j < 32:\n\t\t\tu[j/2] |= byte(r-'a'+10) << uint(4-j&1*4)\n\t\tcase r >= 'A' && r <= 'F' && j < 32:\n\t\t\tu[j/2] |= byte(r-'A'+10) << uint(4-j&1*4)\n\t\tdefault:\n\t\t\treturn UUID{}, fmt.Errorf(\"invalid UUID %q\", input)\n\t\t}\n\t\tj += 1\n\t}\n\tif j != 32 {\n\t\treturn UUID{}, fmt.Errorf(\"invalid UUID %q\", input)\n\t}\n\treturn u, nil\n}\n\nfunc ParseUUIDMust(input string) UUID {\n\tuuid, err := ParseUUID(input)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn uuid\n}\n\n// UUIDFromBytes converts a raw byte slice to an UUID.\nfunc UUIDFromBytes(input []byte) (UUID, error) {\n\tvar u UUID\n\tif len(input) != 16 {\n\t\treturn u, errors.New(\"UUIDs must be exactly 16 bytes long\")\n\t}\n\n\tcopy(u[:], input)\n\treturn u, nil\n}\n\nfunc MustRandomUUID() UUID {\n\tuuid, err := RandomUUID()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn uuid\n}\n\n// RandomUUID generates a totally random UUID (version 4) as described in\n// RFC 4122.\nfunc RandomUUID() (UUID, error) {\n\tvar u UUID\n\t_, err := io.ReadFull(rand.Reader, u[:])\n\tif err != nil {\n\t\treturn u, err\n\t}\n\tu[6] &= 0x0F // clear version\n\tu[6] |= 
0x40 // set version to 4 (random uuid)\n\tu[8] &= 0x3F // clear variant\n\tu[8] |= 0x80 // set to IETF variant\n\treturn u, nil\n}\n\nvar timeBase = time.Date(1582, time.October, 15, 0, 0, 0, 0, time.UTC).Unix()\n\n// getTimestamp converts time to UUID (version 1) timestamp.\n// It must be an interval of 100-nanoseconds since timeBase.\nfunc getTimestamp(t time.Time) int64 {\n\tutcTime := t.In(time.UTC)\n\tts := int64(utcTime.Unix()-timeBase)*10000000 + int64(utcTime.Nanosecond()/100)\n\n\treturn ts\n}\n\n// TimeUUID generates a new time based UUID (version 1) using the current\n// time as the timestamp.\nfunc TimeUUID() UUID {\n\treturn UUIDFromTime(time.Now())\n}\n\n// The min and max clock values for a UUID.\n//\n// Cassandra's TimeUUIDType compares the lsb parts as signed byte arrays.\n// Thus, the min value for each byte is -128 and the max is +127.\nconst (\n\tminClock = 0x8080\n\tmaxClock = 0x7f7f\n)\n\n// The min and max node values for a UUID.\n//\n// See explanation about Cassandra's TimeUUIDType comparison logic above.\nvar (\n\tminNode = []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80}\n\tmaxNode = []byte{0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f}\n)\n\n// MinTimeUUID generates a \"fake\" time based UUID (version 1) which will be\n// the smallest possible UUID generated for the provided timestamp.\n//\n// UUIDs generated by this function are not unique and are mostly suitable only\n// in queries to select a time range of a Cassandra's TimeUUID column.\nfunc MinTimeUUID(t time.Time) UUID {\n\treturn TimeUUIDWith(getTimestamp(t), minClock, minNode)\n}\n\n// MaxTimeUUID generates a \"fake\" time based UUID (version 1) which will be\n// the biggest possible UUID generated for the provided timestamp.\n//\n// UUIDs generated by this function are not unique and are mostly suitable only\n// in queries to select a time range of a Cassandra's TimeUUID column.\nfunc MaxTimeUUID(t time.Time) UUID {\n\treturn TimeUUIDWith(getTimestamp(t), maxClock, maxNode)\n}\n\n// UUIDFromTime 
generates a new time based UUID (version 1) as described in\n// RFC 4122. This UUID contains the MAC address of the node that generated\n// the UUID, the given timestamp and a sequence number.\nfunc UUIDFromTime(t time.Time) UUID {\n\tts := getTimestamp(t)\n\tclock := atomic.AddUint32(&clockSeq, 1)\n\n\treturn TimeUUIDWith(ts, clock, hardwareAddr)\n}\n\n// TimeUUIDWith generates a new time based UUID (version 1) as described in\n// RFC4122 with given parameters. t is the number of 100's of nanoseconds\n// since 15 Oct 1582 (60bits). clock is the number of clock sequence (14bits).\n// node is a slice to gurarantee the uniqueness of the UUID (up to 6bytes).\n// Note: calling this function does not increment the static clock sequence.\nfunc TimeUUIDWith(t int64, clock uint32, node []byte) UUID {\n\tvar u UUID\n\n\tu[0], u[1], u[2], u[3] = byte(t>>24), byte(t>>16), byte(t>>8), byte(t)\n\tu[4], u[5] = byte(t>>40), byte(t>>32)\n\tu[6], u[7] = byte(t>>56)&0x0F, byte(t>>48)\n\n\tu[8] = byte(clock >> 8)\n\tu[9] = byte(clock)\n\n\tcopy(u[10:], node)\n\n\tu[6] |= 0x10 // set version to 1 (time based uuid)\n\tu[8] &= 0x3F // clear variant\n\tu[8] |= 0x80 // set to IETF variant\n\n\treturn u\n}\n\n// String returns the UUID in it's canonical form, a 32 digit hexadecimal\n// number in the form of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.\nfunc (u UUID) String() string {\n\tvar offsets = [...]int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34}\n\tconst hexString = \"0123456789abcdef\"\n\tr := make([]byte, 36)\n\tfor i, b := range u {\n\t\tr[offsets[i]] = hexString[b>>4]\n\t\tr[offsets[i]+1] = hexString[b&0xF]\n\t}\n\tr[8] = '-'\n\tr[13] = '-'\n\tr[18] = '-'\n\tr[23] = '-'\n\treturn string(r)\n\n}\n\n// Bytes returns the raw byte slice for this UUID. 
A UUID is always 128 bits\n// (16 bytes) long.\nfunc (u UUID) Bytes() []byte {\n\treturn u[:]\n}\n\nvar emptyUUID = UUID{}\n\nfunc (u UUID) IsEmpty() bool {\n\treturn u == emptyUUID\n}\n\n// Variant returns the variant of this UUID. This package will only generate\n// UUIDs in the IETF variant.\nfunc (u UUID) Variant() int {\n\tx := u[8]\n\tif x&0x80 == 0 {\n\t\treturn VariantNCSCompat\n\t}\n\tif x&0x40 == 0 {\n\t\treturn VariantIETF\n\t}\n\tif x&0x20 == 0 {\n\t\treturn VariantMicrosoft\n\t}\n\treturn VariantFuture\n}\n\n// Version extracts the version of this UUID variant. The RFC 4122 describes\n// five kinds of UUIDs.\nfunc (u UUID) Version() int {\n\treturn int(u[6] & 0xF0 >> 4)\n}\n\n// Node extracts the MAC address of the node who generated this UUID. It will\n// return nil if the UUID is not a time based UUID (version 1).\nfunc (u UUID) Node() []byte {\n\tif u.Version() != 1 {\n\t\treturn nil\n\t}\n\treturn u[10:]\n}\n\n// Clock extracts the clock sequence of this UUID. It will return zero if the\n// UUID is not a time based UUID (version 1).\nfunc (u UUID) Clock() uint32 {\n\tif u.Version() != 1 {\n\t\treturn 0\n\t}\n\n\t// Clock sequence is the lower 14bits of u[8:10]\n\treturn uint32(u[8]&0x3F)<<8 | uint32(u[9])\n}\n\n// Timestamp extracts the timestamp information from a time based UUID\n// (version 1).\nfunc (u UUID) Timestamp() int64 {\n\tif u.Version() != 1 {\n\t\treturn 0\n\t}\n\treturn int64(uint64(u[0])<<24|uint64(u[1])<<16|\n\t\tuint64(u[2])<<8|uint64(u[3])) +\n\t\tint64(uint64(u[4])<<40|uint64(u[5])<<32) +\n\t\tint64(uint64(u[6]&0x0F)<<56|uint64(u[7])<<48)\n}\n\n// Time is like Timestamp, except that it returns a time.Time.\nfunc (u UUID) Time() time.Time {\n\tif u.Version() != 1 {\n\t\treturn time.Time{}\n\t}\n\tt := u.Timestamp()\n\tsec := t / 1e7\n\tnsec := (t % 1e7) * 100\n\treturn time.Unix(sec+timeBase, nsec).UTC()\n}\n\n// Marshaling for JSON\nfunc (u UUID) MarshalJSON() ([]byte, error) {\n\treturn []byte(`\"` + u.String() + `\"`), 
nil\n}\n\n// Unmarshaling for JSON\nfunc (u *UUID) UnmarshalJSON(data []byte) error {\n\tstr := strings.Trim(string(data), `\"`)\n\tif len(str) > 36 {\n\t\treturn fmt.Errorf(\"invalid JSON UUID %s\", str)\n\t}\n\n\tparsed, err := ParseUUID(str)\n\tif err == nil {\n\t\tcopy(u[:], parsed[:])\n\t}\n\n\treturn err\n}\n\nfunc (u UUID) MarshalText() ([]byte, error) {\n\treturn []byte(u.String()), nil\n}\n\nfunc (u *UUID) UnmarshalText(text []byte) (err error) {\n\t*u, err = ParseUUID(string(text))\n\treturn\n}\n"
  },
  {
    "path": "uuid_test.go",
    "content": "//go:build unit\n// +build unit\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestUUIDNil(t *testing.T) {\n\tt.Parallel()\n\n\tvar uuid UUID\n\twant, got := \"00000000-0000-0000-0000-000000000000\", uuid.String()\n\tif want != got {\n\t\tt.Fatalf(\"TestNil: expected %q got %q\", want, got)\n\t}\n}\n\nvar testsUUID = []struct {\n\tinput   string\n\tvariant int\n\tversion int\n}{\n\t{\"b4f00409-cef8-4822-802c-deb20704c365\", VariantIETF, 4},\n\t{\"B4F00409-CEF8-4822-802C-DEB20704C365\", VariantIETF, 4}, //Use capital letters\n\t{\"f81d4fae-7dec-11d0-a765-00a0c91e6bf6\", VariantIETF, 1},\n\t{\"00000000-7dec-11d0-a765-00a0c91e6bf6\", VariantIETF, 1},\n\t{\"3051a8d7-aea7-1801-e0bf-bc539dd60cf3\", VariantFuture, 1},\n\t{\"3051a8d7-aea7-2801-e0bf-bc539dd60cf3\", VariantFuture, 2},\n\t{\"3051a8d7-aea7-3801-e0bf-bc539dd60cf3\", VariantFuture, 
3},\n\t{\"3051a8d7-aea7-4801-e0bf-bc539dd60cf3\", VariantFuture, 4},\n\t{\"3051a8d7-aea7-3801-e0bf-bc539dd60cf3\", VariantFuture, 5},\n\t{\"d0e817e1-e4b1-1801-3fe6-b4b60ccecf9d\", VariantNCSCompat, 0},\n\t{\"d0e817e1-e4b1-1801-bfe6-b4b60ccecf9d\", VariantIETF, 1},\n\t{\"d0e817e1-e4b1-1801-dfe6-b4b60ccecf9d\", VariantMicrosoft, 0},\n\t{\"d0e817e1-e4b1-1801-ffe6-b4b60ccecf9d\", VariantFuture, 0},\n}\n\nfunc TestPredefinedUUID(t *testing.T) {\n\tt.Parallel()\n\n\tfor i := range testsUUID {\n\t\tuuid, err := ParseUUID(testsUUID[i].input)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ParseUUID #%d: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif str := uuid.String(); str != strings.ToLower(testsUUID[i].input) {\n\t\t\tt.Errorf(\"String #%d: expected %q got %q\", i, testsUUID[i].input, str)\n\t\t\tcontinue\n\t\t}\n\n\t\tif variant := uuid.Variant(); variant != testsUUID[i].variant {\n\t\t\tt.Errorf(\"Variant #%d: expected %d got %d\", i, testsUUID[i].variant, variant)\n\t\t}\n\n\t\tif testsUUID[i].variant == VariantIETF {\n\t\t\tif version := uuid.Version(); version != testsUUID[i].version {\n\t\t\t\tt.Errorf(\"Version #%d: expected %d got %d\", i, testsUUID[i].version, version)\n\t\t\t}\n\t\t}\n\n\t\tjson, err := uuid.MarshalJSON()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"MarshalJSON #%d: %v\", i, err)\n\t\t}\n\t\texpectedJson := `\"` + strings.ToLower(testsUUID[i].input) + `\"`\n\t\tif string(json) != expectedJson {\n\t\t\tt.Errorf(\"MarshalJSON #%d: expected %v got %v\", i, expectedJson, string(json))\n\t\t}\n\n\t\tvar unmarshaled UUID\n\t\terr = unmarshaled.UnmarshalJSON(json)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"UnmarshalJSON #%d: %v\", i, err)\n\t\t}\n\t\tif unmarshaled != uuid {\n\t\t\tt.Errorf(\"UnmarshalJSON #%d: expected %v got %v\", i, uuid, unmarshaled)\n\t\t}\n\t}\n}\n\nfunc TestInvalidUUIDCharacter(t *testing.T) {\n\tt.Parallel()\n\n\t_, err := ParseUUID(\"z4f00409-cef8-4822-802c-deb20704c365\")\n\tif err == nil || !strings.Contains(err.Error(), \"invalid UUID\") 
{\n\t\tt.Fatalf(\"expected invalid UUID error, got '%v' \", err)\n\t}\n}\n\nfunc TestInvalidUUIDLength(t *testing.T) {\n\tt.Parallel()\n\n\t_, err := ParseUUID(\"4f00\")\n\tif err == nil || !strings.Contains(err.Error(), \"invalid UUID\") {\n\t\tt.Fatalf(\"expected invalid UUID error, got '%v' \", err)\n\t}\n\n\t_, err = UUIDFromBytes(TimeUUID().Bytes()[:15])\n\tif err == nil || err.Error() != \"UUIDs must be exactly 16 bytes long\" {\n\t\tt.Fatalf(\"expected error '%v', got '%v'\", \"UUIDs must be exactly 16 bytes long\", err)\n\t}\n}\n\nfunc TestRandomUUID(t *testing.T) {\n\tt.Parallel()\n\n\tfor i := 0; i < 20; i++ {\n\t\tuuid, err := RandomUUID()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"RandomUUID: %v\", err)\n\t\t}\n\t\tif variant := uuid.Variant(); variant != VariantIETF {\n\t\t\tt.Errorf(\"wrong variant. expected %d got %d\", VariantIETF, variant)\n\t\t}\n\t\tif version := uuid.Version(); version != 4 {\n\t\t\tt.Errorf(\"wrong version. expected %d got %d\", 4, version)\n\t\t}\n\t}\n}\n\nfunc TestRandomUUIDInvalidAPICalls(t *testing.T) {\n\tt.Parallel()\n\n\tuuid, err := RandomUUID()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error %v\", err)\n\t}\n\n\tif node := uuid.Node(); node != nil {\n\t\tt.Fatalf(\"expected nil, got %v\", node)\n\t}\n\n\tif stamp := uuid.Timestamp(); stamp != 0 {\n\t\tt.Fatalf(\"expected 0, got %v\", stamp)\n\t}\n\tzeroT := time.Time{}\n\tif to := uuid.Time(); to != zeroT {\n\t\tt.Fatalf(\"expected %v, got %v\", zeroT, to)\n\t}\n}\n\nfunc TestUUIDFromTime(t *testing.T) {\n\tt.Parallel()\n\n\tdate := time.Date(1982, 5, 5, 12, 34, 56, 400, time.UTC)\n\tuuid := UUIDFromTime(date)\n\n\tif uuid.Time() != date {\n\t\tt.Errorf(\"embedded time incorrect. 
Expected %v got %v\", date, uuid.Time())\n\t}\n}\n\nfunc TestTimeUUIDWith(t *testing.T) {\n\tt.Parallel()\n\n\tutcTime := time.Date(1982, 5, 5, 12, 34, 56, 400, time.UTC)\n\tts := int64(utcTime.Unix()-timeBase)*10000000 + int64(utcTime.Nanosecond()/100)\n\tclockSeq := uint32(0x3FFF)           // Max number of clock sequence.\n\tnode := [7]byte{0, 1, 2, 3, 4, 5, 6} // The last element should be ignored.\n\tuuid := TimeUUIDWith(ts, clockSeq, node[:])\n\n\tif got := uuid.Variant(); got != VariantIETF {\n\t\tt.Errorf(\"wrong variant. expected %d got %d\", VariantIETF, got)\n\t}\n\tif got, want := uuid.Version(), 1; got != want {\n\t\tt.Errorf(\"wrong version. Expected %v got %v\", want, got)\n\t}\n\tif got := uuid.Timestamp(); got != int64(ts) {\n\t\tt.Errorf(\"wrong timestamp. Expected %v got %v\", ts, got)\n\t}\n\tif got := uuid.Clock(); uint32(got) != clockSeq {\n\t\tt.Errorf(\"wrong clock. expected %v got %v\", clockSeq, got)\n\t}\n\tif got, want := uuid.Node(), node[:6]; !bytes.Equal(got, want) {\n\t\tt.Errorf(\"wrong node. expected %x, got %x\", want, got)\n\t}\n}\n\nfunc TestParseUUID(t *testing.T) {\n\tt.Parallel()\n\n\tuuid := ParseUUIDMust(\"486f3a88-775b-11e3-ae07-d231feb1dc81\")\n\tif uuid.Time() != time.Date(2014, 1, 7, 5, 19, 29, 222516000, time.UTC) {\n\t\tt.Errorf(\"Expected date of 1/7/2014 at 5:19:29.222516, got %v\", uuid.Time())\n\t}\n}\n\nfunc TestTimeUUID(t *testing.T) {\n\tt.Parallel()\n\n\tvar node []byte\n\ttimestamp := int64(0)\n\tfor i := 0; i < 20; i++ {\n\t\tuuid := TimeUUID()\n\n\t\tif variant := uuid.Variant(); variant != VariantIETF {\n\t\t\tt.Errorf(\"wrong variant. expected %d got %d\", VariantIETF, variant)\n\t\t}\n\t\tif version := uuid.Version(); version != 1 {\n\t\t\tt.Errorf(\"wrong version. expected %d got %d\", 1, version)\n\t\t}\n\n\t\tif n := uuid.Node(); !bytes.Equal(n, node) && i > 0 {\n\t\t\tt.Errorf(\"wrong node. 
expected %x, got %x\", node, n)\n\t\t} else if i == 0 {\n\t\t\tnode = n\n\t\t}\n\n\t\tts := uuid.Timestamp()\n\t\tif ts < timestamp {\n\t\t\tt.Errorf(\"timestamps must grow: timestamp=%v ts=%v\", timestamp, ts)\n\t\t}\n\t\ttimestamp = ts\n\t}\n}\n\nfunc TestUnmarshalJSON(t *testing.T) {\n\tt.Parallel()\n\n\tvar withHyphens, withoutHyphens, tooLong UUID\n\n\twithHyphens.UnmarshalJSON([]byte(`\"486f3a88-775b-11e3-ae07-d231feb1dc81\"`))\n\tif withHyphens.Time().Truncate(time.Second) != time.Date(2014, 1, 7, 5, 19, 29, 0, time.UTC) {\n\t\tt.Errorf(\"Expected date of 1/7/2014 at 5:19:29, got %v\", withHyphens.Time())\n\t}\n\n\twithoutHyphens.UnmarshalJSON([]byte(`\"486f3a88775b11e3ae07d231feb1dc81\"`))\n\tif withoutHyphens.Time().Truncate(time.Second) != time.Date(2014, 1, 7, 5, 19, 29, 0, time.UTC) {\n\t\tt.Errorf(\"Expected date of 1/7/2014 at 5:19:29, got %v\", withoutHyphens.Time())\n\t}\n\n\terr := tooLong.UnmarshalJSON([]byte(`\"486f3a88-775b-11e3-ae07-d231feb1dc81486f3a88\"`))\n\tif err == nil {\n\t\tt.Errorf(\"no error for invalid JSON UUID\")\n\t}\n\n}\n\nfunc TestMarshalText(t *testing.T) {\n\tt.Parallel()\n\n\tu, err := ParseUUID(\"486f3a88-775b-11e3-ae07-d231feb1dc81\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttext, err := u.MarshalText()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar u2 UUID\n\tif err := u2.UnmarshalText(text); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif u != u2 {\n\t\tt.Fatalf(\"uuids not equal after marshalling: before=%s after=%s\", u, u2)\n\t}\n}\n\nfunc TestMinTimeUUID(t *testing.T) {\n\tt.Parallel()\n\n\taTime := time.Now()\n\tminTimeUUID := MinTimeUUID(aTime)\n\n\tts := aTime.Unix()\n\ttsFromUUID := minTimeUUID.Time().Unix()\n\tif ts != tsFromUUID {\n\t\tt.Errorf(\"timestamps are not equal: expected %d, got %d\", ts, tsFromUUID)\n\t}\n\n\tclockFromUUID := minTimeUUID.Clock()\n\t// clear two most significant bits, as they are used for IETF variant\n\tif minClock&0x3FFF != clockFromUUID {\n\t\tt.Errorf(\"clocks are not equal: 
expected %08b, got %08b\", minClock&0x3FFF, clockFromUUID)\n\t}\n\n\tnodeFromUUID := minTimeUUID.Node()\n\tif !bytes.Equal(minNode, nodeFromUUID) {\n\t\tt.Errorf(\"nodes are not equal: expected %08b, got %08b\", minNode, nodeFromUUID)\n\t}\n}\n\nfunc TestMaxTimeUUID(t *testing.T) {\n\tt.Parallel()\n\n\taTime := time.Now()\n\tmaxTimeUUID := MaxTimeUUID(aTime)\n\n\tts := aTime.Unix()\n\ttsFromUUID := maxTimeUUID.Time().Unix()\n\tif ts != tsFromUUID {\n\t\tt.Errorf(\"timestamps are not equal: expected %d, got %d\", ts, tsFromUUID)\n\t}\n\n\tclockFromUUID := maxTimeUUID.Clock()\n\tif maxClock&0x3FFF != clockFromUUID {\n\t\tt.Errorf(\"clocks are not equal: expected %08b, got %08b\", maxClock&0x3FFF, clockFromUUID)\n\t}\n\n\tnodeFromUUID := maxTimeUUID.Node()\n\tif !bytes.Equal(maxNode, nodeFromUUID) {\n\t\tt.Errorf(\"nodes are not equal: expected %08b, got %08b\", maxNode, nodeFromUUID)\n\t}\n}\n"
  },
  {
    "path": "vector_bench_test.go",
    "content": "// Copyright (c) 2012 The gocql Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n//go:build all || unit\n// +build all unit\n\npackage gocql\n\nimport (\n\t\"encoding/binary\"\n\t\"math\"\n\t\"testing\"\n)\n\nconst vectorTypePrefix = apacheCassandraTypePrefix + \"VectorType(\" + apacheCassandraTypePrefix + \"FloatType, \"\nconst vectorTypeSuffix = \")\"\n\nfunc makeFloatVectorType(dim int, dimStr string) VectorType {\n\treturn VectorType{\n\t\tNativeType: NativeType{\n\t\t\tproto:  protoVersion4,\n\t\t\ttyp:    TypeCustom,\n\t\t\tcustom: vectorTypePrefix + dimStr + vectorTypeSuffix,\n\t\t},\n\t\tSubType:    NativeType{proto: protoVersion4, typ: TypeFloat},\n\t\tDimensions: dim,\n\t}\n}\n\n// BenchmarkUnmarshalVectorFloat32 measures unmarshal performance for float32 vectors\n// across common embedding dimensions used in AI/ML applications.\nfunc BenchmarkUnmarshalVectorFloat32(b *testing.B) {\n\tdims := []struct {\n\t\tdim    int\n\t\tdimStr string\n\t}{\n\t\t{dim: 128, dimStr: \"128\"},\n\t\t{dim: 384, dimStr: \"384\"},\n\t\t{dim: 768, dimStr: \"768\"},\n\t\t{dim: 1536, dimStr: \"1536\"},\n\t}\n\n\tfor _, entry := range dims {\n\t\tdim := entry.dim\n\t\tdimStr := entry.dimStr\n\t\tb.Run(\"dim_\"+dimStr, func(b *testing.B) {\n\t\t\tb.ReportAllocs()\n\n\t\t\tdata := make([]byte, dim*4)\n\t\t\tfor i := 0; i < dim; i++ {\n\t\t\t\tbinary.BigEndian.PutUint32(data[i*4:], math.Float32bits(float32(i)*0.1))\n\t\t\t}\n\n\t\t\tinfo := makeFloatVectorType(dim, dimStr)\n\t\t\tvar result []float32\n\n\t\t\tb.SetBytes(int64(dim * 4))\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif err := unmarshalVector(info, data, &result); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkMarshalVectorFloat32 measures marshal performance for float32 vectors\n// across common embedding dimensions.\nfunc 
BenchmarkMarshalVectorFloat32(b *testing.B) {\n\tdims := []struct {\n\t\tdim    int\n\t\tdimStr string\n\t}{\n\t\t{dim: 128, dimStr: \"128\"},\n\t\t{dim: 384, dimStr: \"384\"},\n\t\t{dim: 768, dimStr: \"768\"},\n\t\t{dim: 1536, dimStr: \"1536\"},\n\t}\n\n\tfor _, entry := range dims {\n\t\tdim := entry.dim\n\t\tdimStr := entry.dimStr\n\t\tb.Run(\"dim_\"+dimStr, func(b *testing.B) {\n\t\t\tb.ReportAllocs()\n\n\t\t\tvec := make([]float32, dim)\n\t\t\tfor i := range vec {\n\t\t\t\tvec[i] = float32(i) * 0.1\n\t\t\t}\n\n\t\t\tinfo := makeFloatVectorType(dim, dimStr)\n\n\t\t\tb.SetBytes(int64(dim * 4))\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := marshalVector(info, vec); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkVectorRoundTrip measures full marshal -> unmarshal cycle.\nfunc BenchmarkVectorRoundTrip(b *testing.B) {\n\tdims := []struct {\n\t\tdim    int\n\t\tdimStr string\n\t}{\n\t\t{dim: 128, dimStr: \"128\"},\n\t\t{dim: 384, dimStr: \"384\"},\n\t\t{dim: 768, dimStr: \"768\"},\n\t\t{dim: 1536, dimStr: \"1536\"},\n\t}\n\n\tfor _, entry := range dims {\n\t\tdim := entry.dim\n\t\tdimStr := entry.dimStr\n\t\tb.Run(\"dim_\"+dimStr, func(b *testing.B) {\n\t\t\tb.ReportAllocs()\n\n\t\t\tsrcVec := make([]float32, dim)\n\t\t\tfor i := range srcVec {\n\t\t\t\tsrcVec[i] = float32(i) * 0.1\n\t\t\t}\n\n\t\t\tinfo := makeFloatVectorType(dim, dimStr)\n\t\t\tvar dstVec []float32\n\n\t\t\tb.SetBytes(int64(dim * 4 * 2))\n\t\t\tb.ResetTimer()\n\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tdata, err := marshalVector(info, srcVec)\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif err := unmarshalVector(info, data, &dstVec); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "vector_test.go",
    "content": "//go:build integration\n// +build integration\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"gopkg.in/inf.v0\"\n\n\t\"github.com/gocql/gocql/internal/tests\"\n)\n\ntype person struct {\n\tFirstName string `cql:\"first_name\"`\n\tLastName  string `cql:\"last_name\"`\n\tAge       int    `cql:\"age\"`\n}\n\nfunc (p person) String() string {\n\treturn fmt.Sprintf(\"Person{firstName: %s, lastName: %s, Age: %d}\", p.FirstName, p.LastName, p.Age)\n}\n\nfunc TestVector_Marshaler(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tif *flagDistribution == \"cassandra\" && flagCassVersion.Before(5, 0, 0) {\n\t\tt.Skip(\"Vector types have been introduced in Cassandra 5.0\")\n\t}\n\n\tif *flagDistribution == \"scylla\" && flagCassVersion.Before(2025, 3, 0) 
{\n\t\tt.Skip(\"Vector types have been introduced in ScyllaDB 2025.3\")\n\t}\n\n\tfixedTable := testTableName(t, \"fixed\")\n\tvariableTable := testTableName(t, \"variable\")\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TABLE IF NOT EXISTS gocql_test.%s(id int primary key, vec vector<float, 3>);`, fixedTable))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = createTable(session, fmt.Sprintf(`CREATE TABLE IF NOT EXISTS gocql_test.%s(id int primary key, vec vector<text, 4>);`, variableTable))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tinsertFixVec := []float32{8, 2.5, -5.0}\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, vec) VALUES(?, ?)\", fixedTable), 1, insertFixVec).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar selectFixVec []float32\n\terr = session.Query(fmt.Sprintf(\"SELECT vec FROM %s WHERE id = ?\", fixedTable), 1).Scan(&selectFixVec)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttests.AssertDeepEqual(t, \"fixed size element vector\", insertFixVec, selectFixVec)\n\n\tlongText := tests.RandomText(500)\n\tinsertVarVec := []string{\"apache\", \"cassandra\", longText, \"gocql\"}\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, vec) VALUES(?, ?)\", variableTable), 1, insertVarVec).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar selectVarVec []string\n\terr = session.Query(fmt.Sprintf(\"SELECT vec FROM %s WHERE id = ?\", variableTable), 1).Scan(&selectVarVec)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttests.AssertDeepEqual(t, \"variable size element vector\", insertVarVec, selectVarVec)\n}\n\nfunc TestVector_Types(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tif *flagDistribution == \"cassandra\" && flagCassVersion.Before(5, 0, 0) {\n\t\tt.Skip(\"Vector types have been introduced in Cassandra 5.0\")\n\t}\n\n\tif *flagDistribution == \"scylla\" && flagCassVersion.Before(2025, 4, 0) {\n\t\tt.Skip(\"Vector types are useful in ScyllaDB from 2025.4 and 
on\")\n\t}\n\n\ttimestamp1, _ := time.Parse(\"2006-01-02\", \"2000-01-01\")\n\ttimestamp2, _ := time.Parse(\"2006-01-02 15:04:05\", \"2024-01-01 10:31:45\")\n\ttimestamp3, _ := time.Parse(\"2006-01-02 15:04:05.000\", \"2024-05-01 10:31:45.987\")\n\n\tdate1, _ := time.Parse(\"2006-01-02\", \"2000-01-01\")\n\tdate2, _ := time.Parse(\"2006-01-02\", \"2022-03-14\")\n\tdate3, _ := time.Parse(\"2006-01-02\", \"2024-12-31\")\n\n\ttime1, _ := time.Parse(\"15:04:05\", \"01:00:00\")\n\ttime2, _ := time.Parse(\"15:04:05\", \"15:23:59\")\n\ttime3, _ := time.Parse(\"15:04:05.000\", \"10:31:45.987\")\n\n\tduration1 := Duration{0, 1, 1920000000000}\n\tduration2 := Duration{1, 1, 1920000000000}\n\tduration3 := Duration{31, 0, 60000000000}\n\n\tmap1 := make(map[string]int)\n\tmap1[\"a\"] = 1\n\tmap1[\"b\"] = 2\n\tmap1[\"c\"] = 3\n\tmap2 := make(map[string]int)\n\tmap2[\"abc\"] = 123\n\tmap3 := make(map[string]int)\n\n\ttestCases := []struct {\n\t\tname       string\n\t\tcqlType    string\n\t\tvalue      any\n\t\tcomparator func(*testing.T, any, any)\n\t}{\n\t\t{name: \"ascii\", cqlType: TypeAscii.String(), value: []string{\"a\", \"1\", \"Z\"}},\n\t\t{name: \"bigint\", cqlType: TypeBigInt.String(), value: []int64{1, 2, 3}},\n\t\t{name: \"blob\", cqlType: TypeBlob.String(), value: [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6, 7}, []byte{8, 9}}},\n\t\t{name: \"boolean\", cqlType: TypeBoolean.String(), value: []bool{true, false, true}},\n\t\t{name: \"counter\", cqlType: TypeCounter.String(), value: []int64{5, 6, 7}},\n\t\t{name: \"decimal\", cqlType: TypeDecimal.String(), value: []inf.Dec{*inf.NewDec(1, 0), *inf.NewDec(2, 1), *inf.NewDec(-3, 2)}},\n\t\t{name: \"double\", cqlType: TypeDouble.String(), value: []float64{0.1, -1.2, 3}},\n\t\t{name: \"float\", cqlType: TypeFloat.String(), value: []float32{0.1, -1.2, 3}},\n\t\t{name: \"int\", cqlType: TypeInt.String(), value: []int32{1, 2, 3}},\n\t\t{name: \"text\", cqlType: TypeText.String(), value: []string{\"a\", \"b\", \"c\"}},\n\t\t{name: 
\"timestamp\", cqlType: TypeTimestamp.String(), value: []time.Time{timestamp1, timestamp2, timestamp3}},\n\t\t{name: \"uuid\", cqlType: TypeUUID.String(), value: []UUID{MustRandomUUID(), MustRandomUUID(), MustRandomUUID()}},\n\t\t{name: \"varchar\", cqlType: TypeVarchar.String(), value: []string{\"abc\", \"def\", \"ghi\"}},\n\t\t{name: \"varint\", cqlType: TypeVarint.String(), value: []uint64{uint64(1234), uint64(123498765), uint64(18446744073709551615)}},\n\t\t{name: \"timeuuid\", cqlType: TypeTimeUUID.String(), value: []UUID{TimeUUID(), TimeUUID(), TimeUUID()}},\n\t\t{\n\t\t\tname:    \"inet\",\n\t\t\tcqlType: TypeInet.String(),\n\t\t\tvalue:   []net.IP{net.IPv4(127, 0, 0, 1), net.IPv4(192, 168, 1, 1), net.IPv4(8, 8, 8, 8)},\n\t\t\tcomparator: func(t *testing.T, e any, a any) {\n\t\t\t\texpected := e.([]net.IP)\n\t\t\t\tactual := a.([]net.IP)\n\t\t\t\ttests.AssertEqual(t, \"vector size\", len(expected), len(actual))\n\t\t\t\tfor i, _ := range expected {\n\t\t\t\t\ttests.AssertTrue(t, \"vector\", expected[i].Equal(actual[i]))\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{name: \"date\", cqlType: TypeDate.String(), value: []time.Time{date1, date2, date3}},\n\t\t{name: \"time\", cqlType: TypeTimestamp.String(), value: []time.Time{time1, time2, time3}},\n\t\t{name: \"smallint\", cqlType: TypeSmallInt.String(), value: []int16{127, 256, -1234}},\n\t\t{name: \"tinyint\", cqlType: TypeTinyInt.String(), value: []int8{127, 9, -123}},\n\t\t{name: \"duration\", cqlType: TypeDuration.String(), value: []Duration{duration1, duration2, duration3}},\n\t\t{name: \"vector_vector_float\", cqlType: \"vector<float, 5>\", value: [][]float32{{0.1, -1.2, 3, 5, 5}, {10.1, -122222.0002, 35.0, 1, 1}, {0, 0, 0, 0, 0}}},\n\t\t{\n\t\t\tname:    \"vector_vector_set_float\",\n\t\t\tcqlType: \"vector<set<float>, 5>\",\n\t\t\tvalue: [][][]float32{\n\t\t\t\t{{1, 2}, {2, -1}, {3}, {0}, {-1.3}},\n\t\t\t\t{{2, 3}, {2, -1}, {3}, {0}, {-1.3}},\n\t\t\t\t{{1, 1000.0}, {0}, {}, {12, 14, 15, 16}, 
{-1.3}},\n\t\t\t},\n\t\t\tcomparator: func(t *testing.T, e any, a any) {\n\t\t\t\texpected := e.([][][]float32)\n\t\t\t\tactual := a.([][][]float32)\n\t\t\t\ttests.AssertEqual(t, \"vector size\", len(expected), len(actual))\n\t\t\t\tfor i := range expected {\n\t\t\t\t\texpVector := expected[i]\n\t\t\t\t\tactVector := actual[i]\n\t\t\t\t\ttests.AssertEqual(t, \"vector size\", len(expVector), len(actVector))\n\t\t\t\t\tfor j := range expVector {\n\t\t\t\t\t\texpSet := append([]float32(nil), expVector[j]...)\n\t\t\t\t\t\tactSet := append([]float32(nil), actVector[j]...)\n\t\t\t\t\t\tsort.Slice(expSet, func(a, b int) bool { return expSet[a] < expSet[b] })\n\t\t\t\t\t\tsort.Slice(actSet, func(a, b int) bool { return actSet[a] < actSet[b] })\n\t\t\t\t\t\ttests.AssertDeepEqual(t, \"vector set\", expSet, actSet)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{name: \"vector_tuple_text_int_float\", cqlType: \"tuple<text, int, float>\", value: [][]any{{\"a\", 1, float32(0.5)}, {\"b\", 2, float32(-1.2)}, {\"c\", 3, float32(0)}}},\n\t\t{name: \"vector_tuple_text_list_text\", cqlType: \"tuple<text, list<text>>\", value: [][]any{{\"a\", []string{\"b\", \"c\"}}, {\"d\", []string{\"e\", \"g\", \"f\"}}, {\"h\", []string{\"i\"}}}},\n\t\t{\n\t\t\tname:    \"vector_set_text\",\n\t\t\tcqlType: \"set<text>\",\n\t\t\tvalue:   [][]string{{\"a\", \"b\"}, {\"c\", \"d\"}, {\"f\", \"e\"}},\n\t\t\tcomparator: func(t *testing.T, e any, a any) {\n\t\t\t\texpected := e.([][]string)\n\t\t\t\tactual := a.([][]string)\n\t\t\t\ttests.AssertEqual(t, \"vector size\", len(expected), len(actual))\n\t\t\t\tfor i := range expected {\n\t\t\t\t\texpSet := append([]string(nil), expected[i]...)\n\t\t\t\t\tactSet := append([]string(nil), actual[i]...)\n\t\t\t\t\tsort.Strings(expSet)\n\t\t\t\t\tsort.Strings(actSet)\n\t\t\t\t\ttests.AssertDeepEqual(t, \"vector set\", expSet, actSet)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{name: \"vector_list_int\", cqlType: \"list<int>\", value: [][]int32{{1, 2, 3}, {-1, -2, -3}, {0, 
0, 0}}},\n\t\t{name: \"vector_map_text_int\", cqlType: \"map<text, int>\", value: []map[string]int{map1, map2, map3}},\n\t}\n\n\tfor _, test := range testCases {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\ttableName := testTableName(t, test.name)\n\t\t\terr := createTable(session, fmt.Sprintf(`CREATE TABLE IF NOT EXISTS gocql_test.%s(id int primary key, vec vector<%s, 3>);`, tableName, test.cqlType))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, vec) VALUES(?, ?)\", tableName), 1, test.value).Exec()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tv := reflect.New(reflect.TypeOf(test.value))\n\t\t\terr = session.Query(fmt.Sprintf(\"SELECT vec FROM %s WHERE id = ?\", tableName), 1).Scan(v.Interface())\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif test.comparator != nil {\n\t\t\t\ttest.comparator(t, test.value, v.Elem().Interface())\n\t\t\t} else {\n\t\t\t\ttests.AssertDeepEqual(t, \"vector\", test.value, v.Elem().Interface())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestVector_MarshalerUDT(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tif *flagDistribution == \"cassandra\" && flagCassVersion.Before(5, 0, 0) {\n\t\tt.Skip(\"Vector types have been introduced in Cassandra 5.0\")\n\t}\n\n\tif *flagDistribution == \"scylla\" && flagCassVersion.Before(2025, 3, 0) {\n\t\tt.Skip(\"Vector types have been introduced in ScyllaDB 2025.3\")\n\t}\n\n\ttable := testTableName(t)\n\ttypeName := testTypeName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TYPE gocql_test.%s(\n\t\tfirst_name text,\n\t\tlast_name text,\n\t\tage int);`, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s(\n\t\tid int,\n\t\tcouple vector<%s, 2>,\n\t\tprimary key(id)\n\t);`, table, typeName))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tp1 := person{\"Johny\", \"Bravo\", 
25}\n\tp2 := person{\"Capitan\", \"Planet\", 5}\n\tinsVec := []person{p1, p2}\n\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, couple) VALUES(?, ?)\", table), 1, insVec).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar selVec []person\n\n\terr = session.Query(fmt.Sprintf(\"SELECT couple FROM %s WHERE id = ?\", table), 1).Scan(&selVec)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttests.AssertDeepEqual(t, \"udt\", &insVec, &selVec)\n}\n\nfunc TestVector_Empty(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tif *flagDistribution == \"cassandra\" && flagCassVersion.Before(5, 0, 0) {\n\t\tt.Skip(\"Vector types have been introduced in Cassandra 5.0\")\n\t}\n\n\tif *flagDistribution == \"scylla\" && flagCassVersion.Before(2025, 3, 0) {\n\t\tt.Skip(\"Vector types have been introduced in ScyllaDB 2025.3\")\n\t}\n\n\tfixedTable := testTableName(t, \"fixed\")\n\tvariableTable := testTableName(t, \"variable\")\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TABLE IF NOT EXISTS gocql_test.%s(id int primary key, vec vector<float, 3>);`, fixedTable))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = createTable(session, fmt.Sprintf(`CREATE TABLE IF NOT EXISTS gocql_test.%s(id int primary key, vec vector<text, 4>);`, variableTable))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id) VALUES(?)\", fixedTable), 1).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar selectFixVec []float32\n\terr = session.Query(fmt.Sprintf(\"SELECT vec FROM %s WHERE id = ?\", fixedTable), 1).Scan(&selectFixVec)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttests.AssertTrue(t, \"fixed size element vector is empty\", selectFixVec == nil)\n\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id) VALUES(?)\", variableTable), 1).Exec()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar selectVarVec []string\n\terr = session.Query(fmt.Sprintf(\"SELECT vec FROM %s WHERE id = ?\", 
variableTable), 1).Scan(&selectVarVec)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttests.AssertTrue(t, \"variable size element vector is empty\", selectVarVec == nil)\n}\n\nfunc TestVector_MissingDimension(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tif *flagDistribution == \"cassandra\" && flagCassVersion.Before(5, 0, 0) {\n\t\tt.Skip(\"Vector types have been introduced in Cassandra 5.0\")\n\t}\n\n\tif *flagDistribution == \"scylla\" && flagCassVersion.Before(2025, 3, 0) {\n\t\tt.Skip(\"Vector types have been introduced in ScyllaDB 2025.3\")\n\t}\n\n\ttable := testTableName(t)\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TABLE IF NOT EXISTS gocql_test.%s(id int primary key, vec vector<float, 3>);`, table))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, vec) VALUES(?, ?)\", table), 1, []float32{8, -5.0}).Exec()\n\trequire.Error(t, err, \"expected vector with 3 dimensions, received 2\")\n\n\terr = session.Query(fmt.Sprintf(\"INSERT INTO %s(id, vec) VALUES(?, ?)\", table), 1, []float32{8, -5.0, 1, 3}).Exec()\n\trequire.Error(t, err, \"expected vector with 3 dimensions, received 4\")\n}\n\nfunc TestVector_SubTypeParsing(t *testing.T) {\n\tt.Parallel()\n\n\tif *flagDistribution == \"scylla\" && flagCassVersion.Before(2025, 4, 0) {\n\t\tt.Skip(\"Vector types are useful in ScyllaDB from 2025.4 and on\")\n\t}\n\tprefix := apacheCassandraTypePrefix\n\tvectorTypePrefix := prefix + \"VectorType\"\n\ttestCases := []struct {\n\t\tname     string\n\t\tcustom   string\n\t\texpected TypeInfo\n\t}{\n\t\t{name: \"text\", custom: prefix + \"UTF8Type\", expected: NativeType{typ: TypeVarchar}},\n\t\t{name: \"set_int\", custom: prefix + \"SetType(\" + prefix + \"Int32Type)\", expected: CollectionType{NativeType: NativeType{typ: TypeSet}, Key: nil, Elem: NativeType{typ: TypeInt}}},\n\t\t{\n\t\t\tname:   \"udt\",\n\t\t\tcustom: prefix + 
\"UserType(gocql_test,706572736f6e,66697273745f6e616d65:\" + prefix + \"UTF8Type,6c6173745f6e616d65:\" + prefix + \"UTF8Type,616765:\" + prefix + \"Int32Type)\",\n\t\t\texpected: UDTTypeInfo{\n\t\t\t\tNativeType: NativeType{typ: TypeUDT},\n\t\t\t\tKeySpace:   \"gocql_test\",\n\t\t\t\tName:       \"person\",\n\t\t\t\tElements: []UDTField{\n\t\t\t\t\tUDTField{Name: \"first_name\", Type: NativeType{typ: TypeVarchar}},\n\t\t\t\t\tUDTField{Name: \"last_name\", Type: NativeType{typ: TypeVarchar}},\n\t\t\t\t\tUDTField{Name: \"age\", Type: NativeType{typ: TypeInt}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:   \"tuple\",\n\t\t\tcustom: prefix + \"TupleType(\" + prefix + \"UTF8Type,\" + prefix + \"Int32Type,\" + prefix + \"UTF8Type)\",\n\t\t\texpected: TupleTypeInfo{\n\t\t\t\tNativeType: NativeType{typ: TypeTuple},\n\t\t\t\tElems: []TypeInfo{\n\t\t\t\t\tNativeType{typ: TypeVarchar},\n\t\t\t\t\tNativeType{typ: TypeInt},\n\t\t\t\t\tNativeType{typ: TypeVarchar},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:   \"vector_vector_inet\",\n\t\t\tcustom: prefix + \"VectorType(\" + prefix + \"VectorType(\" + prefix + \"InetAddressType, 2), 3)\",\n\t\t\texpected: VectorType{\n\t\t\t\tNativeType: NativeType{typ: TypeCustom, custom: vectorTypePrefix},\n\t\t\t\tSubType: VectorType{\n\t\t\t\t\tNativeType: NativeType{typ: TypeCustom, custom: vectorTypePrefix},\n\t\t\t\t\tSubType:    NativeType{typ: TypeInet},\n\t\t\t\t\tDimensions: 2,\n\t\t\t\t},\n\t\t\t\tDimensions: 3,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:   \"map_int_vector_text\",\n\t\t\tcustom: prefix + \"MapType(\" + prefix + \"Int32Type,\" + prefix + \"VectorType(\" + prefix + \"UTF8Type, 10))\",\n\t\t\texpected: CollectionType{\n\t\t\t\tNativeType: NativeType{typ: TypeMap},\n\t\t\t\tKey:        NativeType{typ: TypeInt},\n\t\t\t\tElem: VectorType{\n\t\t\t\t\tNativeType: NativeType{typ: TypeCustom, custom: vectorTypePrefix},\n\t\t\t\t\tSubType:    NativeType{typ: TypeVarchar},\n\t\t\t\t\tDimensions: 
10,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:   \"set_map_vector_text_text\",\n\t\t\tcustom: prefix + \"SetType(\" + prefix + \"MapType(\" + prefix + \"VectorType(\" + prefix + \"Int32Type, 10),\" + prefix + \"UTF8Type))\",\n\t\t\texpected: CollectionType{\n\t\t\t\tNativeType: NativeType{typ: TypeSet},\n\t\t\t\tKey:        nil,\n\t\t\t\tElem: CollectionType{\n\t\t\t\t\tNativeType: NativeType{typ: TypeMap},\n\t\t\t\t\tKey: VectorType{\n\t\t\t\t\t\tNativeType: NativeType{typ: TypeCustom, custom: vectorTypePrefix},\n\t\t\t\t\t\tSubType:    NativeType{typ: TypeInt},\n\t\t\t\t\t\tDimensions: 10,\n\t\t\t\t\t},\n\t\t\t\t\tElem: NativeType{typ: TypeVarchar},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range testCases {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tf := newFramer(nil, 0)\n\t\t\tf.writeShort(0)\n\t\t\tf.writeString(fmt.Sprintf(\"%sVectorType(%s, 2)\", prefix, test.custom))\n\t\t\tparsedType := f.readTypeInfo()\n\t\t\trequire.IsType(t, parsedType, VectorType{})\n\t\t\tvectorType := parsedType.(VectorType)\n\t\t\ttests.AssertEqual(t, \"dimensions\", 2, vectorType.Dimensions)\n\t\t\ttests.AssertDeepEqual(t, \"vector\", test.expected, vectorType.SubType)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "version.go",
    "content": "/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport \"runtime/debug\"\n\nconst (\n\tmainPackage = \"github.com/gocql/gocql\"\n)\n\nvar defaultDriverVersion string\n\nfunc init() {\n\tbuildInfo, ok := debug.ReadBuildInfo()\n\tif ok {\n\t\tfor _, d := range buildInfo.Deps {\n\t\t\tif d.Path == mainPackage {\n\t\t\t\tdefaultDriverVersion = d.Version\n\t\t\t\tif d.Replace != nil {\n\t\t\t\t\tdefaultDriverVersion = d.Replace.Version\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "warning_handler.go",
    "content": "package gocql\n\ntype DefaultWarningHandler struct {\n\tlogger StdLogger\n}\n\nfunc DefaultWarningHandlerBuilder(session *Session) WarningHandler {\n\treturn DefaultWarningHandler{\n\t\tlogger: session.logger,\n\t}\n}\n\nfunc (d DefaultWarningHandler) HandleWarnings(qry ExecutableQuery, host *HostInfo, warnings []string) {\n\tif d.logger == nil {\n\t\treturn\n\t}\n\tif host != nil && !host.hostId.IsEmpty() {\n\t\td.logger.Printf(\"[%s] warnings: %v\", host.hostId.String(), warnings)\n\t} else {\n\t\td.logger.Printf(\"Cluster warnings: %v\", warnings)\n\t}\n}\n\nvar _ WarningHandler = DefaultWarningHandler{}\n\nfunc NoopWarningHandlerBuilder(session *Session) WarningHandler {\n\treturn nil\n}\n"
  },
  {
    "path": "wiki_test.go",
    "content": "//go:build integration\n// +build integration\n\n/*\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/*\n * Content before git sha 34fdeebefcbf183ed7f916f931aa0586fdaa1b40\n * Copyright (c) 2016, The Gocql authors,\n * provided under the BSD-3-Clause License.\n * See the NOTICE file distributed with this work for additional information.\n */\n\npackage gocql\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"gopkg.in/inf.v0\"\n)\n\ntype WikiPage struct {\n\tTitle       string\n\tRevId       UUID\n\tBody        string\n\tViews       int64\n\tProtected   bool\n\tModified    time.Time\n\tRating      *inf.Dec\n\tTags        []string\n\tAttachments map[string]WikiAttachment\n}\n\ntype WikiAttachment []byte\n\nvar wikiTestData = []*WikiPage{\n\t{\n\t\tTitle:    \"Frontpage\",\n\t\tRevId:    TimeUUID(),\n\t\tBody:     \"Welcome to this wiki page!\",\n\t\tRating:   inf.NewDec(131, 3),\n\t\tModified: time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC),\n\t\tTags:     []string{\"start\", \"important\", \"test\"},\n\t\tAttachments: map[string]WikiAttachment{\n\t\t\t\"logo\":    WikiAttachment(\"\\x00company logo\\x00\"),\n\t\t\t\"favicon\": 
WikiAttachment(\"favicon.ico\"),\n\t\t},\n\t},\n\t{\n\t\tTitle:    \"Foobar\",\n\t\tRevId:    TimeUUID(),\n\t\tBody:     \"foo::Foo f = new foo::Foo(foo::Foo::INIT);\",\n\t\tModified: time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC),\n\t},\n}\n\ntype WikiTest struct {\n\tsession *Session\n\ttb      testing.TB\n\n\ttable string\n}\n\nfunc CreateSchema(session *Session, tb testing.TB, table string) *WikiTest {\n\ttable = testTableName(tb, table)\n\tif err := createTable(session, fmt.Sprintf(\"DROP TABLE IF EXISTS gocql_test.%s\", table)); err != nil {\n\t\ttb.Fatal(\"CreateSchema:\", err)\n\t}\n\n\terr := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s (\n\t\t\ttitle       varchar,\n\t\t\trevid       timeuuid,\n\t\t\tbody        varchar,\n\t\t\tviews       bigint,\n\t\t\tprotected   boolean,\n\t\t\tmodified    timestamp,\n\t\t\trating      decimal,\n\t\t\ttags        set<varchar>,\n\t\t\tattachments map<varchar, blob>,\n\t\t\tPRIMARY KEY (title, revid)\n\t\t)`, table))\n\n\tif err != nil {\n\t\ttb.Fatal(\"CreateSchema:\", err)\n\t}\n\n\treturn &WikiTest{\n\t\tsession: session,\n\t\ttb:      tb,\n\t\ttable:   table,\n\t}\n}\n\nfunc (w *WikiTest) CreatePages(n int) {\n\tvar page WikiPage\n\tt0 := time.Now()\n\tfor i := 0; i < n; i++ {\n\t\tpage.Title = fmt.Sprintf(\"generated_%d\", (i&16)+1)\n\t\tpage.Modified = t0.Add(time.Duration(i-n) * time.Minute)\n\t\tpage.RevId = UUIDFromTime(page.Modified)\n\t\tpage.Body = fmt.Sprintf(\"text %d\", i)\n\t\tif err := w.InsertPage(&page); err != nil {\n\t\t\tw.tb.Error(\"CreatePages:\", err)\n\t\t}\n\t}\n}\n\nfunc (w *WikiTest) InsertPage(page *WikiPage) error {\n\treturn w.session.Query(fmt.Sprintf(`INSERT INTO %s\n\t\t(title, revid, body, views, protected, modified, rating, tags, attachments)\n\t\tVALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`, w.table),\n\t\tpage.Title, page.RevId, page.Body, page.Views, page.Protected,\n\t\tpage.Modified, page.Rating, page.Tags, page.Attachments).Exec()\n}\n\nfunc (w *WikiTest) 
SelectPage(page *WikiPage, title string, revid UUID) error {\n\treturn w.session.Query(fmt.Sprintf(`SELECT title, revid, body, views, protected,\n\t\tmodified,tags, attachments, rating\n\t\tFROM %s WHERE title = ? AND revid = ? LIMIT 1`, w.table),\n\t\ttitle, revid).Scan(&page.Title, &page.RevId,\n\t\t&page.Body, &page.Views, &page.Protected, &page.Modified, &page.Tags,\n\t\t&page.Attachments, &page.Rating)\n}\n\nfunc (w *WikiTest) GetPageCount() int {\n\tvar count int\n\tif err := w.session.Query(fmt.Sprintf(`SELECT COUNT(*) FROM %s`, w.table)).Scan(&count); err != nil {\n\t\tw.tb.Error(\"GetPageCount\", err)\n\t}\n\treturn count\n}\n\nfunc TestWikiCreateSchema(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tCreateSchema(session, t, \"create\")\n}\n\nfunc BenchmarkWikiCreateSchema(b *testing.B) {\n\tb.StopTimer()\n\tsession := createSession(b)\n\tdefer func() {\n\t\tb.StopTimer()\n\t\tsession.Close()\n\t}()\n\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tCreateSchema(session, b, \"bench_create\")\n\t}\n}\n\nfunc TestWikiCreatePages(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tw := CreateSchema(session, t, \"create_pages\")\n\n\tnumPages := 5\n\tw.CreatePages(numPages)\n\tif count := w.GetPageCount(); count != numPages {\n\t\tt.Errorf(\"expected %d pages, got %d pages.\", numPages, count)\n\t}\n}\n\nfunc BenchmarkWikiCreatePages(b *testing.B) {\n\tb.StopTimer()\n\tsession := createSession(b)\n\tdefer func() {\n\t\tb.StopTimer()\n\t\tsession.Close()\n\t}()\n\n\tw := CreateSchema(session, b, \"bench_create_pages\")\n\n\tb.StartTimer()\n\n\tw.CreatePages(b.N)\n}\n\nfunc BenchmarkWikiSelectAllPages(b *testing.B) {\n\tb.StopTimer()\n\tsession := createSession(b)\n\tdefer func() {\n\t\tb.StopTimer()\n\t\tsession.Close()\n\t}()\n\tw := CreateSchema(session, b, \"bench_select_all\")\n\n\tw.CreatePages(100)\n\tb.StartTimer()\n\n\tvar page WikiPage\n\tfor i := 0; i < b.N; 
i++ {\n\t\titer := session.Query(fmt.Sprintf(`SELECT title, revid, body, views, protected,\n\t\t\tmodified, tags, attachments, rating\n\t\t\tFROM %s`, w.table)).Iter()\n\t\tfor iter.Scan(&page.Title, &page.RevId, &page.Body, &page.Views,\n\t\t\t&page.Protected, &page.Modified, &page.Tags, &page.Attachments,\n\t\t\t&page.Rating) {\n\t\t\t// pass\n\t\t}\n\t\tif err := iter.Close(); err != nil {\n\t\t\tb.Error(err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkWikiSelectSinglePage(b *testing.B) {\n\tb.StopTimer()\n\tsession := createSession(b)\n\tdefer func() {\n\t\tb.StopTimer()\n\t\tsession.Close()\n\t}()\n\tw := CreateSchema(session, b, \"bench_select_single\")\n\tpages := make([]WikiPage, 100)\n\tw.CreatePages(len(pages))\n\titer := session.Query(fmt.Sprintf(`SELECT title, revid FROM %s`, w.table)).Iter()\n\tfor i := 0; i < len(pages); i++ {\n\t\tif !iter.Scan(&pages[i].Title, &pages[i].RevId) {\n\t\t\tpages = pages[:i]\n\t\t\tbreak\n\t\t}\n\t}\n\tif err := iter.Close(); err != nil {\n\t\tb.Error(err)\n\t}\n\tb.StartTimer()\n\n\tvar page WikiPage\n\tfor i := 0; i < b.N; i++ {\n\t\tp := &pages[i%len(pages)]\n\t\tif err := w.SelectPage(&page, p.Title, p.RevId); err != nil {\n\t\t\tb.Error(err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkWikiSelectPageCount(b *testing.B) {\n\tb.StopTimer()\n\tsession := createSession(b)\n\tdefer func() {\n\t\tb.StopTimer()\n\t\tsession.Close()\n\t}()\n\n\tw := CreateSchema(session, b, \"bench_page_count\")\n\tconst numPages = 10\n\tw.CreatePages(numPages)\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tif count := w.GetPageCount(); count != numPages {\n\t\t\tb.Errorf(\"expected %d pages, got %d pages.\", numPages, count)\n\t\t}\n\t}\n}\n\nfunc TestWikiTypicalCRUD(t *testing.T) {\n\tt.Parallel()\n\n\tsession := createSession(t)\n\tdefer session.Close()\n\n\tw := CreateSchema(session, t, \"crud\")\n\n\tfor _, page := range wikiTestData {\n\t\tif err := w.InsertPage(page); err != nil {\n\t\t\tt.Error(\"InsertPage:\", err)\n\t\t}\n\t}\n\tif count := 
w.GetPageCount(); count != len(wikiTestData) {\n\t\tt.Errorf(\"count: expected %d, got %d\\n\", len(wikiTestData), count)\n\t}\n\tfor _, original := range wikiTestData {\n\t\tpage := new(WikiPage)\n\t\tif err := w.SelectPage(page, original.Title, original.RevId); err != nil {\n\t\t\tt.Error(\"SelectPage:\", err)\n\t\t\tcontinue\n\t\t}\n\t\tsort.Sort(sort.StringSlice(page.Tags))\n\t\tsort.Sort(sort.StringSlice(original.Tags))\n\t\tif !reflect.DeepEqual(page, original) {\n\t\t\tt.Errorf(\"page: expected %#v, got %#v\\n\", original, page)\n\t\t}\n\t}\n}\n"
  }
]