[
  {
    "path": ".clang-format",
    "content": "# This .clang-format was originally written by Paul Ganssle\n# Its exact license is unknown but compatible with MIT\n# https://gist.github.com/pganssle/0e3a5f828b4d07d79447f6ced8e7e4db\n\n# A clang-format style that approximates Python's PEP 7\n# Useful for IDE integration\nBasedOnStyle: Google\nAlwaysBreakAfterReturnType: All\nAllowShortIfStatementsOnASingleLine: false\nAlignAfterOpenBracket: Align\nBreakBeforeBraces: Stroustrup\nColumnLimit: 79\nDerivePointerAlignment: false\nIndentWidth: 4\nLanguage: Cpp\nPointerAlignment: Right\nReflowComments: true\nSpaceBeforeParens: ControlStatements\nSpacesInParentheses: false\nTabWidth: 4\nUseTab: Never\n"
  },
  {
    "path": ".github/actionlint.yml",
    "content": "# As of March 5, 2026, actionlint via super-linter 8.5.0 does not support macOS 26, so we ignore the runner-label warning for now.\npaths:\n  .github/workflows/**/*.{yml,yaml}:\n    ignore:\n      - 'label \"macos-26\" is unknown.+'\n      - 'label \"macos-26-intel\" is unknown.+'\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "version: 2\nupdates:\n  - package-ecosystem: \"github-actions\"\n    directory: \"/\"\n    schedule:\n      interval: \"weekly\"\n      day: \"monday\"\n    open-pull-requests-limit: 5\n    groups:\n      dependencies:\n        patterns:\n          - \"*\"\n    cooldown:\n      default-days: 7\n    commit-message:\n      prefix: \"chore(ci)\"\n  - package-ecosystem: \"pip\"\n    directory: \"/\"\n    schedule:\n      interval: \"weekly\"\n      day: \"monday\"\n    open-pull-requests-limit: 5\n    groups:\n      dependencies:\n        patterns:\n          - \"*\"\n    cooldown:\n      default-days: 7\n    commit-message:\n      prefix: \"chore\"\n      include: \"scope\"\n"
  },
  {
    "path": ".github/workflows/benchmark-base-hash.yml",
    "content": "---\nname: Benchmark Base Hash\n\non:\n  workflow_dispatch:\n\npermissions: {}\n\njobs:\n  benchmark:\n    permissions:\n      contents: read\n      packages: read\n    runs-on: ubuntu-24.04\n    env:\n      BENCHMARK_MAX_SIZE: 65536\n    steps:\n      - name: Checkout\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n        with:\n          persist-credentials: false\n      - name: Set up Python\n        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0\n        with:\n          python-version: \"3.14\"\n      - name: Install dependencies\n        run: |\n          pip install --upgrade pip\n          pip install .\n          pip install \".[benchmark]\"\n      - name: Tune the system for benchmarking\n        run: |\n          echo \"Running \\\"lscpu -a -e\\\"...\"\n          lscpu -a -e\n\n          echo -n \"Checking randomize_va_space: \"\n          cat /proc/sys/kernel/randomize_va_space\n          echo \"randomize_va_space should be 2, meaning ASLR is fully enabled.\"\n\n          systemctl status irqbalance\n          echo \"Stopping irqbalance...\"\n          sudo systemctl stop irqbalance\n\n          echo -n \"Checking default_smp_affinity: \"\n          cat /proc/irq/default_smp_affinity\n          echo 3 | sudo tee /proc/irq/default_smp_affinity > /dev/null\n          echo -n \"Updated default_smp_affinity to: \"\n          cat /proc/irq/default_smp_affinity\n\n          echo -n \"Checking perf_event_max_sample_rate: \"\n          cat /proc/sys/kernel/perf_event_max_sample_rate\n          echo 1 | sudo tee /proc/sys/kernel/perf_event_max_sample_rate > /dev/null\n          echo -n \"Updated perf_event_max_sample_rate to: \"\n          cat /proc/sys/kernel/perf_event_max_sample_rate\n      - name: Benchmark hash functions\n        run: |\n          mkdir var\n          taskset -c 2,3 python benchmark/benchmark.py \\\n            -o var/mmh3_base_hash_500.json \\\n            --test-hash mmh3_base_hash \\\n            --test-buffer-size-max \"$BENCHMARK_MAX_SIZE\"\n          taskset -c 2,3 python benchmark/benchmark.py \\\n            -o var/mmh3_32_500.json \\\n            --test-hash mmh3_32 \\\n            --test-buffer-size-max \"$BENCHMARK_MAX_SIZE\"\n          pip uninstall -y mmh3\n          pip install mmh3==4.1.0\n          taskset -c 2,3 python benchmark/benchmark.py \\\n            -o var/mmh3_base_hash_410.json \\\n            --test-hash mmh3_base_hash \\\n            --test-buffer-size-max \"$BENCHMARK_MAX_SIZE\"\n      - name: Reset the system from benchmarking\n        run: |\n          echo -n \"Checking perf_event_max_sample_rate: \"\n          cat /proc/sys/kernel/perf_event_max_sample_rate\n          echo 100000 | sudo tee /proc/sys/kernel/perf_event_max_sample_rate > /dev/null\n          echo -n \"Updated perf_event_max_sample_rate to: \"\n          cat /proc/sys/kernel/perf_event_max_sample_rate\n\n          echo -n \"Checking default_smp_affinity: \"\n          cat /proc/irq/default_smp_affinity\n          echo f | sudo tee /proc/irq/default_smp_affinity > /dev/null\n          echo -n \"Updated default_smp_affinity to: \"\n          cat /proc/irq/default_smp_affinity\n\n          echo \"Restarting irqbalance...\"\n          sudo systemctl restart irqbalance\n          systemctl status irqbalance\n      - name: Upload artifacts\n        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0\n        with:\n          name: benchmark-results\n          path: var\n"
  },
  {
    "path": ".github/workflows/benchmark.yml",
    "content": "---\nname: Benchmark\n\non:\n  workflow_dispatch:\n\npermissions: {}\n\njobs:\n  benchmark:\n    permissions:\n      contents: read\n      packages: read\n    runs-on: ubuntu-24.04\n    env:\n      BENCHMARK_MAX_SIZE: 262144\n    steps:\n      - name: Checkout\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n        with:\n          persist-credentials: false\n      - name: Set up Python\n        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0\n        with:\n          python-version: \"3.14\"\n      - name: Install dependencies\n        run: |\n          pip install --upgrade pip\n          pip install .\n          pip install \".[benchmark]\"\n      - name: Tune the system for benchmarking\n        run: |\n          echo \"Running \\\"lscpu -a -e\\\"...\"\n          lscpu -a -e\n\n          echo -n \"Checking randomize_va_space: \"\n          cat /proc/sys/kernel/randomize_va_space\n          echo \"randomize_va_space should be 2, meaning ASLR is fully enabled.\"\n\n          systemctl status irqbalance\n          echo \"Stopping irqbalance...\"\n          sudo systemctl stop irqbalance\n\n          echo -n \"Checking default_smp_affinity: \"\n          cat /proc/irq/default_smp_affinity\n          echo 3 | sudo tee /proc/irq/default_smp_affinity > /dev/null\n          echo -n \"Updated default_smp_affinity to: \"\n          cat /proc/irq/default_smp_affinity\n\n          echo -n \"Checking perf_event_max_sample_rate: \"\n          cat /proc/sys/kernel/perf_event_max_sample_rate\n          echo 1 | sudo tee /proc/sys/kernel/perf_event_max_sample_rate > /dev/null\n          echo -n \"Updated perf_event_max_sample_rate to: \"\n          cat /proc/sys/kernel/perf_event_max_sample_rate\n      - name: Benchmark hash functions\n        run: |\n          mkdir var\n          declare -a hash_list=(\"mmh3_32\" \"mmh3_128\" \"xxh_32\" \"xxh_64\" \\\n             \"xxh3_64\" \"xxh3_128\" \"md5\" \"sha1\")\n          for hash_name in \"${hash_list[@]}\"; do\n            echo \"${hash_name}\"\n            taskset -c 2,3 python benchmark/benchmark.py \\\n              -o var/\"${hash_name}\".json \\\n              --test-hash \"${hash_name}\" \\\n              --test-buffer-size-max \"$BENCHMARK_MAX_SIZE\"\n          done\n      - name: Reset the system from benchmarking\n        run: |\n          echo -n \"Checking perf_event_max_sample_rate: \"\n          cat /proc/sys/kernel/perf_event_max_sample_rate\n          echo 100000 | sudo tee /proc/sys/kernel/perf_event_max_sample_rate > /dev/null\n          echo -n \"Updated perf_event_max_sample_rate to: \"\n          cat /proc/sys/kernel/perf_event_max_sample_rate\n\n          echo -n \"Checking default_smp_affinity: \"\n          cat /proc/irq/default_smp_affinity\n          echo f | sudo tee /proc/irq/default_smp_affinity > /dev/null\n          echo -n \"Updated default_smp_affinity to: \"\n          cat /proc/irq/default_smp_affinity\n\n          echo \"Restarting irqbalance...\"\n          sudo systemctl restart irqbalance\n          systemctl status irqbalance\n      - name: Upload artifacts\n        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0\n        with:\n          name: benchmark-results\n          path: var\n"
  },
  {
    "path": ".github/workflows/build.yml",
    "content": "# This workflow is intended for quick building tests.\n# Use wheels.yml for complete building/uploading tests.\n---\nname: Build\n\non: # yamllint disable-line rule:truthy\n  push:\n    branches: \"**\"\n  pull_request:\n    types:\n      - opened\n      - synchronize\n      - reopened\n\npermissions: {}\n\njobs:\n  build:\n    permissions:\n      contents: read\n      packages: read\n\n    strategy:\n      matrix:\n        os: [macos-26, windows-2025, ubuntu-24.04]\n        python-version: [\"3.10\", \"3.11\", \"3.12\", \"3.13\", \"3.14\", \"3.14t\"]\n\n    runs-on: ${{ matrix.os }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n        with:\n          persist-credentials: false\n      - name: Set up Python ${{ matrix.python-version }}\n        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0\n        with:\n          python-version: ${{ matrix.python-version }}\n      - name: Install dependencies\n        run: |\n          python -m pip install --upgrade pip\n          pip install setuptools build\n          pip install .\n          pip install \".[test,type]\"\n      - name: Test with pytest\n        run: python -m pytest\n      - name: Test type hints with mypy\n        run: mypy --strict tests\n      - name: Test building from the source distribution\n        shell: bash\n        run: |\n          pip uninstall -y mmh3\n          python -m build --sdist\n          python -m pip install dist/*.tar.gz\n          python -m pytest\n          mypy --strict tests\n"
  },
  {
    "path": ".github/workflows/draft-pdf.yml",
    "content": "---\nname: Draft Paper\n\non:\n  push:\n    branches:\n      - paper\n  workflow_dispatch:\n\npermissions: {}\n\njobs:\n  paper:\n    permissions:\n      contents: read\n      packages: read\n\n    runs-on: ubuntu-latest\n\n    name: Paper Draft\n    if: github.event_name == 'workflow_dispatch' && github.ref == 'refs/heads/master'\n    steps:\n      - name: Checkout\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n        with:\n          persist-credentials: false\n      - name: Build draft PDF\n        uses: openjournals/openjournals-draft-action@85a18372e48f551d8af9ddb7a747de685fbbb01c # v1.0\n        with:\n          journal: joss\n          # This should be the path to the paper within your repo.\n          paper-path: paper/paper.md\n      - name: Upload\n        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0\n        with:\n          name: paper\n          # This is the output path where Pandoc will write the compiled\n          # PDF. Note, this should be the same directory as the input\n          # paper.md\n          path: paper/paper.pdf\n"
  },
  {
    "path": ".github/workflows/superlinter.yml",
    "content": "---\nname: Super-Linter\n\non: # yamllint disable-line rule:truthy\n  push:\n    branches: \"**\"\n  pull_request:\n    types:\n      - opened\n      - synchronize\n      - reopened\n\npermissions: {}\n\njobs:\n  # Set the job key. The key is displayed as the job name\n  # when a job name is not provided\n  super-lint:\n    # Name the Job\n    name: Lint code base\n    # Set the type of machine to run on\n    runs-on: ubuntu-latest\n\n    permissions:\n      contents: read\n      packages: read\n      # To report GitHub Actions status checks\n      statuses: write\n\n    steps:\n      # Checks out a copy of your repository on the ubuntu-latest machine\n      - name: Checkout code\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n        with:\n          persist-credentials: false\n          # super-linter needs the full git history to get the\n          # list of files that changed across commits\n          fetch-depth: 0\n\n      # Runs the Super-Linter action\n      - name: Run Super-Linter\n        uses: super-linter/super-linter@61abc07d755095a68f4987d1c2c3d1d64408f1f9 # v8.5.0\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n          LINTER_RULES_PATH: /\n          GITHUB_ACTIONS_CONFIG_FILE: .github/actionlint.yml\n          PYTHON_PYLINT_CONFIG_FILE: pyproject.toml\n          PYTHON_RUFF_CONFIG_FILE: pyproject.toml\n          PYTHON_RUFF_FORMAT_CONFIG_FILE: pyproject.toml\n          # Suppressed because it conflicts with clang-format in some cases\n          VALIDATE_CPP: false\n          # Suppressed because copy/paste is sometimes required at low level\n          VALIDATE_JSCPD: false\n          # Suppressed in favor of Ruff\n          VALIDATE_PYTHON_BLACK: false\n          VALIDATE_PYTHON_FLAKE8: false\n          VALIDATE_PYTHON_ISORT: false\n          # Suppressed because it even accuses book titles\n          VALIDATE_NATURAL_LANGUAGE: false\n          # Suppressed because it does not honor the ignore-paths option\n          VALIDATE_PYTHON_PYLINT: false\n      # super-linter 7 does not honor the ignore-paths option of pylint\n      # so we run pylint separately\n      - name: Set up Python\n        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0\n        with:\n          python-version: \"3.14\"\n      - name: Run pylint\n        run: |\n          pip install pylint\n          pylint --recursive=y .\n"
  },
  {
    "path": ".github/workflows/wheels.yml",
    "content": "---\nname: Wheel-Builder\n\non:\n  push:\n    tags:\n      - \"v*.*.*\"\n  workflow_dispatch:\n\npermissions: {}\n\njobs:\n  build_wheels:\n    name: Build wheel for ${{ matrix.platform }} ${{ matrix.archs }} ${{ matrix.build }} (runs on ${{ matrix.os }})\n    runs-on: ${{ matrix.os }}\n    strategy:\n      matrix:\n        os: [ubuntu-24.04]\n        archs: [x86_64, i686, aarch64, ppc64le, s390x]\n        build: [manylinux, musllinux]\n        include:\n          - os: windows-2025\n            archs: AMD64\n          - os: windows-2025\n            archs: x86\n          - os: windows-2025\n            archs: ARM64\n          - os: macos-26-intel\n            archs: x86_64\n          - os: macos-26\n            archs: arm64\n          - os: macos-26\n            archs: universal2\n          - os: ubuntu-24.04\n            platform: android\n            archs: x86_64\n            build: android\n          - os: macos-26\n            platform: android\n            archs: arm64_v8a\n            build: android\n          - os: macos-26\n            platform: ios\n            archs: arm64_iphoneos\n          - os: macos-26\n            platform: ios\n            archs: arm64_iphonesimulator\n          - os: macos-26-intel\n            platform: ios\n            archs: x86_64_iphonesimulator\n    steps:\n      - name: Checkout\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n        with:\n          persist-credentials: false\n      - name: Set up Python\n        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0\n        with:\n          python-version: \"3.14\"\n      - name: Set dev version for TestPyPI\n        if: github.event_name == 'workflow_dispatch'\n        shell: python\n        run: |\n          import re, datetime\n          timestamp = datetime.datetime.now(datetime.timezone.utc).strftime(\"%Y%m%d%H%M\")\n          text = open(\"pyproject.toml\", encoding=\"utf-8\").read()\n          m = re.search(r'version\\s*=\\s*\"(.+?)\"', text)\n          if not m:\n            raise RuntimeError(\"version field not found in pyproject.toml\")\n          version = m.group(1)\n          base_version = version.split(\"-\")[0]\n          new_text = re.sub(\n            r'version\\s*=\\s*\".*?\"',\n            f'version = \"{base_version}.dev{timestamp}\"',\n            text,\n            count=1\n          )\n          open(\"pyproject.toml\", \"w\", encoding=\"utf-8\").write(new_text)\n      - name: Set up QEMU\n        if: runner.os == 'Linux' && matrix.platform != 'android'\n        uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0\n      # https://github.blog/changelog/2024-04-02-github-actions-hardware-accelerated-android-virtualization-now-available/\n      - name: Set up KVM for Android emulation\n        if: runner.os == 'Linux' && matrix.platform == 'android'\n        run: |\n          echo 'KERNEL==\"kvm\", GROUP=\"kvm\", MODE=\"0666\", OPTIONS+=\"static_node=kvm\"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules\n          sudo udevadm control --reload-rules\n          sudo udevadm trigger --name-match=kvm\n      - name: Build wheels\n        uses: pypa/cibuildwheel@ee02a1537ce3071a004a6b08c41e72f0fdc42d9a # v3.4.0\n        with:\n          output-dir: wheelhouse\n        env:\n          CIBW_BUILD: \"{cp310,cp311,cp312,cp313,cp314,cp314t}-${{ matrix.build }}*\"\n          CIBW_PLATFORM: ${{ matrix.platform || 'auto' }}\n          CIBW_ARCHS: ${{ matrix.archs }}\n          CIBW_BUILD_FRONTEND: \"build\"\n          CIBW_TEST_REQUIRES: \"pytest\"\n          CIBW_TEST_SOURCES_ANDROID: \"./tests\"\n          CIBW_TEST_SOURCES_IOS: \"./tests\"\n          CIBW_TEST_COMMAND: \"pytest {project}\"\n          CIBW_TEST_COMMAND_ANDROID: \"python -m pytest ./tests\"\n          CIBW_TEST_COMMAND_IOS: \"python -m pytest ./tests\"\n          CIBW_TEST_SKIP: \"*-win_arm64 *-android_arm64_v8a\"\n      - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0\n        with:\n          name: Wheel-${{ matrix.os }}-${{ matrix.platform }}-${{ matrix.build }}${{ matrix.archs }}\n          path: ./wheelhouse/*.whl\n  build_sdist:\n    name: Build a source distribution\n    runs-on: ubuntu-24.04\n    steps:\n      - name: Checkout\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n        with:\n          persist-credentials: false\n      - name: Set up Python\n        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0\n        with:\n          python-version: \"3.14\"\n      - name: Set dev version for TestPyPI\n        if: github.event_name == 'workflow_dispatch'\n        shell: python\n        run: |\n          import re, datetime\n          timestamp = datetime.datetime.now(datetime.timezone.utc).strftime(\"%Y%m%d%H%M\")\n          text = open(\"pyproject.toml\", encoding=\"utf-8\").read()\n          m = re.search(r'version\\s*=\\s*\"(.+?)\"', text)\n          if not m:\n            raise RuntimeError(\"version field not found in pyproject.toml\")\n          version = m.group(1)\n          base_version = version.split(\"-\")[0]\n          new_text = re.sub(\n            r'version\\s*=\\s*\".*?\"',\n            f'version = \"{base_version}.dev{timestamp}\"',\n            text,\n            count=1\n          )\n          open(\"pyproject.toml\", \"w\", encoding=\"utf-8\").write(new_text)\n      - name: Build sdist\n        run: |\n          python -m pip install --upgrade pip\n          pip install setuptools build\n          python -m build --sdist\n      - name: Test building from the source distribution\n        shell: bash\n        run: |\n          pip install \".[test,type]\"\n          pip uninstall -y mmh3\n          python -m pip install dist/*.tar.gz\n          python -m pytest\n          mypy --strict tests\n      - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0\n        with:\n          path: dist/*.tar.gz\n  publish-to-pypi:\n    name: \"Publish artifacts to PyPI\"\n    if: startsWith(github.ref, 'refs/tags/')\n    needs: [build_wheels, build_sdist]\n    runs-on: ubuntu-24.04\n    environment:\n      name: pypi\n      url: https://pypi.org/p/mmh3\n    permissions:\n      id-token: write # IMPORTANT: this permission is mandatory for trusted publishing\n    steps:\n      - name: Set up built items\n        uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0\n        with:\n          path: dist\n          merge-multiple: true\n      - name: Publish package distributions to PyPI\n        uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0\n  publish-to-testpypi:\n    name: \"Publish artifacts to TestPyPI\"\n    if: github.event_name == 'workflow_dispatch' && github.ref == 'refs/heads/master'\n    needs: [build_wheels, build_sdist]\n    runs-on: ubuntu-24.04\n    environment:\n      name: testpypi\n      url: https://test.pypi.org/p/mmh3\n    permissions:\n      id-token: write # IMPORTANT: this permission is mandatory for trusted publishing\n    steps:\n      - name: Set up built items\n        uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0\n        with:\n          path: dist\n          merge-multiple: true\n      - name: Publish package distributions to TestPyPI\n        uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0\n        with:\n          repository-url: https://test.pypi.org/legacy/\n"
  },
  {
    "path": ".gitignore",
    "content": "# From https://github.com/github/gitignore\n# CC0 1.0 Universal\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# pytest\n.pytest_cache/\n\n# mypy\n.mypy_cache/\n\n# macOS\n.DS_Store\n\n# vscode\n.vscode/\n\n# Directory from an external source created by git submodule update --init\n# Adding this path is useful for other tools like markdwonlint and prettier\nutil/smhasher/\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"src/mmh3/_mmh3/smhasher\"]\n\tpath = util/smhasher\n\turl = https://github.com/aappleby/smhasher.git\n"
  },
  {
    "path": ".markdown-lint.yml",
    "content": "# MD024/no-duplicate-heading : Multiple headings with the same content : https://github.com/DavidAnson/markdownlint/blob/v0.34.0/doc/md024.md\nMD024:\n  # Only check sibling headings (default is false)\n  # Set to true to conform to the Keep a Changelog format\n  # See also https://github.com/olivierlacan/keep-a-changelog/issues/274#issuecomment-484065486\n  siblings_only: true\n"
  },
  {
    "path": ".readthedocs.yml",
    "content": "# Read the Docs configuration file for MkDocs projects\n# See https://docs.readthedocs.io/en/stable/config-file/v2.html for detail\n\n# Required\nversion: 2\n\n# Set the version of Python and other tools you might need\nbuild:\n  os: ubuntu-22.04\n  tools:\n    python: \"3.12\"\n\nsphinx:\n  builder: html\n  configuration: docs/conf.py\n\n# Build all formats\nformats:\n  - pdf\n  - epub\n\n# Optionally declare the Python requirements required to build your docs\npython:\n  install:\n    - method: pip\n      path: .\n      extra_requirements:\n        - docs\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to this project will be documented here. For a list of\ncontributors, see the\n[Contributors](https://mmh3.readthedocs.io/en/stable/CONTRIBUTORS.html) page.\n\nThe format is based on\n[Keep a Changelog 1.1.0](https://keepachangelog.com/en/1.1.0/).\nThis project has adhered to\n[Semantic Versioning 2.0.0](https://semver.org/spec/v2.0.0.html)\nsince version 3.0.0.\n\n## [5.2.1] - 2026-03-06\n\n### Added\n\n- Add support for the Android wheel for Python 3.14.\n\n### Removed\n\n- Drop support for Python 3.9, as it has reached the end of life on 2025-10-31.\n\n## [5.2.0] - 2025-07-29\n\n### Added\n\n- Add support for Python 3.14, including 3.14t (no-GIL) wheels. However, thread\n  safety for the no-GIL variant is not fully tested yet. Please report any\n  issues you encounter ([#134](https://github.com/hajimes/mmh3/pull/134),\n  [#136](https://github.com/hajimes/mmh3/pull/136)).\n- Add support for Android (Python 3.13 only) and iOS (Python 3.13 and 3.14) wheels,\n  enabled by the major version update of\n  [cibuildwheel](https://github.com/pypa/cibuildwheel)\n  ([#135](https://github.com/hajimes/mmh3/pull/135)).\n\n## [5.1.0] - 2025-01-25\n\n### Added\n\n- Improve the performance of `hash128()`, `hash64()`, and `hash_bytes()` by\n  using\n  [METH_FASTCALL](https://docs.python.org/3/c-api/structures.html#c.METH_FASTCALL),\n  reducing the overhead of function calls\n  ([#116](https://github.com/hajimes/mmh3/pull/116)).\n- Add the software paper for this library\n  ([doi:10.21105/joss.06124](https://doi.org/10.21105/joss.06124)), following\n  its publication in the\n  [_Journal of Open Source Software_](https://joss.theoj.org)\n  ([#118](https://github.com/hajimes/mmh3/pull/118)).\n\n### Removed\n\n- Drop support for Python 3.8, as it has reached the end of life on 2024-10-07\n  ([#117](https://github.com/hajimes/mmh3/pull/117)).\n\n## [5.0.1] - 2024-09-22\n\n### Fixed\n\n- Fix the issue that the package cannot be built from the source distribution\n  ([#90](https://github.com/hajimes/mmh3/issues/90)).\n\n## [5.0.0] - 2024-09-18\n\n### Added\n\n- Add support for Python 3.13.\n- Improve the performance of the `hash()` function with\n  [METH_FASTCALL](https://docs.python.org/3/c-api/structures.html#c.METH_FASTCALL),\n  reducing the overhead of function calls. For data sizes between 1–2 KB\n  (e.g., 48x48 favicons), performance is 10%–20% faster. For smaller data\n  (~500 bytes, like 16x16 favicons), performance increases by approximately 30%\n  ([#87](https://github.com/hajimes/mmh3/pull/87)).\n- Add `digest` functions that support the new buffer protocol\n  ([PEP 688](https://peps.python.org/pep-0688/)) as input\n  ([#75](https://github.com/hajimes/mmh3/pull/75)).\n  These functions are implemented with `METH_FASTCALL` too, offering improved\n  performance ([#84](https://github.com/hajimes/mmh3/pull/84)).\n- Slightly improve the performance of the `hash_bytes()` function\n  ([#88](https://github.com/hajimes/mmh3/pull/88))\n- Add Read the Docs documentation\n  ([#54](https://github.com/hajimes/mmh3/issues/54)).\n- Document benchmark results\n  ([#53](https://github.com/hajimes/mmh3/issues/53)).\n\n### Changed\n\n- **Backward-incompatible**: The `seed` argument is now strictly validated to\n  ensure it falls within the range [0, 0xFFFFFFFF]. A `ValueError` is raised\n  if the seed is out of range ([#84](https://github.com/hajimes/mmh3/pull/84)).\n- **Backward-incompatible**: Change the constructors of hasher classes to\n  accept a buffer as the first argument\n  ([#83](https://github.com/hajimes/mmh3/pull/83)).\n- The type of flag arguments has been changed from `bool` to `Any`\n  ([#84](https://github.com/hajimes/mmh3/pull/84)).\n- Change the format of CHANGELOG.md to conform to the\n  [Keep a Changelog](https://keepachangelog.com/en/1.1.0/) standard\n  ([#63](https://github.com/hajimes/mmh3/pull/63)).\n\n### Deprecated\n\n- Deprecate the `hash_from_buffer()` function.\n  Use `mmh3_32_sintdigest()` or `mmh3_32_uintdigest()` as alternatives\n  ([#84](https://github.com/hajimes/mmh3/pull/84)).\n\n### Fixed\n\n- Fix a reference leak in the `hash_from_buffer()` function\n  ([#75](https://github.com/hajimes/mmh3/pull/75)).\n- Fix type hints ([#76](https://github.com/hajimes/mmh3/pull/76),\n  [#77](https://github.com/hajimes/mmh3/pull/77),\n  [#84](https://github.com/hajimes/mmh3/pull/84)).\n\n## [4.1.0] - 2024-01-09\n\n### Added\n\n- Add support for Python 3.12.\n\n### Fixed\n\n- Fix issues with Bazel by changing the directory structure of the project\n  ([#50](https://github.com/hajimes/mmh3/issues/50)).\n- Fix incorrect type hints ([#51](https://github.com/hajimes/mmh3/issues/51)).\n- Fix invalid results on s390x when the arg `x64arch` of `hash64` or\n  `hash_bytes()` is set to `False`\n  ([#52](https://github.com/hajimes/mmh3/issues/52)).\n\n## [4.0.1] - 2023-07-14\n\n### Changed\n\n- Refactor the project structure\n  ([#48](https://github.com/hajimes/mmh3/issues/48)).\n\n### Fixed\n\n- Fix incorrect type hints.\n\n## [4.0.0] - 2023-05-22\n\n### Added\n\n- Add experimental support for `hashlib`-compliant hasher classes\n  ([#39](https://github.com/hajimes/mmh3/issues/39)). Note that they are not yet\n  fully tuned for performance.\n- Add support for type hints ([#44](https://github.com/hajimes/mmh3/issues/44)).\n- Add wheels for more platforms (`musllinux`, `s390x`, `win_arm64`, and\n  `macosx_universal2`).\n- Add a code of conduct (the ACM Code of Ethics and Professional Conduct).\n\n### Changed\n\n- Switch license from CC0 to MIT\n  ([#43](https://github.com/hajimes/mmh3/issues/43)).\n\n### Removed\n\n- **Backward-incompatible**: A hash function now returns the same value under\n  big-endian platforms as that under little-endian ones\n  ([#47](https://github.com/hajimes/mmh3/issues/47)).\n- **Backward-incompatible**: Remove the `__version__` constant from the module\n  ([#42](https://github.com/hajimes/mmh3/issues/42)). Use `importlib.metadata`\n  instead.\n- Drop support for Python 3.7, as it will reach the end of life on 2023-06-27.\n\n## [3.1.0] - 2023-03-24\n\n### Added\n\n- Add support for Python 3.10 and 3.11\n  ([#35](https://github.com/hajimes/mmh3/pull/35),\n  [#37](https://github.com/hajimes/mmh3/pull/37)).\n- Add support for 32-bit architectures such as `i686` and `armv7l`. From now on,\n  `hash()` and `hash_from_buffer()` on these architectures will generate the\n  same hash values as those on other environments\n  ([#40](https://github.com/hajimes/mmh3/pull/40)).\n- In relation to the above, `manylinux2014_i686` wheels are now available.\n- Support for hashing huge data (>16GB)\n  ([#34](https://github.com/hajimes/mmh3/pull/34)).\n\n### Removed\n\n- Drop support for Python 3.6; remove legacy code for Python 2.x at the source\n  code level.\n\n## [3.0.0] - 2021-02-23\n\n### Added\n\n- Python wheels are now available, thanks to the power of\n  [cibuildwheel](https://github.com/joerick/cibuildwheel).\n  - Supported platforms are `manylinux1_x86_64`, `manylinux2010_x86_64`,\n    `manylinux2014_aarch64`, `win32`, `win_amd64`, `macosx_10_9_x86_64`, and\n    `macosx_11_0_arm64` (Apple Silicon).\n- Add support for newer macOS environments\n  ([#22](https://github.com/hajimes/mmh3/pull/22)).\n- Add support for Python 3.7, 3.8, and 3.9.\n\n### Changed\n\n- Migrate CI from Travis CI and AppVeyor to GitHub Actions.\n\n### Removed\n\n- Drop support for Python 2.7, 3.3, 3.4, and 3.5.\n\n## [2.5.1] - 2017-10-31\n\n### Fixed\n\n- Bugfix for `hash_bytes()` ([#15](https://github.com/hajimes/mmh3/pull/15)).\n\n## [2.5] - 2017-10-28\n\n### Added\n\n- Add `hash_from_buffer()` ([#13](https://github.com/hajimes/mmh3/pull/13)).\n- Add a keyword argument `signed`.\n\n## [2.4] - 2017-05-27\n\n### Added\n\n- Support seeds with 32-bit unsigned integers\n  ([#6](https://github.com/hajimes/mmh3/pull/6)).\n- Support 64-bit data (under 64-bit environments)\n- Add unit testing and continuous integration with Travis CI and AppVeyor.\n\n### Fixed\n\n- Fix compile errors for Python 3.6 under Windows systems.\n\n## [2.3.2] - 2017-05-26\n\n### Changed\n\n- Relicensed from public domain to CC0-1.0.\n\n## [2.3.1] - 2015-06-07\n\n### Fixed\n\n- Fix compile errors for gcc >=5.\n\n## [2.3] - 2013-12-08\n\n### Added\n\n- Add `hash128()`, which returns a 128-bit signed integer\n  ([#3](https://github.com/hajimes/mmh3/pull/3)).\n\n### Fixed\n\n- Fix a misplaced operator which could cause memory leak in a rare condition\n  ([#2](https://github.com/hajimes/mmh3/pull/2)).\n- Fix a malformed value to a Python/C API function which may cause runtime\n  errors in recent Python 3.x versions.\n\n## [2.2] - 2013-03-03\n\n### Added\n\n- Improve portability to support systems with old gcc (version < 4.4) such as\n  CentOS/RHEL 5.x\n  ([#1](https://github.com/hajimes/mmh3/pull/1)).\n\n## [2.1] - 2013-02-25\n\n### Added\n\n- Add `__version__` constant. Check if it exists when the following revision\n  matters for your application.\n\n### Changed\n\n- Incorporate the revision r147, which includes robustness improvement and minor\n  tweaks.\n\nBeware that due to this revision, **the result of 32-bit version of 2.1 is NOT\nthe same as that of 2.0**. E.g.,:\n\n```pycon\n>>> mmh3.hash(\"foo\") # in mmh3 2.0\n-292180858\n>>> mmh3.hash(\"foo\") # in mmh3 2.1\n-156908512\n```\n\nThe results of hash64 and hash_bytes remain unchanged. Austin Appleby, the\nauthor of Murmurhash, ensured this revision was the final modification to\nMurmurHash3's results and any future changes would be to improve performance\nonly.\n\n## [2.0] - 2011-06-07\n\n### Added\n\n- Support both Python 2.7 and 3.x.\n\n### Changed\n\n- Change the module interface.\n\n## [1.0] - 2011-04-27\n\n### Added\n\n- As\n  [Softpedia collected mmh3 1.0 on April 27, 2011](https://web.archive.org/web/20110430172027/https://linux.softpedia.com/get/Programming/Libraries/mmh3-68314.shtml),\n  it must have been uploaded to PyPI on or slightly before this date.\n\n[5.2.1]: https://github.com/hajimes/mmh3/compare/v5.2.0...v5.2.1\n[5.2.0]: https://github.com/hajimes/mmh3/compare/v5.1.0...v5.2.0\n[5.1.0]: https://github.com/hajimes/mmh3/compare/v5.0.1...v5.1.0\n[5.0.1]: https://github.com/hajimes/mmh3/compare/v5.0.0...v5.0.1\n[5.0.0]: https://github.com/hajimes/mmh3/compare/v4.1.0...v5.0.0\n[4.1.0]: https://github.com/hajimes/mmh3/compare/v4.0.1...v4.1.0\n[4.0.1]: https://github.com/hajimes/mmh3/compare/v4.0.0...v4.0.1\n[4.0.0]: https://github.com/hajimes/mmh3/compare/v3.1.0...v4.0.0\n[3.1.0]: https://github.com/hajimes/mmh3/compare/v3.0.0...v3.1.0\n[3.0.0]: https://github.com/hajimes/mmh3/compare/v2.5.1...v3.0.0\n[2.5.1]: https://github.com/hajimes/mmh3/compare/v2.5...v2.5.1\n[2.5]: https://github.com/hajimes/mmh3/compare/v2.4...v2.5\n[2.4]: https://github.com/hajimes/mmh3/compare/v2.3.2...v2.4\n[2.3.2]: https://github.com/hajimes/mmh3/compare/v2.3.1...v2.3.2\n[2.3.1]: https://github.com/hajimes/mmh3/compare/v2.3...v2.3.1\n[2.3]: https://github.com/hajimes/mmh3/compare/v2.2...v2.3\n[2.2]: https://github.com/hajimes/mmh3/compare/v2.1...v2.2\n[2.1]: https://github.com/hajimes/mmh3/compare/v2.0...v2.1\n[2.0]: https://github.com/hajimes/mmh3/releases/tag/v2.0\n[1.0]: https://web.archive.org/web/20110430172027/https://linux.softpedia.com/get/Programming/Libraries/mmh3-68314.shtml\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2011-2026 Hajime Senuma\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE."
  },
  {
    "path": "README.md",
    "content": "# mmh3\n\n[![Documentation Status](https://readthedocs.org/projects/mmh3/badge/?version=stable)](https://mmh3.readthedocs.io/en/stable/)\n[![GitHub Super-Linter](https://github.com/hajimes/mmh3/actions/workflows/superlinter.yml/badge.svg?branch=master)](https://github.com/hajimes/mmh3/actions?query=workflow%3ASuper-Linter+branch%3Amaster)\n[![Build](https://github.com/hajimes/mmh3/actions/workflows/build.yml/badge.svg?branch=master)](https://github.com/hajimes/mmh3/actions/workflows/build.yml?branch=master)\n[![PyPi Version](https://img.shields.io/pypi/v/mmh3.svg?style=flat-square&logo=pypi&logoColor=white)](https://pypi.org/project/mmh3/)\n[![Python Versions](https://img.shields.io/pypi/pyversions/mmh3.svg)](https://pypi.org/project/mmh3/)\n[![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/license/mit/)\n[![Total Downloads](https://static.pepy.tech/badge/mmh3)](https://pepy.tech/projects/mmh3?versions=*%2C5.*%2C4.*%2C3.*%2C2.*)\n[![Recent Downloads](https://static.pepy.tech/badge/mmh3/month)](https://pepy.tech/projects/mmh3?versions=*%2C5.*%2C4.*%2C3.*%2C2.*)\n[![DOI](https://joss.theoj.org/papers/10.21105/joss.06124/status.svg)](https://doi.org/10.21105/joss.06124)\n\n`mmh3` is a Python extension for\n[MurmurHash (MurmurHash3)](https://en.wikipedia.org/wiki/MurmurHash), a set of\nfast and robust non-cryptographic hash functions invented by Austin Appleby.\n\nBy combining `mmh3` with probabilistic techniques like\n[Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter),\n[MinHash](https://en.wikipedia.org/wiki/MinHash), and\n[feature hashing](https://en.wikipedia.org/wiki/Feature_hashing), you can\ndevelop high-performance systems in fields such as data mining, machine\nlearning, and natural language processing.\n\nAnother popular use of `mmh3` is to\n[calculate favicon hashes](https://gist.github.com/yehgdotnet/b9dfc618108d2f05845c4d8e28c5fc6a),\nwhich are utilized by [Shodan](https://www.shodan.io), the world's first IoT\nsearch engine.\n\nThis page provides a quick start guide. For more comprehensive information,\nplease refer to the [documentation](https://mmh3.readthedocs.io/en/stable/).\n\n## Installation\n\n```shell\npip install mmh3\n```\n\n## Usage\n\n### Basic usage\n\n```pycon\n>>> import mmh3\n>>> mmh3.hash(b\"foo\") # returns a 32-bit signed int\n-156908512\n>>> mmh3.hash(\"foo\") # accepts str (UTF-8 encoded)\n-156908512\n>>> mmh3.hash(b\"foo\", 42) # uses 42 as the seed\n-1322301282\n>>> mmh3.hash(b\"foo\", 0, False) # returns a 32-bit unsigned int\n4138058784\n```\n\n`mmh3.mmh3_x64_128_digest()`, introduced in version 5.0.0, efficienlty hashes\nbuffer objects that implement the buffer protocol\n([PEP 688](https://peps.python.org/pep-0688/)) without internal memory copying.\nThe function returns a `bytes` object of 16 bytes (128 bits). It is\nparticularly suited for hashing large memory views, such as\n`bytearray`, `memoryview`, and `numpy.ndarray`, and performs faster than\nthe 32-bit variants like `hash()` on 64-bit machines.\n\n```pycon\n>>> mmh3.mmh3_x64_128_digest(numpy.random.rand(100))\nb'\\x8c\\xee\\xc6z\\xa9\\xfeR\\xe8o\\x9a\\x9b\\x17u\\xbe\\xdc\\xee'\n```\n\nVarious alternatives are available, offering different return types (e.g.,\nsigned integers, tuples of unsigned integers) and optimized for different\narchitectures. For a comprehensive list of functions, refer to the\n[API Reference](https://mmh3.readthedocs.io/en/stable/api.html).\n\n### `hashlib`-style hashers\n\n`mmh3` implements hasher objects with interfaces similar to those in `hashlib`\nfrom the standard library, although they are still experimental. See\n[Hasher Classes](https://mmh3.readthedocs.io/en/stable/api.html#hasher-classes)\nin the API Reference for more information.\n\n## Changelog\n\nSee [Changelog (latest version)](https://mmh3.readthedocs.io/en/latest/changelog.html)\nfor the complete changelog.\n\n### [5.2.1] - 2026-03-06\n\n#### Added\n\n- Add support for the Android wheel for Python 3.14.\n\n#### Removed\n\n- Drop support for Python 3.9, as it has reached the end of life on 2025-10-31.\n\n### [5.2.0] - 2025-07-29\n\n#### Added\n\n- Add support for Python 3.14, including 3.14t (no-GIL) wheels. However, thread\n  safety for the no-GIL variant is not fully tested yet. Please report any\n  issues you encounter ([#134](https://github.com/hajimes/mmh3/pull/134),\n  [#136](https://github.com/hajimes/mmh3/pull/136)).\n- Add support for Android (Python 3.13 only) and iOS (Python 3.13 and 3.14) wheels,\n  enabled by the major version update of\n  [cibuildwheel](https://github.com/pypa/cibuildwheel)\n  ([#135](https://github.com/hajimes/mmh3/pull/135)).\n\n### [5.1.0] - 2025-01-25\n\n#### Added\n\n- Improve the performance of `hash128()`, `hash64()`, and `hash_bytes()` by\n  using\n  [METH_FASTCALL](https://docs.python.org/3/c-api/structures.html#c.METH_FASTCALL),\n  reducing the overhead of function calls\n  ([#116](https://github.com/hajimes/mmh3/pull/116)).\n- Add the software paper for this library\n  ([doi:10.21105/joss.06124](https://doi.org/10.21105/joss.06124)), following\n  its publication in the\n  [_Journal of Open Source Software_](https://joss.theoj.org)\n  ([#118](https://github.com/hajimes/mmh3/pull/118)).\n\n#### Removed\n\n- Drop support for Python 3.8, as it has reached the end of life on 2024-10-07\n  ([#117](https://github.com/hajimes/mmh3/pull/117)).\n\n## License\n\n[MIT](https://github.com/hajimes/mmh3/blob/master/LICENSE), unless otherwise\nnoted within a file.\n\n## Frequently Asked Questions\n\n### Different results from other MurmurHash3-based libraries\n\nBy default, `mmh3` returns **signed** values for the 32-bit and 64-bit versions\nand **unsigned** values for `hash128` due to historical reasons. To get the\ndesired result, use the `signed` keyword argument.\n\nStarting from version 4.0.0, **`mmh3` is endian-neutral**, meaning that its\nhash functions return the same values on big-endian platforms as they do on\nlittle-endian ones. In contrast, the original C++ library by Appleby is\nendian-sensitive. If you need results that comply with the original library on\nbig-endian systems, please use version 3.\\*.\n\nFor compatibility with [Google Guava (Java)](https://github.com/google/guava),\nsee\n<https://stackoverflow.com/questions/29932956/murmur3-hash-different-result-between-python-and-java-implementation>.\n\nFor compatibility with\n[murmur3 (Go)](https://pkg.go.dev/github.com/spaolacci/murmur3), see\n<https://github.com/hajimes/mmh3/issues/46>.\n\n### Handling errors with negative seeds\n\nFrom the version 5.0.0, `mmh3` functions accept only **unsigned** 32-bit integer\nseeds to enable faster type-checking and conversion. However, this change may\ncause issues if you need to calculate hash values using negative seeds within\nthe range of signed 32-bit integers. For instance,\n[Telegram-iOS](https://github.com/TelegramMessenger/Telegram-iOS) uses\n`-137723950` as a hard-coded seed (bitwise equivalent to `4157243346`). To\nhandle such cases, you can convert a signed 32-bit integer to its unsigned\nequivalent by applying a bitwise AND operation with `0xffffffff`. Here's an\nexample:\n\n```pycon\n>>> mmh3.hash(b\"quux\", 4294967295)\n258499980\n>>> d = -1\n>>> mmh3.hash(b\"quux\", d & 0xffffffff)\n258499980\n```\n\nAlternatively, if the seed is hard-coded (as in the Telegram-iOS case), you can\nprecompute the unsigned value for simplicity.\n\n## Contributing Guidelines\n\nSee [Contributing](https://mmh3.readthedocs.io/en/stable/CONTRIBUTING.html).\n\n## Authors\n\nMurmurHash3 was originally developed by Austin Appleby and distributed under\npublic domain\n[https://github.com/aappleby/smhasher](https://github.com/aappleby/smhasher).\n\nPorted and modified for Python by Hajime Senuma.\n\n## External Tutorials\n\n### High-performance computing\n\nThe following textbooks and tutorials are great resources for learning how to\nuse `mmh3` (and other hash algorithms in general) for high-performance computing.\n\n- Chapter 11: _Using Less Ram_ in Micha Gorelick and Ian Ozsvald. 2014. _High\n  Performance Python: Practical Performant Programming for Humans_. O'Reilly\n  Media. [ISBN: 978-1-4493-6159-4](https://www.amazon.com/dp/1449361595).\n  - 3rd edition of the above (2025).\n    [ISBN: 978-1098165963](https://www.amazon.com/dp/1098165969/).\n- Max Burstein. February 2, 2013.\n  _[Creating a Simple Bloom Filter](http://www.maxburstein.com/blog/creating-a-simple-bloom-filter/)_.\n- Duke University. April 14, 2016.\n  _[Efficient storage of data in memory](http://people.duke.edu/~ccc14/sta-663-2016/20B_Big_Data_Structures.html)_.\n- Bugra Akyildiz. August 24, 2016.\n  _[A Gentle Introduction to Bloom Filter](https://www.kdnuggets.com/2016/08/gentle-introduction-bloom-filter.html)_.\n  KDnuggets.\n\n### Internet of things\n\n[Shodan](https://www.shodan.io), the world's first\n[IoT](https://en.wikipedia.org/wiki/Internet_of_things) search engine, uses\nMurmurHash3 hash values for [favicons](https://en.wikipedia.org/wiki/Favicon)\n(icons associated with web pages). [ZoomEye](https://www.zoomeye.org) follows\nShodan's convention.\n[Calculating these values with mmh3](https://gist.github.com/yehgdotnet/b9dfc618108d2f05845c4d8e28c5fc6a)\nis useful for OSINT and cybersecurity activities.\n\n- Jan Kopriva. April 19, 2021.\n  _[Hunting phishing websites with favicon hashes](https://isc.sans.edu/diary/Hunting+phishing+websites+with+favicon+hashes/27326)_.\n  SANS Internet Storm Center.\n- Nikhil Panwar. May 2, 2022.\n  _[Using Favicons to Discover Phishing & Brand Impersonation Websites](https://bolster.ai/blog/how-to-use-favicons-to-find-phishing-websites)_.\n  Bolster.\n- Faradaysec. July 25, 2022.\n  _[Understanding Spring4Shell: How used is it?](https://faradaysec.com/understanding-spring4shell/)_.\n  Faraday Security.\n- Debjeet. August 2, 2022.\n  _[How To Find Assets Using Favicon Hashes](https://payatu.com/blog/favicon-hash/)_.\n  Payatu.\n\n## How to Cite This Library\n\nIf you use this library in your research, it would be appreciated if you could\ncite the following paper published in the\n[_Journal of Open Source Software_](https://joss.theoj.org):\n\nHajime Senuma. 2025.\n[mmh3: A Python extension for MurmurHash3](https://doi.org/10.21105/joss.06124).\n_Journal of Open Source Software_, 10(105):6124.\n\nIn BibTeX format:\n\n```tex\n@article{senumaMmh3PythonExtension2025,\n  title = {{mmh3}: A {Python} extension for {MurmurHash3}},\n  author = {Senuma, Hajime},\n  year = {2025},\n  month = jan,\n  journal = {Journal of Open Source Software},\n  volume = {10},\n  number = {105},\n  pages = {6124},\n  issn = {2475-9066},\n  doi = {10.21105/joss.06124},\n  copyright = {http://creativecommons.org/licenses/by/4.0/}\n}\n```\n\n## Related Libraries\n\n- <https://github.com/wc-duck/pymmh3>: mmh3 in pure python (Fredrik Kihlander\n  and Swapnil Gusani)\n- <https://github.com/escherba/python-cityhash>: Python bindings for CityHash\n  (Eugene Scherba)\n- <https://github.com/veelion/python-farmhash>: Python bindings for FarmHash\n  (Veelion Chong)\n- <https://github.com/escherba/python-metrohash>: Python bindings for MetroHash\n  (Eugene Scherba)\n- <https://github.com/ifduyue/python-xxhash>: Python bindings for xxHash (Yue\n  Du)\n\n[5.2.1]: https://github.com/hajimes/mmh3/compare/v5.2.0...v5.2.1\n[5.2.0]: https://github.com/hajimes/mmh3/compare/v5.1.0...v5.2.0\n[5.1.0]: https://github.com/hajimes/mmh3/compare/v5.0.1...v5.1.0\n"
  },
  {
    "path": "benchmark/benchmark.py",
    "content": "\"\"\"Benchmark module for various hash functions.\"\"\"\n\nimport hashlib\nimport itertools\nimport math\nimport random\nimport time\nfrom collections.abc import Callable\nfrom typing import Final\n\nimport pymmh3\nimport pyperf\nimport xxhash\n\nimport mmh3\n\nK1: Final[int] = 0b1001111000110111011110011011000110000101111010111100101010000111\nK2: Final[int] = 0b1100001010110010101011100011110100100111110101001110101101001111\nMASK: Final[int] = 0xFFFFFFFFFFFFFFFF\n\n\ndef init_buffer(ba: bytearray) -> bytearray:\n    \"\"\"Initializes a byte array with a pattern.\n\n    Initializes a byte array with a pattern based on xxHash's benchmarking.\n    https://github.com/Cyan4973/xxHash/blob/dev/tests/bench/benchHash.c\n\n    Args:\n        ba: The byte array to initialize.\n\n    Returns:\n        The initialized byte array.\n    \"\"\"\n    acc = K2\n\n    for i, _ in enumerate(ba):\n        acc = (acc * K1) & MASK\n        ba[i] = acc >> 56\n\n    return ba\n\n\ndef generate_size(size: int, p: float) -> int:\n    \"\"\"Generate a random size for a buffer.\n\n    Args:\n        size: The size of the buffer to hash.\n        p: The percentage of the buffer size to vary.\n\n    Returns:\n        The random size of the buffer.\n    \"\"\"\n    lower = math.ceil(size * (1 - p))\n    upper = math.floor(size * (1 + p))\n\n    return random.randint(lower, upper)\n\n\ndef perf_hash(loops: int, f: Callable, size: int) -> float:\n    \"\"\"Benchmark a hash function.\n\n    Args:\n        loops: The number of outer loops to run.\n        f: The hash function to benchmark\n        size: The size of the buffer to hash.\n\n    Returns:\n        The time taken to hash the buffer in fractional seconds.\n    \"\"\"\n    # pylint: disable=too-many-locals\n\n    if size <= 0:\n        raise ValueError(\"size must be greater than 0\")\n\n    range_it = itertools.repeat(None, loops)\n\n    data = bytearray(size + 9)\n    data = init_buffer(data)\n\n    data0 = bytes(data[0:size])\n    data1 = bytes(data[1 : size + 1])\n    data2 = bytes(data[2 : size + 2])\n    data3 = bytes(data[3 : size + 3])\n    data4 = bytes(data[4 : size + 4])\n    data5 = bytes(data[5 : size + 5])\n    data6 = bytes(data[6 : size + 6])\n    data7 = bytes(data[7 : size + 7])\n    data8 = bytes(data[8 : size + 8])\n    data9 = bytes(data[9 : size + 9])\n\n    t0 = time.perf_counter()\n    for _ in range_it:\n        f(data0)\n        f(data1)\n        f(data2)\n        f(data3)\n        f(data4)\n        f(data5)\n        f(data6)\n        f(data7)\n        f(data8)\n        f(data9)\n\n    return time.perf_counter() - t0\n\n\ndef perf_hash_random(loops: int, f: Callable, size: int) -> float:\n    \"\"\"Benchmark a hash function with varying data sizes.\n\n    Args:\n        loops: The number of outer loops to run.\n        f: The hash function to benchmark\n        size: The size of the buffer to hash.\n\n    Returns:\n        The time taken to hash the buffer in fractional seconds.\n    \"\"\"\n    # pylint: disable=too-many-locals\n\n    if size <= 0:\n        raise ValueError(\"size must be greater than 0\")\n\n    range_it = itertools.repeat(None, loops)\n    random.seed(42)\n    inner_loops = 10\n    extra_size = 255\n\n    data = bytearray(size + extra_size)\n    data = init_buffer(data)\n\n    pos_list = [random.randint(0, extra_size) for _ in range(inner_loops)]\n    size_list = [generate_size(size, 0.1) for _ in range(inner_loops)]\n\n    data0 = bytes(data[pos_list[0] : pos_list[0] + size_list[0]])\n    data1 = bytes(data[pos_list[1] : pos_list[1] + size_list[1]])\n    data2 = bytes(data[pos_list[2] : pos_list[2] + size_list[2]])\n    data3 = bytes(data[pos_list[3] : pos_list[3] + size_list[3]])\n    data4 = bytes(data[pos_list[4] : pos_list[4] + size_list[4]])\n    data5 = bytes(data[pos_list[5] : pos_list[5] + size_list[5]])\n    data6 = bytes(data[pos_list[6] : pos_list[6] + size_list[6]])\n    data7 = bytes(data[pos_list[7] : pos_list[7] + size_list[7]])\n    data8 = bytes(data[pos_list[8] : pos_list[8] + size_list[8]])\n    data9 = bytes(data[pos_list[9] : pos_list[9] + size_list[9]])\n\n    t0 = time.perf_counter()\n    for _ in range_it:\n        f(data0)\n        f(data1)\n        f(data2)\n        f(data3)\n        f(data4)\n        f(data5)\n        f(data6)\n        f(data7)\n        f(data8)\n        f(data9)\n\n    return time.perf_counter() - t0\n\n\ndef perf_hash_latency(loops: int, f: Callable, size: int) -> float:\n    \"\"\"Benchmark a hash function with overhead costs with varying data sizes.\n\n    Based on xxHash's ``benchLatency`` function.\n    https://github.com/Cyan4973/xxHash/blob/dev/tests/bench/benchHash.c\n\n    Args:\n        loops: The number of outer loops to run.\n        f: The hash function to benchmark\n        size: The size of the buffer to hash.\n\n    Returns:\n        The time taken to hash the buffer in fractional seconds.\n    \"\"\"\n    # pylint: disable=too-many-locals\n\n    if size <= 0:\n        raise ValueError(\"size must be greater than 0\")\n\n    range_it = itertools.repeat(None, loops)\n    random.seed(42)\n\n    n = 0\n\n    size0 = generate_size(size, 0.1)\n    size1 = generate_size(size, 0.1)\n    size2 = generate_size(size, 0.1)\n    size3 = generate_size(size, 0.1)\n    size4 = generate_size(size, 0.1)\n    size5 = generate_size(size, 0.1)\n    size6 = generate_size(size, 0.1)\n    size7 = generate_size(size, 0.1)\n    size8 = generate_size(size, 0.1)\n    size9 = generate_size(size, 0.1)\n\n    data = bytearray(math.floor(size * 1.1) + 255)\n    view_to_hash = memoryview(bytes(init_buffer(data)))\n\n    t0 = time.perf_counter()\n    for _ in range_it:\n        n = f(view_to_hash[n : n + size0])[0]\n        n = f(view_to_hash[n : n + size1])[0]\n        n = f(view_to_hash[n : n + size2])[0]\n        n = f(view_to_hash[n : n + size3])[0]\n        n = f(view_to_hash[n : n + size4])[0]\n        n = f(view_to_hash[n : n + size5])[0]\n        n = f(view_to_hash[n : n + size6])[0]\n        n = f(view_to_hash[n : n + size7])[0]\n        n = f(view_to_hash[n : n + size8])[0]\n        n = f(view_to_hash[n : n + size9])[0]\n\n    return time.perf_counter() - t0\n\n\ndef add_cmdline_args(cmd: list, args) -> None:\n    \"\"\"Add command line arguments to the runner.\n\n    Args:\n        cmd: The command line arguments to extend.\n        args: The parsed command line arguments.\n    \"\"\"\n    cmd.extend((\"--test-hash\", args.test_hash))\n    cmd.extend((\"--test-type\", args.test_type))\n    cmd.extend((\"--test-buffer-size-max\", str(args.test_buffer_size_max)))\n\n\n# \"if hasattr\" is used to check for the existence of the function in the\n# module, to compare the performance of the current implementation with the\n# old one (version 4.1.0), which does not implement the new functions.\n# These conditions should be removed in the future.\nHASHES = {\n    \"mmh3_base_hash\": mmh3.hash,\n    \"mmh3_32\": (\n        mmh3.mmh3_32_digest if hasattr(mmh3, \"mmh3_32_digest\") else mmh3.hash_bytes\n    ),\n    \"mmh3_128\": (\n        mmh3.mmh3_x64_128_digest\n        if hasattr(mmh3, \"mmh3_x64_128_digest\")\n        else mmh3.hash128\n    ),\n    \"xxh_32\": xxhash.xxh32_digest,\n    \"xxh_64\": xxhash.xxh64_digest,\n    \"xxh3_64\": xxhash.xxh3_64_digest,\n    \"xxh3_128\": xxhash.xxh3_128_digest,\n    \"md5\": lambda ba: hashlib.md5(ba).digest(),\n    \"sha1\": lambda ba: hashlib.sha1(ba).digest(),\n    \"pymmh3_32\": pymmh3.hash,\n    \"pymmh3_128\": pymmh3.hash128,\n}\n\nBENCHMARKING_TYPES = {\n    \"naive\": perf_hash,\n    \"random\": perf_hash_random,\n    \"latency\": perf_hash_latency,\n}\n\n\nif __name__ == \"__main__\":\n    runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)\n\n    runner.argparser.add_argument(\n        \"--test-hash\",\n        type=str,\n        help=\"Type of hash function to benchmark\",\n        required=True,\n        choices=HASHES.keys(),\n    )\n\n    runner.argparser.add_argument(\n        \"--test-type\",\n        type=str,\n        help=\"Type of benchmarking to perform (experimental)\",\n        choices=BENCHMARKING_TYPES.keys(),\n        default=\"random\",\n    )\n\n    runner.argparser.add_argument(\n        \"--test-buffer-size-max\",\n        type=int,\n        help=\"The maximum size of the buffer to hash (default: 1024)\",\n        default=1024,\n    )\n\n    process_args = runner.parse_args()\n    fib1, fib2 = 1, 2\n\n    while fib1 <= process_args.test_buffer_size_max:\n        runner.bench_time_func(\n            f\"{fib1} bytes\",\n            BENCHMARKING_TYPES[process_args.test_type],\n            HASHES[process_args.test_hash],\n            fib1,\n            inner_loops=10,\n        )\n        fib1, fib2 = fib2, fib1 + fib2\n"
  },
  {
    "path": "benchmark/generate_table.py",
    "content": "# pylint: disable=R0801\n\"\"\"An ad-hoc script to generate a markdown table of benchmarking results.\n\nThis file should be incorporated into the main plot module in the future.\n\"\"\"\n\nimport argparse\nimport hashlib\nimport os\nfrom typing import TypeVar\n\nimport pandas as pd\nimport pyperf\nimport xxhash\n\nimport mmh3\n\nT = TypeVar(\"T\")\n\n\ndef pad_with_nan(data: dict[T, list[float]]) -> dict[T, list[float]]:\n    \"\"\"Pad the data with NaN values to make the length of all lists equal.\n\n    Args:\n        data: The data to pad.\n\n    Returns:\n        The padded data.\n    \"\"\"\n\n    max_len = max(len(v) for v in data.values())\n    for k, v in data.items():\n        data[k] = v + [float(\"nan\")] * (max_len - len(v))\n\n    return data\n\n\ndef ordered_intersection(list1: list[T], list2: list[T]) -> list[T]:\n    \"\"\"Return the intersection of two lists in the order of the first list.\n\n    Args:\n        list1: The first list.\n        list2: The second list.\n\n    Returns:\n        The intersection of the two lists in the order of the first list.\n    \"\"\"\n\n    return [item for item in list1 if item in list2]\n\n\nDIGEST_SIZES = {\n    \"mmh3_base_hash\": mmh3.mmh3_32().digest_size,\n    \"mmh3_32\": mmh3.mmh3_32().digest_size,\n    \"mmh3_128\": mmh3.mmh3_x64_128().digest_size,\n    \"xxh_32\": xxhash.xxh32().digest_size,\n    \"xxh_64\": xxhash.xxh64().digest_size,\n    \"xxh3_64\": xxhash.xxh3_64().digest_size,\n    \"xxh3_128\": xxhash.xxh3_128().digest_size,\n    \"md5\": hashlib.md5().digest_size,\n    \"sha1\": hashlib.sha1().digest_size,\n    \"pymmh3_32\": mmh3.mmh3_32().digest_size,\n    \"pymmh3_128\": mmh3.mmh3_x64_128().digest_size,\n}\n\nXXHASH_REFERENCE = {\n    \"mmh3_32\": 3.9,\n    \"mmh3_128\": None,\n    \"xxh_32\": 9.7,\n    \"xxh_64\": 9.1,\n    \"xxh3_64\": 31.5,\n    \"xxh3_128\": 29.6,\n    \"md5\": 0.6,\n    \"sha1\": 0.8,\n}\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"filenames\", nargs=\"+\")\n    args = parser.parse_args()\n\n    result_latency: dict[str, list[float]] = {}\n    index: list[int] = []\n\n    for file_name in args.filenames:\n        suite = pyperf.BenchmarkSuite.load(file_name)\n        base_name = os.path.basename(file_name)\n        hash_name = os.path.splitext(base_name)[0]\n\n        result_latency[hash_name] = []\n        index = []\n\n        for bench_name in suite.get_benchmark_names():\n            bench = suite.get_benchmark(bench_name)\n            data_size = int(bench_name.split(\" \")[0])\n            index.append(data_size)\n            latency_seconds = bench.median()\n            result_latency[hash_name].append(latency_seconds)\n\n    result_latency = pad_with_nan(result_latency)\n\n    ordered_hash_names = ordered_intersection(\n        list(DIGEST_SIZES.keys()), list(result_latency.keys())\n    )\n    df_latency = pd.DataFrame(result_latency, index=index)\n    df_latency = df_latency[ordered_hash_names]\n\n    df_t = df_latency.copy()\n    df_t = df_t[df_t.index <= 256]\n\n    small_data_velocity = 0.000001 / df_t.mean()\n\n    max_row = df_latency.iloc[-1]\n\n    max_row = float(index[-1]) / max_row\n    max_row = max_row / (2**30)\n\n    input_bandwidth_df = pd.DataFrame(max_row)\n    input_bandwidth_df.index.name = \"Hash\"\n    input_bandwidth_df.columns = [\"Bandwidth\"]\n\n    digest_size_series = pd.Series(DIGEST_SIZES)[ordered_hash_names]\n    input_bandwidth_df[\"Width\"] = digest_size_series * 8\n    input_bandwidth_df.sort_values(\"Bandwidth\", ascending=False, inplace=True)\n    input_bandwidth_df = input_bandwidth_df[[\"Width\", \"Bandwidth\"]]\n\n    input_bandwidth_df[\"Small Data Velocity\"] = small_data_velocity\n\n    input_bandwidth_df[\"✕ Width\"] = (\n        input_bandwidth_df[\"Width\"] * input_bandwidth_df[\"Small Data Velocity\"]\n    ).round(0)\n\n    input_bandwidth_df[\"cf. Collet (2020)\"] = pd.Series(XXHASH_REFERENCE)\n\n    # Prettify the table\n    input_bandwidth_df[\"Bandwidth\"] = input_bandwidth_df[\"Bandwidth\"].map(\n        lambda x: f\"{x:.2f} GiB/s\"\n    )\n    input_bandwidth_df[\"Small Data Velocity\"] = input_bandwidth_df[\n        \"Small Data Velocity\"\n    ].map(lambda x: f\"{x:.2f}\")\n    input_bandwidth_df[\"cf. Collet (2020)\"] = input_bandwidth_df[\n        \"cf. Collet (2020)\"\n    ].map(lambda x: f\"{x:.1f} GiB/s\" if pd.notna(x) else \"N/A\")\n\n    print(input_bandwidth_df.to_markdown())\n"
  },
  {
    "path": "benchmark/plot_graph.py",
    "content": "\"\"\"Plot the graph of the benchmark results.\"\"\"\n\nimport argparse\nimport hashlib\nimport os\nfrom typing import TypeVar\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pyperf\nimport xxhash\n\nimport mmh3\n\nT = TypeVar(\"T\")\n\n\ndef pad_with_nan(data: dict[T, list[float]]) -> dict[T, list[float]]:\n    \"\"\"Pad the data with NaN values to make the length of all lists equal.\n\n    Args:\n        data: The data to pad.\n\n    Returns:\n        The padded data.\n    \"\"\"\n\n    max_len = max(len(v) for v in data.values())\n    for k, v in data.items():\n        data[k] = v + [float(\"nan\")] * (max_len - len(v))\n\n    return data\n\n\ndef ordered_intersection(list1: list[T], list2: list[T]) -> list[T]:\n    \"\"\"Return the intersection of two lists in the order of the first list.\n\n    Args:\n        list1: The first list.\n        list2: The second list.\n\n    Returns:\n        The intersection of the two lists in the order of the first list.\n    \"\"\"\n\n    return [item for item in list1 if item in list2]\n\n\nDIGEST_SIZES = {\n    \"mmh3_base_hash\": mmh3.mmh3_32().digest_size,\n    \"mmh3_32\": mmh3.mmh3_32().digest_size,\n    \"mmh3_128\": mmh3.mmh3_x64_128().digest_size,\n    \"xxh_32\": xxhash.xxh32().digest_size,\n    \"xxh_64\": xxhash.xxh64().digest_size,\n    \"xxh3_64\": xxhash.xxh3_64().digest_size,\n    \"xxh3_128\": xxhash.xxh3_128().digest_size,\n    \"md5\": hashlib.md5().digest_size,\n    \"sha1\": hashlib.sha1().digest_size,\n    \"pymmh3_32\": mmh3.mmh3_32().digest_size,\n    \"pymmh3_128\": mmh3.mmh3_x64_128().digest_size,\n}\n\nTHROUGHPUT_FILE_NAME = \"throughput.png\"\nTHROUGHPUT_SMALL_FILE_NAME = \"throughput_small.png\"\nLATENCY_FILE_NAME = \"latency.png\"\nLATENCY_SMALL_FILE_NAME = \"latency_small.png\"\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--output-dir\", required=True)\n    parser.add_argument(\"filenames\", nargs=\"+\")\n    args = parser.parse_args()\n\n    result_latency: dict[str, list[float]] = {}\n    result_throughput: dict[str, list[float]] = {}\n    index: list[int] = []\n\n    for file_name in args.filenames:\n        suite = pyperf.BenchmarkSuite.load(file_name)\n        base_name = os.path.basename(file_name)\n        hash_name = os.path.splitext(base_name)[0]\n\n        result_throughput[hash_name] = []\n        result_latency[hash_name] = []\n        index = []\n\n        for bench_name in suite.get_benchmark_names():\n            bench = suite.get_benchmark(bench_name)\n            data_size = int(bench_name.split(\" \")[0])\n            index.append(data_size)\n            latency_seconds = bench.median()\n\n            result_throughput[hash_name].append(\n                DIGEST_SIZES[hash_name] / latency_seconds\n            )\n            result_latency[hash_name].append(latency_seconds)\n\n    result_throughput = pad_with_nan(result_throughput)\n    result_latency = pad_with_nan(result_latency)\n\n    ordered_hash_names = ordered_intersection(\n        list(DIGEST_SIZES.keys()), list(result_throughput.keys())\n    )\n    df_throughput = pd.DataFrame(result_throughput, index=index)\n    df_throughput = df_throughput[ordered_hash_names]\n    df_latency = pd.DataFrame(result_latency, index=index)\n    df_latency = df_latency[ordered_hash_names]\n\n    plt.rcParams[\"figure.dpi\"] = 72 * 3\n\n    plt.figure()\n\n    df_throughput_all = df_throughput / 1024\n    df_throughput_all.index = df_throughput_all.index / 1024\n    df_throughput_all.plot(\n        xlabel=\"Input size (KiB)\", ylabel=\"Throughput (KiB/s)\", logy=True\n    )\n    plt.savefig(os.path.join(args.output_dir, THROUGHPUT_FILE_NAME))\n\n    df_throughput_small = df_throughput / 1024 / 1024\n    df_throughput_small = df_throughput_small.drop(columns=[\"md5\", \"sha1\"])\n    df_throughput_small = df_throughput_small[df_throughput_small.index <= 2048]\n    df_throughput_small.plot(xlabel=\"Input size (bytes)\", ylabel=\"Throughput (MiB/s)\")\n    plt.savefig(os.path.join(args.output_dir, THROUGHPUT_SMALL_FILE_NAME))\n\n    df_latency_all = df_latency * 1000\n    df_latency_all.index = df_latency_all.index / 1024\n    df_latency_all.plot(xlabel=\"Input size (KiB)\", ylabel=\"Latency (ms)\")\n    plt.savefig(os.path.join(args.output_dir, LATENCY_FILE_NAME))\n\n    df_latency_small = df_latency * 1000 * 1000 * 1000\n    df_latency_small = df_latency_small.drop(columns=[\"md5\", \"sha1\"])\n    df_latency_small = df_latency_small[df_latency_small.index <= 2048]\n    df_latency_small.plot(xlabel=\"Input size (bytes)\", ylabel=\"Latency (ns)\")\n    plt.savefig(os.path.join(args.output_dir, LATENCY_SMALL_FILE_NAME))\n\n    df_throughput = pd.DataFrame(\n        result_throughput, index=df_latency.index / (1024 * 1024)\n    )\n\n    plt.close(\"all\")\n"
  },
  {
    "path": "benchmark/plot_graph_base_hash.py",
    "content": "# pylint: disable=R0801\n\"\"\"An ad-hoc script to plot the graph of the benchmark results for mmh3.hash.\n\nThis file should be incorporated into the main plot module in the future.\n\"\"\"\n\nimport argparse\nimport os\nfrom typing import TypeVar\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pyperf\n\nimport mmh3\n\nT = TypeVar(\"T\")\n\n\ndef pad_with_nan(data: dict[T, list[float]]) -> dict[T, list[float]]:\n    \"\"\"Pad the data with NaN values to make the length of all lists equal.\n\n    Args:\n        data: The data to pad.\n\n    Returns:\n        The padded data.\n    \"\"\"\n\n    max_len = max(len(v) for v in data.values())\n    for k, v in data.items():\n        data[k] = v + [float(\"nan\")] * (max_len - len(v))\n\n    return data\n\n\ndef ordered_intersection(list1: list[T], list2: list[T]) -> list[T]:\n    \"\"\"Return the intersection of two lists in the order of the first list.\n\n    Args:\n        list1: The first list.\n        list2: The second list.\n\n    Returns:\n        The intersection of the two lists in the order of the first list.\n    \"\"\"\n\n    return [item for item in list1 if item in list2]\n\n\nDIGEST_SIZES = {\n    \"mmh3_base_hash_500\": mmh3.mmh3_32().digest_size,\n    \"mmh3_base_hash_410\": mmh3.mmh3_32().digest_size,\n    \"mmh3_32_500\": mmh3.mmh3_32().digest_size,\n}\n\nLATENCY_FILE_NAME = \"latency_hash.png\"\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--output-dir\", required=True)\n    parser.add_argument(\"filenames\", nargs=\"+\")\n    args = parser.parse_args()\n\n    result_latency: dict[str, list[float]] = {}\n    index: list[int] = []\n\n    for file_name in args.filenames:\n        suite = pyperf.BenchmarkSuite.load(file_name)\n        base_name = os.path.basename(file_name)\n        hash_name = os.path.splitext(base_name)[0]\n\n        result_latency[hash_name] = []\n        index = []\n\n        for bench_name in suite.get_benchmark_names():\n            bench = suite.get_benchmark(bench_name)\n            data_size = int(bench_name.split(\" \")[0])\n            index.append(data_size)\n            latency_seconds = bench.median()\n\n            result_latency[hash_name].append(latency_seconds)\n\n    result_latency = pad_with_nan(result_latency)\n\n    ordered_hash_names = ordered_intersection(\n        list(DIGEST_SIZES.keys()), list(result_latency.keys())\n    )\n\n    df_latency = pd.DataFrame(result_latency, index=index)\n    df_latency = df_latency[ordered_hash_names]\n\n    plt.rcParams[\"figure.dpi\"] = 72 * 3\n\n    plt.figure()\n\n    df_latency_small = df_latency * 1000 * 1000 * 1000\n    df_latency_small = df_latency_small.drop(columns=[\"mmh3_32_500\"])\n    df_latency_small = df_latency_small.rename(\n        columns={\n            \"mmh3_base_hash_410\": \"hash() in mmh3 4.1.0\",\n            \"mmh3_base_hash_500\": \"hash() in mmh3 5.0.0\",\n        }\n    )\n    df_latency_small = df_latency_small[df_latency_small.index <= 2**12]\n    df_latency_small.plot(xlabel=\"Input size (bytes)\", ylabel=\"Latency (ns)\")\n    plt.savefig(os.path.join(args.output_dir, LATENCY_FILE_NAME))\n\n    plt.close(\"all\")\n"
  },
  {
    "path": "docs/CODE_OF_CONDUCT.md",
    "content": "# Code of Conduct\n\nContributors to this project are expected to follow the ACM Code of Ethics and\nProfessional Conduct, available at\n[https://www.acm.org/code-of-ethics](https://www.acm.org/code-of-ethics). The\ncurrent version of the Code and its guidelines was adopted by the ACM Council\non June 22, 2018.\n"
  },
  {
    "path": "docs/CONTRIBUTING.md",
    "content": "# Contributing\n\nThank you for your interest in contributing to the `mmh3` project. We\nappreciate your support and look forward to your contributions.\n\nPlease read [README](https://github.com/hajimes/mmh3/blob/master/README.md) to\nget an overview of the `mmh3` project, and follow our\n[Code of Conduct](./CODE_OF_CONDUCT) (ACM Code of Ethics and Professional\nConduct).\n\n## Submitting issues\n\nWe welcome your contributions, whether it's submitting a bug report or\nsuggesting a new feature through the\n[issue tracker](https://github.com/hajimes/mmh3/issues).\n\nBefore creating a new issue, please check the\n[Frequently Asked Questions section in README](https://github.com/hajimes/mmh3#frequently-asked-questions)\nto see if the problem has already been noted.\n\n## Project structure\n\nAs of version 5.1.0, the project layout is structured as follows:\n\n- `src/mmh3`\n  - `mmh3module.c`: the main file that serves as the interface between Python\n    and the MurmurHash3 c implementations.\n  - `murmurhash.c`: implementations of the MurmurHash3 family. Auto-generated\n    from Austin Appleby's original code. DO NOT edit this file manually. See\n    [README in the util directory](https://github.com/hajimes/mmh3/blob/master/util/README.md)\n    for details.\n  - `murmurhash.h`: headers and macros for MurmurHash3. Auto-generated from\n    `util/refresh.py`. DO NOT edit this file manually.\n  - `hashlib.h`: taken from\n    [CPython's code base](https://github.com/python/cpython/blob/9ce0f48e918860ffa32751a85b0fe7967723e2e3/Modules/hashlib.h).\n- `util`\n  - `refresh.py`: file that generates `src/mmh3/murmurhash.c` and\n    `src/mmh3/murmurhash.h` from the original MurmurHash3 C++ code. Edit this\n    file to modify the contents of these files.\n- `benchmark`\n  - `benchmark.py`: script to run benchmarks.\n  - `plot_graph.py`: script to plot benchmark results.\n- `docs`: project documentation directory\n- `paper`: directory containing the academic paper for this project\n- `.github/workflows`: GitHub Actions workflows\n\n## Project setup\n\nRun:\n\n```shell\ngit clone https://github.com/hajimes/mmh3.git\n```\n\nThis project uses `tox-uv` to automate testing and other tasks. You can install\n`tox-uv` by running:\n\n```shell\npipx install uv\nuv tool install tox --with tox-uv\n```\n\nIn addition, `npx` (included with `npm` >= 5.2.0) is required within the `tox`\nenvironments to run linters.\n\n## Testing and linting\n\nBefore submitting your changes, make sure to run the project's tests to ensure\neverything is working as expected.\n\nTo run all tests, use the following command:\n\n```shell\ntox\n```\n\nDuring development, you can run the tests for a specific environment by\nspecifying the environment name. For example, to run tests for a specific\nversion of Python (e.g., Python 3.13), use:\n\n```shell\ntox -e py313\n```\n\nFor type checking, run:\n\n```shell\ntox -e type\n```\n\nTo run linters with automated formatting, use:\n\n```shell\ntox -e lint\n```\n\n### (Optional) Testing on s390x\n\nWhen you have modified the code in a way which may cause endian issues, you may\nwant to locally test on s390x, the only big-endian platform officially supported\nby Python.\n\n[_Emulating a big-endian s390x with QEMU_](https://til.simonwillison.net/docker/emulate-s390x-with-qemu)\nby Simon Willison is a good introduction to Docker/QEMU settings for emulating\ns390x.\n\nIf the above does not work, you may also want to try the following:\n\n```shell\ndocker run --rm --privileged tonistiigi/binfmt --install all\ndocker buildx create --name mybuilder --use\ndocker run -it multiarch/ubuntu-core:s390x-focal /bin/bash\n```\n\n## Pull request\n\nOnce you've pushed your changes to your fork, you can\n[create a pull request (PR)](https://github.com/hajimes/mmh3/pulls) on the main\nproject repository. Please provide a clear and detailed description of your\nchanges in the PR, and reference any related issues.\n\n## util directory\n\n### Algorithm implementations used by the `mmh3` module\n\nThe `util` directory contains C files that were generated from the\n[SMHasher](https://github.com/aappleby/smhasher) C++ project by Austin Appleby.\n\nThe idea of the subproject directory loosely follows the\n[`hashlib` implementation of CPython](https://github.com/python/cpython/tree/main/Modules/_hacl).\n\n### Updating mmh3 core C code\n\nRun `tox -e build_cfiles`. This will fetch Appleby's original SMHasher project\nas a git submodule and then generate PEP 7-compliant C code from the original\nproject.\n\nTo perform further edits, add transformation code to the `refresh.py` script,\ninstead of editing `murmurhash3.*` files manually.\nThen, run `tox -e build_cfiles` again to update the `murmurhash3.*` files.\n\n### Local files\n\n1. `./util/README.md`\n1. `./util/refresh.py`\n1. `./util/FILE_HEADER`\n\n### Generated files\n\n1. `./src/mmh3/murmurhash3.c`\n1. `./src/mmh3/murmurhash3.h`\n\n## Benchmarking\n\nTo run benchmarks locally, try the following command:\n\n```shell\ntox -e benchmark -- -o OUTPUT_FILE \\\n            --test-hash HASH_NAME --test-buffer-size-max HASH_SIZE\n```\n\nwhere `OUTPUT_FILE` is the output file name (json formatted), `HASH_NAME` is\nthe name of the hash, and `HASH_SIZE` is the maximum buffer size to be tested\nin bytes.\n\nFor example,\n\n```shell\nmkdir -p _results\ntox -e benchmark -- -o _results/mmh3_128.json \\\n            --test-hash mmh3_128 --test-buffer-size-max 262144\n```\n\nAs of version 5.1.0, the following hash function identifiers are available for\nbenchmarking: `mmh3_32`, `mmh3_128`, `xxh_32`, `xxh_64`, `xxh3_64`, `xxh3_128`,\n`pymmh3_32`, `pymmh3_128`, `md5`, and `sha1`.\n\nThe owner of the repository can run the benchmark on GitHub Actions by using\nthe workflow defined in `.github/workflows/benchmark.yml`.\n\nAfter obtaining the benchmark results, you can plot graphs by `plot_graph.py`.\nThe following is an example of how to run the script:\n\n```shell\ntox -e plot -- --output-dir docs/_static RESULT_DIR/*.json\n```\n\nwhere `RESULT_DIR` is the directory containing the benchmark results.\nThe names of json files should be in the format of `HASH_IDENTIFER.json`, e.g.,\n`mmh3_128.json`.\n\n## Documentation\n\nProject documentation files are mainly written in the Markdown format and are\nlocated in the `docs`. The documentation is automatically built and\n[hosted on the Read the Docs](https://mmh3.readthedocs.io/en/latest/).\n\nTo build the documentation locally, use the following command:\n\n```shell\ntox -e docs\n```\n\nTo check the result of the built documentation, open\n`docs/_build/html/index.html` in your browser.\n"
  },
  {
    "path": "docs/CONTRIBUTORS.md",
    "content": "# Contributors\n\nThis page acknowledges contributors to the project. For details on the\nproject's history and changes, please refer to the [Changelog](./changelog.md)\npage. If you're interested in contributing, be sure to review the\n[Contributing](./CONTRIBUTING.md) guide.\n\n## Code Contributors\n\nWe gratefully acknowledge the contributions of the following individuals:\n\n- [Alexander Maznev](https://github.com/pik),\n  [#6](https://github.com/hajimes/mmh3/pull/6).\n- [@arieleizenberg](https://github.com/arieleizenberg),\n  [#34](https://github.com/hajimes/mmh3/pull/34).\n- [Micha Gorelick](https://github.com/mynameisfiber),\n  [#1](https://github.com/hajimes/mmh3/pull/1).\n- [Danil Shein](https://github.com/dshein-alt),\n  [#40](https://github.com/hajimes/mmh3/pull/40).\n- [Derek Wilson](https://github.com/underrun),\n  [#2](https://github.com/hajimes/mmh3/pull/2),\n  [#3](https://github.com/hajimes/mmh3/pull/3).\n- [Dimitri Vorona](https://github.com/alendit),\n  [#13](https://github.com/hajimes/mmh3/pull/13).\n- [@doozr](https://github.com/doozr),\n  [#15](https://github.com/hajimes/mmh3/pull/15).\n- [Dušan Nikolić](https://github.com/n-dusan),\n  [#37](https://github.com/hajimes/mmh3/pull/37).\n- [Matthew Honnibal](https://github.com/honnibal),\n  [#22](https://github.com/hajimes/mmh3/pull/22).\n- [wouter bolsterlee](https://github.com/wbolster),\n  [#35](https://github.com/hajimes/mmh3/pull/35).\n\n## Community Contributors\n\nWe would also like to thank the following contributors for their valuable\nbug reports, feature suggestions, and other contributions:\n\n- [Antoine Pitrou](https://github.com/pitrou),\n  [#10](https://github.com/hajimes/mmh3/issues/10).\n- [Benjamin Bengfort](https://github.com/bbengfort),\n  [#46](https://github.com/hajimes/mmh3/issues/46).\n- [Christian von Schultz](https://github.com/vonschultz),\n  [#50](https://github.com/hajimes/mmh3/issues/50).\n- [Dan Blanchard](https://github.com/dan-blanchard),\n  [#8](https://github.com/hajimes/mmh3/issues/8).\n- [Heather Lapointe](https://github.com/Alphadelta14),\n  [#25](https://github.com/hajimes/mmh3/issues/25).\n- [Jacques Dark](https://github.com/jqdark),\n  [#12](https://github.com/hajimes/mmh3/issues/12).\n- [Matej Spiller Muys](https://github.com/matejsp),\n  [#90](https://github.com/hajimes/mmh3/issues/90).\n- [Niklas Semmler](https://github.com/niklassemmler),\n  [#7](https://github.com/hajimes/mmh3/issues/7).\n- [Ryan](https://github.com/ryanfwy),\n  [#25](https://github.com/hajimes/mmh3/issues/25).\n- [Sebastian Kreft](https://github.com/sk-),\n  [#17](https://github.com/hajimes/mmh3/issues/17).\n- [Tom Mitchell](https://github.com/tcmitchell),\n  [#51](https://github.com/hajimes/mmh3/issues/51).\n- [Varunkumar Nagarajan](https://github.com/varunkumar),\n  [#39](https://github.com/hajimes/mmh3/issues/39).\n- [@xqdd](https://github.com/xqdd),\n  [#9](https://github.com/hajimes/mmh3/issues/9).\n- [@yzssbo](https://github.com/yzssbo),\n  [#25](https://github.com/hajimes/mmh3/issues/25).\n\n## Paper Editors and Reviewers\n\nWe extend our heartfelt thanks to the following editors and reviewers of the\n[_Journal of Open Source Software_](https://joss.theoj.org) (JOSS), whose\nfeedback greatly enhanced this project:\n\n- [Daniel S. Katz](https://github.com/danielskatz) (Managing Editor-in-Chief)\n- [Vince Knight](https://github.com/drvinceknight) (Editor)\n- [Marek Šuppa](https://github.com/mrshu) (Reviewer)\n- [Jules Pénuchot](https://github.com/JPenuchot) (Reviewer)\n- [Gaëtan Cassiers](https://github.com/cassiersg) (Reviewer)\n"
  },
  {
    "path": "docs/Makefile",
    "content": "# Minimal makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line, and also\n# from the environment for the first two.\nSPHINXOPTS    ?=\nSPHINXBUILD   ?= sphinx-build\nSOURCEDIR     = .\nBUILDDIR      = _build\n\n# Put it first so that \"make\" without argument is like \"make help\".\nhelp:\n\t@$(SPHINXBUILD) -M help \"$(SOURCEDIR)\" \"$(BUILDDIR)\" $(SPHINXOPTS) $(O)\n\n.PHONY: help Makefile\n\n# Catch-all target: route all unknown targets to Sphinx using the new\n# \"make mode\" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).\n%: Makefile\n\t@$(SPHINXBUILD) -M $@ \"$(SOURCEDIR)\" \"$(BUILDDIR)\" $(SPHINXOPTS) $(O)\n"
  },
  {
    "path": "docs/api.md",
    "content": "<!-- markdownlint-disable MD051 -->\n\n# API Reference\n\nThe MurmurHash3 algorithm has three variants:\n\n- MurmurHash3_x86_32: Generates 32-bit hashes using 32-bit arithmetic.\n- MurmurHash3_x64_128: Generates 128-bit hashes using 64-bit arithmetic.\n- MurmurHash3_x86_128: Generates 128-bit hashes using 32-bit arithmetic.\n\nThe `mmh3` library provides functions and classes for each variant.\n\nAlthough this API reference is comprehensive, you may find the following\nfunctions particularly useful:\n\n- [mmh3.hash()](#mmh3.hash): Uses the 32-bit variant as its backend and accepts\n  `bytes` or `str` as input (strings are UTF-8 encoded). This\n  function is slower than the x64_128 variant in 64-bit environments but is\n  portable across different architectures. It can also be used to calculate\n  favicon hash footprints for platforms like\n  [Shodan](https://www.shodan.io) and [ZoomEye](https://www.zoomeye.hk).\n- [mmh3.mmh3_x64_128_digest()](#mmh3.mmh3_x64_128_digest): Uses the x64_128\n  variant as its backend. This function accepts a buffer (e.g., `bytes`,\n  `bytearray`, `memoryview`, and `numpy` arrays) and returns a 128-bit hash as\n  a `bytes` object, similar to the `hashlib` module in the Python Standard\n  Library. It performs faster than the 32-bit variant on 64-bit machines.\n\nNote that **`mmh3` is endian-neutral**, while the original C++ library is\nendian-sensitive (see also\n[Frequently Asked Questions](https://github.com/hajimes/mmh3#frequently-asked-questions)).\nThis feature of `mmh3` is essential when portability across different\narchitectures is required, such as when calculating hash footprints for web\nservices.\n\n```{caution}\n[Buffer-accepting hash functions](#buffer-accepting-hash-functions) (except the\ndeprecated `hash_from_buffer`) accept positional-arguments only. Using keyword\narguments will raise a `TypeError`.\n```\n\n```{note}\nSupport for no-GIL mode (officially introduced in Python 3.14) was added in\nversion 5.2.0.\n- Basic hash functions are inherently thread-safe by design.\n- Buffer-accepting hash functions are thread-safe,\n  **provided the input buffer is thread-safe**.\n- Hasher classes are thread-safe,\n  again **assuming the input buffer is thread-safe**.\n\nHowever, thread safety under the no-GIL variant has not yet been\nfully tested as of 5.2.0. If you encounter any issues, please report them via\nthe [issue tracker](https://github.com/hajimes/mmh3/issues).\n```\n\n## Basic Hash Functions\n\nThe following functions are used to hash immutable types, specifically\n`bytes` and `str`. String inputs are automatically converted to `bytes` using\nUTF-8 encoding before hashing.\n\nAlthough `hash128()`, `hash64()`, and `mmh3.hash_bytes()` are provided for\ncompatibility with previous versions and are not marked for deprecation,\nthe [buffer-accepting hash functions](#buffer-accepting-hash-functions)\nintroduced in version 5.1.0 are recommended for new code.\n\n```{eval-rst}\n.. autofunction:: mmh3.hash\n.. autofunction:: mmh3.hash128\n.. autofunction:: mmh3.hash64\n.. autofunction:: mmh3.hash_bytes\n```\n\n## Buffer-Accepting Hash Functions\n\nThe following functions are used to hash types that implement the buffer\nprotocol such as `bytes`, `bytearray`, `memoryview`, and `numpy` arrays.\n\n```{seealso}\nThe buffer protocol,\n[originally implemented as a part of Python/C API](https://docs.python.org/3/c-api/buffer.html),\nwas formally defined as a Python-level API in\n[PEP 688](https://peps.python.org/pep-0688/)\nin 2022 and its corresponding type hint was introduced as\n[collections.abc.Buffer](https://docs.python.org/3/library/collections.abc.html#collections.abc.Buffer)\nin Python 3.12. For earlier Python versions, `mmh3` uses a type alias for the\ntype hint\n[\\_typeshed.ReadableBuffer](https://github.com/python/typeshed/blob/d326c9bd424ad60c2b63c2ca1c5c1006c61c3562/stdlib/_typeshed/__init__.pyi#L281),\nwhich is itself an alias for\n[typing_extensions.Buffer](https://typing-extensions.readthedocs.io/en/stable/#typing_extensions.Buffer),\nthe backported type hint for `collections.abc.Buffer`.\n```\n\n```{eval-rst}\n.. autofunction:: mmh3.hash_from_buffer\n.. autofunction:: mmh3.mmh3_32_digest\n.. autofunction:: mmh3.mmh3_32_sintdigest\n.. autofunction:: mmh3.mmh3_32_uintdigest\n.. autofunction:: mmh3.mmh3_x64_128_digest\n.. autofunction:: mmh3.mmh3_x64_128_sintdigest\n.. autofunction:: mmh3.mmh3_x64_128_stupledigest\n.. autofunction:: mmh3.mmh3_x64_128_uintdigest\n.. autofunction:: mmh3.mmh3_x64_128_utupledigest\n.. autofunction:: mmh3.mmh3_x86_128_digest\n.. autofunction:: mmh3.mmh3_x86_128_sintdigest\n.. autofunction:: mmh3.mmh3_x86_128_stupledigest\n.. autofunction:: mmh3.mmh3_x86_128_uintdigest\n.. autofunction:: mmh3.mmh3_x86_128_utupledigest\n```\n\n## Hasher Classes\n\n`mmh3` implements hashers with interfaces similar to those in `hashlib` from\nthe standard library: `mmh3_32()` for 32-bit hashing, `mmh3_x64_128()` for\n128-bit hashing optimized for x64 architectures, and `mmh3_x86_128()` for\n128-bit hashing optimized for x86 architectures.\n\nIn addition to the standard `digest()` method, each hasher provides\n`sintdigest()`, which returns a signed integer, and `uintdigest()`, which\nreturns an unsigned integer. The 128-bit hashers also include `stupledigest()`\nand `utupledigest()`, which return two 64 bit integers.\n\nPlease note that as of version 5.0.0, the implementation is still experimental,\nand performance may be unsatisfactory (particularly `mmh3_x86_128()`).\nAdditionally, `hexdigest()` is not supported; use `digest().hex()` instead.\n\n```pycon\n>>> import mmh3\n>>> hasher = mmh3.mmh3_x64_128(b\"foo\", 42) # seed=42\n>>> hasher.update(b\"bar\")\n>>> hasher.digest()\nb'\\x82_n\\xdd \\xac\\xb6j\\xef\\x99\\xb1e\\xc4\\n\\xc9\\xfd'\n>>> hasher.sintdigest() # 128 bit signed int\n-2943813934500665152301506963178627198\n>>> hasher.uintdigest() # 128 bit unsigned int\n337338552986437798311073100468589584258\n>>> hasher.stupledigest() # two 64 bit signed ints\n(7689522670935629698, -159584473158936081)\n>>> hasher.utupledigest() # two 64 bit unsigned ints\n(7689522670935629698, 18287159600550615535)\n```\n\n```{eval-rst}\n.. autoclass:: mmh3.mmh3_32\n   :members:\n```\n\n```{eval-rst}\n.. autoclass:: mmh3.mmh3_x64_128\n   :members:\n```\n\n```{eval-rst}\n.. autoclass:: mmh3.mmh3_x86_128\n   :members:\n```\n"
  },
  {
    "path": "docs/benchmark.md",
    "content": "# Benchmarks\n\n## Settings\n\n### Machine\n\n- Ubuntu 22.04 instance on GitHub Actions\n  - The benchmarking suits are implemented as GitHub Actions workflows.\n  - [4 processors, 16 GB RAM, 14 GB storage (SSD)](https://docs.github.com/en/actions/using-github-hosted-runners/using-github-hosted-runners/about-github-hosted-runners#standard-github-hosted-runners-for-public-repositories)\n  - According to profiling with `pyperf`, each processor operates at a\n    frequency between 2.4 and 3.3 GHz.\n- Tuning by the folloiwing settings:\n  - All tests in a benchmarking suite are executed within the same GitHub\n    Actions job. For more details, refer to\n    [Rodríguez-Guerra (2021)](https://labs.quansight.org/blog/2021/08/github-actions-benchmarks).\n  - [CPU pinning](https://manuel.bernhardt.io/posts/2023-11-16-core-pinning/)\n    to isolate the benchmarking process.\n  - See the\n    [documentation of pyperf](https://pyperf.readthedocs.io/en/latest/system.html)\n    for more details on the following settings:\n    - Stop `irqbalance`.\n    - Set `/proc/irq/default_smp_affinity` to `3` (CPU 0 and 1), where\n      the benchmarking processes are pinned to CPU 2 and 3.\n    - Set `/proc/sys/kernel/perf_event_max_sample_rate` to `1`.\n    - `/proc/sys/kernel/randomize_va_space` = 2 (default)\n\n### Software\n\n- Python environment:\n  - CPython 3.12.5 (64-bit)\n- Hash libraries:\n  - mmh3 5.0.0-dev\n  - [python-xxhash](https://github.com/ifduyue/python-xxhash) 3.5.0\n  - [hashlib](https://docs.python.org/3/library/hashlib.html) (Standard library)\n    - `md5` is tested for `lambda x: hashlib.md5(x).digest()`, and so is `sha1`.\n      therefore, the results for these functions include\n      the overhead of creating the hash object and a function call.\n- Benchmarking library:\n  - [pyperf](https://github.com/psf/pyperf) 2.7.0\n    - Used the\n      [bench_time_func](https://pyperf.readthedocs.io/en/latest/api.html#Runner.bench_time_func)\n      interface to eliminate the overhead of the function call.\n    - Processed time are measured by\n      [time.perf_counter()](https://docs.python.org/3/library/time.html#time.perf_counter)\n      in nanoseconds.\n\n## Method\n\n- A benchmarking test is performed for each specified byte size, which is\n  derived from the Fibonacci sequence.\n- For each input size, the test generates a set of 10 `bytes` instances, where\n  each instance's size is pseudo-randomly selected from the range\n  `[ceil(input * 0.9), floor(input * 1.1)]`.\n  - This randomization is crucial as it increases the difficulty of branch\n    predictions, creating a more realistic scenario. For further details, see\n    [xxHash: Performance comparison](https://github.com/Cyan4973/xxHash/wiki/Performance-comparison#throughput-on-small-data-of-random-length-1-n).\n- This inner loop of 10 iterations is repeated for a certain number of cycles,\n  referred to as the outer loop, which is auto-calibrated by `pyperf`.\n- To avoid the overhead during the loop, iterators are pre-generated\n  using `itertools.repeat()` outside the loop. See\n  [Peters (2002)](https://www.oreilly.com/library/view/python-cookbook/0596001673/ch17.html)\n  and the real code of the `timeit` module in the Python Standard Library.\n- The final result are measured using the median, as it is more robust than the\n  mean, especially on untuned or unstable environments such as GitHub\n  Actions. For more details, see\n  [pyperf: Analyze benchmark results](https://pyperf.readthedocs.io/en/latest/analyze.html).\n\n## Results\n\nThe resulting graphs are plotted using the `pandas` and `matplotlib` libraries.\n\n### Comparison of Version Improvements\n\nJSON files containing the benchmark results are available at:\n[hajimes/mmh3-benchmarks/results_basic-hash/2024-09-17_6bb9987](https://github.com/hajimes/mmh3-benchmarks/tree/main/results_basic-hash/2024-09-17_6bb9987)\n\n```{figure} _static/latency_hash.png\n:alt: Latency for hash() in version 4.1.0 and 5.0.0.\n:align: center\n\nFigure 1: Latency for `mmh3.hash()` in version 4.1.0 and 5.0.0.\nSmaller is better.\n```\n\n### Comparison of Hash Functions Across Libraries\n\nJSON files containing the benchmark results are available at:\n[hajimes/mmh3-benchmarks/results/2024-09-17_30da46e](https://github.com/hajimes/mmh3-benchmarks/tree/main/results/2024-09-17_30da46e)\n\nIn the following graphs:\n\n- `mmh32_32` refers to `mmh3.mmh3_32_digest()`.\n  32-bit output using 32-bit arithmetic. Developed in 2011.\n- `mmh3_128` refers to `mmh3.mmh3_x64_128_digest()`.\n  128-bit output using 64-bit arithmetic. Developed in 2011.\n- `xxh_32` refers to `xxhash.xxh32_digest()`.\n  32-bit output using 32-bit arithmetic. Developed in 2014.\n- `xxh_64` refers to `xxhash.xxh64_digest()`.\n  64-bit output using 64-bit arithmetic. Developed in 2014.\n- `xxh3_64` refers to `xxhash.xxh3_64_digest()`.\n  64-bit output using vectorized arithmetic. Developed in 2020.\n- `xxh3_128` refers to `xxhash.xxh3_64_digest()`.\n  128-bit output using vectorized arithmetic. Developed in 2020.\n- `md5` refers to `hashlib.md5()`.\n  128-bit output using a cryptogprahic algorithm. Developed in 1992.\n- `sha1` refers to `hashlib.sha1()`.\n  160-bit output using a cryptogprahic algorithm. Developed in 1995.\n\n```{figure} _static/latency_small.png\n:alt: Latency for small data\n:align: center\n\nFigure 2: Latency for small data. Smaller is better.\n```\n\n```{figure} _static/latency.png\n:alt: Latency for large data\n:align: center\n\nFigure 3: Latency for large data. Smaller is better.\n```\n\nThe following graphs show the throughput, measured as the size of hash output\ngenerated per second by each function.\n\n```{figure} _static/throughput_small.png\n:alt: Throughput for small data\n:align: center\n\nFigure 4: Throughput for small data. Larger is better.\n```\n\n```{figure} _static/throughput.png\n:alt: Throughput for large data\n:align: center\n\nFigure 5: Throughput for large data. Larger is better. The y-axis is logscale.\n```\n\n## Concluding Remarks\n\nVersion 5.0.0 of the `mmh3` library has improved the performance of the\n`hash()` function and other new functions by adopting\n[METH_FASTCALL](https://docs.python.org/3/c-api/structures.html#c.METH_FASTCALL).\nThis enhancement reduces the overhead of function calls. For data sizes\nbetween 1–2 KB (such as 48x48 favicons), performance has improved by 10%–20%.\nFor smaller data (~500 bytes, like 16x16 favicons), performance increases by\napproximately 30%. However, the performance gain from this revision remains\nconstant, meaning the relative improvement diminishes as data size increases.\n\nWhen comparing hash functions across libraries, `mmh3 5.0.0` is the most\nperformant for small data sizes, while the `xxh3` families in `xxhash 3.5.0`\nexcel with larger data. This is largely due to the new version of `mmh3`\nutilizing `METH_FASTCALL`, which reduces the overhead of function calls.\nHowever, `xxhash` may adopt the same interface in the future, potentially\nmaking this advantage temporary. To further improve `mmh3` performance,\nthe core algorithm itself would need an overhaul.\n\nOverall, these benchmarking results serve as a useful reference when selecting\na hash function for your application, and they provide a solid foundation for\nfuture performance enhancements to our library.\n\n## References\n\n- Python Standard Library.\n  [timeit](https://docs.python.org/3/library/timeit.html).\n- [pyperf: Analyze benchmark results](https://pyperf.readthedocs.io/en/latest/analyze.html).\n- [pyperf: Tune the system for benchmarks](https://pyperf.readthedocs.io/en/latest/system.html).\n- [pyperf issues #1: Use a better measures than average and standard deviation #1](https://github.com/psf/pyperf/issues/1).\n- [pyperf issues #75: Reconsidering min()?](https://github.com/psf/pyperf/issues/75).\n- [pytest-benchmark: Usage](https://pytest-benchmark.readthedocs.io/en/latest/usage.html).\n- [xxHash: Performance comparison](https://github.com/Cyan4973/xxHash/wiki/Performance-comparison).\n- [xxHash benchmark program](https://github.com/Cyan4973/xxHash/tree/release/tests/bench).\n- Manuel Bernhardt. 2023.\n  [On pinning and isolating CPU cores](https://manuel.bernhardt.io/posts/2023-11-16-core-pinning/).\n- Micha Gorelick and Ian Ozsvald. 2020.\n  [High Performance Python: Practical Performant Programming for Humans, 2nd ed](https://www.oreilly.com/library/view/high-performance-python/9781492055013/).\n  O'Reilly Media. ISBN: 978-1492055020. Chapter 2.\n- Tim Peters. 2002.\n  [Chapter 17. Algorithms: Introduction](https://www.oreilly.com/library/view/python-cookbook/0596001673/ch17.html)\n  in _Python Cookbook_,\n  3rd ed. O'Reilly Media. ISBN: 978-0596001674.\n- Jaime Rodríguez-Guerra. 2021.\n  [Is GitHub Actions suitable for running benchmarks?](https://labs.quansight.org/blog/2021/08/github-actions-benchmarks).\n- Victor Stinner. 2016.\n  [My journey to stable benchmark, part 1 (system)](https://vstinner.github.io/journey-to-stable-benchmark-system.html).\n- Victor Stinner. 2016. [My journey to stable benchmark, part 3 (average)](https://vstinner.github.io/journey-to-stable-benchmark-average.html).\n"
  },
  {
    "path": "docs/changelog.md",
    "content": "<!-- markdownlint-disable -->\n\n```{include} ../CHANGELOG.md\n\n```\n"
  },
  {
    "path": "docs/conf.py",
    "content": "# pylint: disable=C0114,C0103\n# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# import os\n# import sys\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = \"mmh3\"\nproject_copyright = \"2011-2025, Hajime Senuma\"\nauthor = \"Hajime Senuma\"\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\n    \"sphinx.ext.autodoc\",\n    \"sphinx.ext.napoleon\",\n    \"sphinx_copybutton\",\n    \"myst_parser\",\n]\n\ntemplates_path = [\"_templates\"]\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = \"shibuya\"\nhtml_static_path = [\"_static\"]\nhtml_theme_options = {\n    \"github_url\": \"https://github.com/hajimes/mmh3\",\n}\n\n# -- Options for autodoc -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html\n\nautodoc_member_order = \"groupwise\"\n\nmyst_heading_anchors = 3\n"
  },
  {
    "path": "docs/index.rst",
    "content": "mmh3 documentation\n==================\nmmh3 is a Python extension for\n`MurmurHash (MurmurHash3) <https://en.wikipedia.org/wiki/MurmurHash>`_,\na set of fast and robust non-cryptographic hash functions invented by Austin\nAppleby.\n\n.. toctree::\n   :maxdepth: 2\n   :caption: User Guideline\n\n   Quickstart<quickstart>\n   api\n   benchmark\n   Changelog<changelog>\n   CONTRIBUTORS\n\n.. toctree::\n   :maxdepth: 2\n   :caption: Project documentation\n\n   CONTRIBUTING\n   CODE_OF_CONDUCT\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`search`"
  },
  {
    "path": "docs/make.bat",
    "content": "@ECHO OFF\r\n\r\npushd %~dp0\r\n\r\nREM Command file for Sphinx documentation\r\n\r\nif \"%SPHINXBUILD%\" == \"\" (\r\n\tset SPHINXBUILD=sphinx-build\r\n)\r\nset SOURCEDIR=.\r\nset BUILDDIR=_build\r\n\r\n%SPHINXBUILD% >NUL 2>NUL\r\nif errorlevel 9009 (\r\n\techo.\r\n\techo.The 'sphinx-build' command was not found. Make sure you have Sphinx\r\n\techo.installed, then set the SPHINXBUILD environment variable to point\r\n\techo.to the full path of the 'sphinx-build' executable. Alternatively you\r\n\techo.may add the Sphinx directory to PATH.\r\n\techo.\r\n\techo.If you don't have Sphinx installed, grab it from\r\n\techo.https://www.sphinx-doc.org/\r\n\texit /b 1\r\n)\r\n\r\nif \"%1\" == \"\" goto help\r\n\r\n%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%\r\ngoto end\r\n\r\n:help\r\n%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%\r\n\r\n:end\r\npopd\r\n"
  },
  {
    "path": "docs/quickstart.md",
    "content": "<!-- markdownlint-disable -->\n\n```{include} ../README.md\n\n```\n"
  },
  {
    "path": "paper/paper.bib",
    "content": "@article{adja_blockchain-based_2021,\n\ttitle        = {A blockchain-based certificate revocation management and status verification system},\n\tauthor       = {Adja, Yves Christian Elloh and Hammi, Badis and Serhrouchni, Ahmed and Zeadally, Sherali},\n\tyear         = 2021,\n\tjournal      = {Computers \\& Security},\n\tvolume       = 104,\n\tpages        = 102209,\n\tdoi          = {10.1016/j.cose.2021.102209},\n\tissn         = {0167-4048},\n\turl          = {https://www.sciencedirect.com/science/article/pii/S016740482100033X},\n\tkeywords     = {Authentication, Blockchain, Bloom filter, Certificate, Decentralization, PKI, Revocation, Security, X509}\n}\n@misc{appleby_murmurhash3_2011,\n\ttitle        = {{MurmurHash3} and {SMHasher}},\n\tauthor       = {Appleby, Austin},\n\tyear         = 2011,\n\turl          = {https://github.com/aappleby/smhasher}\n}\n@misc{Bernhardt2023,\n\ttitle        = {On pinning and isolating CPU cores},\n\tauthor       = {Bernhardt, Manuel},\n\tyear         = 2023,\n\turl          = {https://manuel.bernhardt.io/posts/2023-11-16-core-pinning/}\n}\n@article{Bloom1970,\n\ttitle        = {Space/Time Trade-Offs in Hash Coding with Allowable Errors},\n\tauthor       = {Bloom, Burton H.},\n\tyear         = 1970,\n\tmonth        = {jul},\n\tjournal      = {Commun. ACM},\n\tpublisher    = {Association for Computing Machinery},\n\taddress      = {New York, NY, USA},\n\tvolume       = 13,\n\tnumber       = 7,\n\tpages        = {422–426},\n\tdoi          = {10.1145/362686.362692},\n\tissn         = {0001-0782},\n\turl          = {https://doi.org/10.1145/362686.362692},\n\tissue_date   = {July 1970},\n\tnumpages     = 5,\n\tkeywords     = {retrieval efficiency, storage efficiency, hash addressing, scatter storage, searching, storage layout, retrieval trade-offs, hash coding}\n}\n@inproceedings{Broder1997a,\n\ttitle        = {On the resemblance and containment of documents},\n\tauthor       = {Broder, Andrei Z.},\n\tyear         = 1997,\n\tbooktitle    = {Proceedings. Compression and Complexity of {SEQUENCES} 1997},\n\tpublisher    = {IEEE Comput. Soc},\n\tpages        = {21--29},\n\tdoi          = {10.1109/SEQUEN.1997.666900},\n\tisbn         = {0-8186-8132-2},\n\turl          = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=666900},\n\tnote         = {ISSN: 0818681322},\n\tkeywords     = {MinHash}\n}\n@misc{collet_xxhash_2014,\n\ttitle        = {{xxHash}},\n\tauthor       = {Collet, Yan},\n\tyear         = 2014,\n\turl          = {https://github.com/Cyan4973/xxHash}\n}\n@misc{collet_xxhash_comparison_2020,\n\ttitle        = {{xxHash}: Performance comparison (2020)},\n\tauthor       = {Collet, Yan},\n\tyear         = 2020,\n\turl          = {https://github.com/Cyan4973/xxHash/wiki/Performance-comparison}\n}\n@misc{du_xxhash_2014,\n\ttitle        = {{xxhash}},\n\tauthor       = {Du, Yue},\n\tyear         = 2014,\n\turl          = {https://github.com/ifduyue/python-xxhash}\n}\n@misc{faraday_security_understanding_2022,\n\ttitle        = {Understanding {Spring4Shell}},\n\tauthor       = {{Faraday Security}},\n\tyear         = 2022,\n\tmonth        = jul,\n\turl          = {https://faradaysec.com/understanding-spring4shell/}\n}\n@book{gorelick_high_2020,\n\ttitle        = {High Performance {P}ython: Practical Performant Programming for Humans},\n\tauthor       = {Gorelick, Micha and Ozsvald, Ian},\n\tyear         = 2020,\n\tmonth        = jun,\n\tpublisher    = {O'Reilly Media},\n\tisbn         = {978-1-4920-5502-0},\n\tedition      = {2nd edition}\n}\n@software{hugo_van_kemenade_2024_13624792,\n  author       = {Van Kemenade, Hugo and\n                  Si, Richard and\n                  Dollenstein, Zsolt},\n  title        = {hugovk/top-pypi-packages: Release 2024.09},\n  month        = sep,\n  year         = 2024,\n  publisher    = {Zenodo},\n  version      = {2024.09},\n  doi          = {10.5281/zenodo.13624792},\n  url          = {https://doi.org/10.5281/zenodo.13624792}\n}\n@inproceedings{kakwani_indicnlpsuite_2020,\n\ttitle        = {{IndicNLPSuite}: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for {I}ndian Languages},\n\tauthor       = {Kakwani, Divyanshu and Kunchukuttan, Anoop and Golla, Satish and N.C., Gokul and Bhattacharyya, Avik and Khapra, Mitesh M. and Kumar, Pratyush},\n\tyear         = 2020,\n\tmonth        = nov,\n\tbooktitle    = {Findings of the {A}ssociation for {C}omputational {L}inguistics: {EMNLP} 2020},\n\tpublisher    = {Association for Computational Linguistics},\n\taddress      = {Online},\n\tpages        = {4948--4961},\n\tdoi          = {10.18653/v1/2020.findings-emnlp.445},\n\turl          = {https://aclanthology.org/2020.findings-emnlp.445}\n}\n@misc{kihlander_pymmh3_2013,\n\ttitle        = {{PYMMH3}},\n\tauthor       = {Kihlander, Fredrik and Gusani, Swapnil},\n\tyear         = 2013,\n\turl          = {https://github.com/wc-duck/pymmh3}\n}\n@techreport{kopriva_hunting_2021,\n\ttitle        = {Hunting phishing websites with favicon hashes},\n\tauthor       = {Kopriva, Jan},\n\tyear         = 2021,\n\tmonth        = apr,\n\turl          = {https://isc.sans.edu/diary/27326},\n\tinstitution  = {SANS Internet Storm Center}\n}\n@book{kumar_probabilistic_2021,\n\ttitle        = {Probabilistic Data Structures for Blockchain-Based {I}nternet of {T}hings Applications},\n\tauthor       = {Kumar, Neeraj and Miglani, Arzoo},\n\tyear         = 2021,\n\tmonth        = jan,\n\tpublisher    = {CRC Press},\n\tdoi          = {10.1201/9781003080046},\n\tisbn         = {978-0-367-52990-1}\n}\n@book{medjedovic_algorithms_2022,\n\ttitle        = {Algorithms and Data Structures for Massive Datasets},\n\tauthor       = {Medjedovic, Dzejla and Tahirovic, Emin and Dedovic, Ines},\n\tyear         = 2022,\n\tmonth        = jul,\n\tpublisher    = {Manning},\n\tisbn         = {978-1-61729-803-5}\n}\n@techreport{Matherly2017,\n\ttitle        = {Complete Guide to Shodan: Collect. Analyze. Visualize. Make Internet Intelligence Work for You.},\n\tauthor       = {Matherly, John},\n\tyear         = 2017,\n\tedition\t     = {Version 2017-08-23},\n\tinstitution  = {Shodan}\n}\n@misc{Matherly2024,\n\ttitle        = {Deep Dive: http.favicon},\n\tauthor       = {Matherly, John},\n\tyear         = 2024,\n\tmonth        = {jan},\n\turl          = {https://blog.shodan.io/deep-dive-http-favicon/},\n\tinstitution  = {Shodan}\n}\n@incollection{Peters2002,\n  author        = {Peters, Tim},\n  title         = {Algorithms: Introduction},\n  chapter       = {17},\n  year          = 2002,\n  month\t\t    = {jul},\n  booktitle     = {Python Cookbook},\n  publisher     = {O'Reilly Media},\n  editor        = {Martelli, Alex and Ascher, David},\n  edition       = {1st edition}\n}\n@techreport{RodriguezGuerra2021,\n\ttitle        = {Is GitHub Actions suitable for running benchmarks?},\n\tauthor       = {Rodríguez-Guerra, Jaime},\n\tyear         = 2021,\n\tmonth        = {aug},\n\turl          = {https://labs.quansight.org/blog/2021/08/github-actions-benchmarks},\n\tinstitution  = {Quansight Labs}\n}\n@inproceedings{Senuma2011,\n\ttitle        = {K-means Clustering with Feature Hashing},\n\tauthor       = {Senuma, Hajime},\n\tyear         = 2011,\n\tbooktitle    = {Proceedings of the 49th Annual Meeting of the {A}ssociation for {C}omputational {L}inguistics: Student Session},\n\tpages        = {122--126},\n\tnote         = {Issue: June}\n}\n@inproceedings{Senuma2016,\n\ttitle        = {Learning Succinct Models: Pipelined Compression with {L}1-Regularization, Hashing, {E}lias–{F}ano Indices, and Quantization},\n\tauthor       = {Senuma, Hajime and Aizawa, Akiko},\n\tyear         = 2016,\n\tbooktitle    = {Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers},\n\tpages        = {2774--2784}\n}\n@article{Shi2009,\n\ttitle        = {Hash Kernels for Structured Data},\n\tauthor       = {Shi, Qinfeng and Petterson, James and Dror, Gideon and Langford, John and Smola, Alex and Vishwanathan, S.V.N.},\n\tyear         = 2009,\n\tjournal      = {Journal of Machine Learning Research},\n\tvolume       = 10,\n\tpages        = {2615--2637}\n}\n@misc{shodan_its_2021,\n\ttitle        = {it's the {MMH3} hash of the http.html property. See: {PyPI} mmh3},\n\tauthor       = {Shodan},\n\tyear         = 2021,\n\tmonth        = may,\n\tjournal      = {Twitter},\n\turl          = {https://twitter.com/shodanhq/status/1395501365456261122}\n}\n@misc{Stinner2016,\n\ttitle        = {My journey to stable benchmark},\n\tauthor       = {Stinner, Victor},\n\tyear         = 2016,\n\turl          = {https://vstinner.github.io/journey-to-stable-benchmark-system.html}\n}\n@inproceedings{Tang2024,\n\ttitle        = {Data Splitting based Double Layer Encryption for\nSecure Ciphertext Deduplication in Cloud Storage},\n\tauthor       = {Tang, Xin and Jin, Luchao},\n\tyear         = 2024,\n\tjournal      = {2024 IEEE 17th International Conference on Cloud Computing (CLOUD)},\n\tpages        = {153--163},\n\tdoi          = {10.1109/CLOUD62652.2024.00027}\n}\n@inproceedings{Weinberger2009,\n\ttitle        = {Feature Hashing for Large Scale Multitask Learning},\n\tauthor       = {Weinberger, Kilian and Dasgupta, Anirban and Langford, John and Smola, Alex and Attenberg, Josh},\n\tyear         = 2009,\n\tbooktitle    = {Proceedings of the 26th International Conference on Machine Learning},\n\tdoi          = {10.1145/1553374.1553516},\n\tnote         = {arXiv: 0902.2206v5}\n}\n"
  },
  {
    "path": "paper/paper.md",
    "content": "---\ntitle: \"mmh3: A Python extension for MurmurHash3\"\ntags:\n  - Python\n  - hash\n  - high-performance computing\n  - artificial intelligence\n  - natural language processing\n  - internet of things\n  - cybersecurity\nauthors:\n  - name: Hajime Senuma\n    orcid: 0000-0001-8542-1768\n    affiliation: 1\naffiliations:\n  - name: National Institute of Informatics, Japan\n    index: 1\ndate: 15 Dec 2024\nbibliography: paper.bib\n---\n\n<!-- markdownlint-disable single-h1 -->\n\n# Summary\n\nIn recent years, artificial intelligence (AI) has rapidly evolved, particularly\nin natural language processing (NLP) with services like OpenAI's ChatGPT.\nLikewise, the Internet of Things (IoT) continues to grow as a key area of\nubiquitous computing, exemplified by Shodan, the first IoT search engine.\n\nUnderlying these advancements are high-performance algorithms and data\nstructures relying on non-cryptographic hash functions, which are\ncharacteristically fast, produce statistically well-distributed bits, exhibit\nan avalanche effect (where a one-bit change in the input alters at least half\nof the output), and are collision resistant. Because cryptographic strength is\nunnecessary in these cases, they benefit from the efficiency of\nnon-cryptographic hashes.\n\nMurmurHash3 and its test suite, SMHasher, was developed\nby @appleby_murmurhash3_2011 and is one of the earliest and most continuously\npopular hash functions specifically designed to implement the characteristics\nmentioned above.\n\n`mmh3` was launched in 2011 as a Python extension for MurmurHash3 and has been\nmaintained ever since. Its API is simple to use for Python programmers,\nas it offers both one-shot hash functions and hasher classes that allow\nincremental updating, whose methods are compliant to `hashlib`, a part of the\nPython Standard Library. The library provides Python wheels (i.e., pre-built\nbinary packages) for immediate use on various platforms, including Linux\n(x86_64, aarch64, i686, ppc64le, and s390x), Windows (win32, win_amd64,\nand win_arm64), and macOS (Intel Mac and Apple Silicon). From version 4.0.0,\n`mmh3` has been published under the MIT License, an OSI-approved permissive\nopen-source license.\n\nAs of September 1, 2024, `mmh3` was being downloaded more than 4 million times\nper month, and it ranks as the 973th most downloaded PyPI package\n(of around 566,000 projects), showing that only 0.17% of the remaining packages\nin the PyPI ecosystem are more popular [@hugo_van_kemenade_2024_13624792].\nAccording to PePy, as of September 1, 2024, the total downloads of\nthis library exceeded 130 millions.\n\nLibraries and organizations that use `mmh3` include\nShodan, Microsoft Azure SDK for Python,\nApache Iceberg (open table format for analytic datasets),\nFeast (feature store for machine learning),\nPyMilvus (Python SDK for Milvus, an open-source vector database),\nand pocsuite3 (open-source remote vulnerability testing framework).\n\n# Statement of need\n\n## AI and High-Performance Computing\n\nAI is one of the most resource-demanding fields in computer science\nand engineering. To mitigate this problem, various techniques are employed\nunder main systems, in which non-cryptographic hash functions play key roles\nin a number of algorithms and data structures.\n\nA notable technique is _feature hashing_ [@Weinberger2009; @Shi2009]. In its\nsimplest usage, when given a string-indexed data vector, it converts the\nvector into an integer-indexed data vector in which each index is the hash\nresult of the original string index; collision values are summed.\nDespite its simple and intuitive usage, a machine-learning process with feature\nhashing is statistically guaranteed to be nearly as accurate as its original\nprocess. Feature hashing has been shown to be useful for various situations,\nincluding K-means clustering [@Senuma2011]\nand succinct model learning [@Senuma2016].\n\nOther popular techniques that leverage non-cryptographic hash functions include\n_Bloom Filter_ [@Bloom1970], a compact data structure that tests whether an\nelement is a member of a certain set (with false positive matches), and\n_MinHash_ [@Broder1997a], an algorithm that quickly estimates the similarity of\ntwo sets.\n\n`mmh3` appears in scholarly papers on various topics,\nincluding Indian language NLP suites [@kakwani_indicnlpsuite_2020],\na secure system based on probabilistic structures [@adja_blockchain-based_2021],\nas well as secure ciphertext deduplication in cloud storage [@Tang2024].\nIt has also appeared in technical books and computer science texts\n[@gorelick_high_2020; @kumar_probabilistic_2021; @medjedovic_algorithms_2022].\n\n## Internet of Things\n\n`mmh3` is applicable to the IoT field. According to @shodan_its_2021,\nShodan [@Matherly2017] uses `mmh3` as its fingerprint for a favicon (i.e., an\nicon associated with a web page or website). @Matherly2024 explained\nthe adoption of `mmh3` due to its speed and compact hash size,\nnoting that cryptographic guarantees provided by `md5` and other hashes were\nnot necessary for their use case. ZoomEye, another popular IoT search engine,\nfollows Shodan’s convention.\n\nFor cybersecurity, @kopriva_hunting_2021 reported a method of discovering\npossible phishing websites by searching websites with Shodan, whose favicon’s\n`mmh3` hash value was the same as that of a genuine one. Another use case of\n`mmh3` in this area includes open-source intelligence (OSINT) activities,\nsuch as measuring the popularity of web frameworks\nand servers, as some users do not change their default favicon settings\nspecified by applications [@faraday_security_understanding_2022].\n\n# Related software\n\n`PYMMH` [@kihlander_pymmh3_2013] is a pure Python implementation of the\nMurmurHash3 algorithms. Among various other Python bindings for\nnon-cryptographic hashes, `python-xxhash` by Yue Du [@du_xxhash_2014] is another\npopular hash library, featuring xxHash developed by\nYan Collet [@collet_xxhash_2014].\n\n# Benchmarks\n\nWe conducted microbenchmarking experiments to compare the efficiency of\nPython-C hash libraries, balancing accuracy, reproducibility, and\nreliability. Our methodology follows practices from microbenchmarking\nliterature, including works by @Peters2002, @Stinner2016,\n@collet_xxhash_comparison_2020, @gorelick_high_2020, @RodriguezGuerra2021,\nand @Bernhardt2023.\n\n\\autoref{bandwidth} and \\autoref{latency} summarize the benchmarking results.\nWhile the `xxh3` family in `python-xxhash 3.5.0` shows superior\nperformance for large inputs, the `mmh3 5.0.0` implementation excels with\nsmaller inputs (common scenarios for non-cryptographic hashes), due to its use\nof `METH_FASTCALL`, an overhead-reducing interface introduced in Python 3.7.\n\nFor details, see the documentation of the project:\n<https://mmh3.readthedocs.io/en/stable/benchmark.html>.\nAdditionally, the benchmarking results are publicly available as JSON files in\nthe repository: <https://github.com/hajimes/mmh3-benchmarks>.\n\n<!-- markdownlint-capture -->\n<!-- markdownlint-disable line-length -->\n\n: \\label{bandwidth}Benchmarking results for Python extensions. Small data\nvelocity is defined as the inverse of the mean latency (in microseconds) for\ninputs in the range of 1–256 bytes. Collet (2020) refers to the results\nof original C implementations experimented by the author of xxHash, using a CPU\nclocked at 3.6–4.9 GHz (ours: 2.4–3.3 GHz).\n\n| Hash         |    Width | Bandwidth       | Small Data Velocity | cf. Collet (2020) |\n| :----------- | -------: | :-------------- | ------------------: | :---------------- |\n| xxh3_128     | 128 bits | **22.42 GiB/s** |                8.96 | 29.6 GiB/s        |\n| xxh3_64      |  64 bits | 22.41 GiB/s     |                 9.5 | 31.5 GiB/s        |\n| xxh_64       |  64 bits | 8.90 GiB/s      |                 9.3 | 9.1 GiB/s         |\n| **mmh3_128** | 128 bits | 6.91 GiB/s      |           **19.04** | N/A               |\n| xxh_32       |  32 bits | 6.15 GiB/s      |                8.91 | 9.7 GiB/s         |\n| **mmh3_32**  |  32 bits | 2.86 GiB/s      |               18.41 | 3.9 GiB/s         |\n| sha1         |  16 bits | 1.63 GiB/s      |                 2.4 | 0.8 GiB/s         |\n| md5          | 128 bits | 0.65 GiB/s      |                1.95 | 0.6 GiB/s         |\n\n<!-- markdownlint-restore -->\n\n![\\label{latency}Latency for small to medium-sized inputs. Lower is better.](../docs/_static/latency_small.png)\n\n# Acknowledgements\n\nThe author extends sincere gratitude to Akiko Aizawa for her helpful comments\non this paper. Appreciation is also given to all those involved in the\ndevelopment and maintenance of `mmh3`. Special thanks go to Micha Gorelick,\nwho made the first pull request to the project and later introduced the\nlibrary in her technical book [@gorelick_high_2020].\n\n# References\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[build-system]\n# setuptools >= 74.1.0 required to build C extensions via pyproject.toml\nrequires = [\"setuptools >= 74.1.0\", \"wheel\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\nname = \"mmh3\"\nversion = \"5.2.1\"\ndescription = \"Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions.\"\nreadme = \"README.md\"\nlicense = {file = \"LICENSE\"}\nkeywords = [\"utility\", \"hash\", \"MurmurHash\"]\nrequires-python = \">=3.10\"\nauthors = [\n  {name = \"Hajime Senuma\", email=\"hajime.senuma@gmail.com\"}\n]\nclassifiers = [\n  \"Development Status :: 5 - Production/Stable\",\n  \"Intended Audience :: Developers\",\n  \"License :: OSI Approved :: MIT License\",\n  \"Programming Language :: Python :: 3\",\n  \"Programming Language :: Python :: 3.10\",\n  \"Programming Language :: Python :: 3.11\",\n  \"Programming Language :: Python :: 3.12\",\n  \"Programming Language :: Python :: 3.13\",\n  \"Programming Language :: Python :: 3.14\",\n  \"Programming Language :: Python :: Free Threading :: 2 - Beta\",\n  \"Topic :: Software Development :: Libraries\",\n  \"Topic :: Utilities\"\n]\n\n[project.optional-dependencies]\ntest = [\n  \"pytest == 9.0.2\",\n  \"pytest-sugar == 1.1.1\"\n]\nlint = [\n  \"actionlint-py == 1.7.11.24\",\n  \"clang-format == 22.1.0\",\n  \"codespell == 2.4.1\",\n  \"pylint == 4.0.5\",\n  \"ruff == 0.15.4\"\n]\ntype = [\n  \"mypy == 1.19.1\"\n]\ndocs = [\n  \"myst-parser == 5.0.0\",\n  \"shibuya == 2026.1.9\",\n  \"sphinx == 8.2.3\",\n  \"sphinx-copybutton == 0.5.2\"\n]\nbenchmark = [\n  \"pymmh3 == 0.0.5\",\n  \"pyperf == 2.10.0\",\n  \"xxhash == 3.6.0\"\n]\nplot = [\n  \"matplotlib == 3.10.8\",\n  \"pandas == 3.0.1\"\n]\n\n[project.urls]\nHomepage = \"https://pypi.org/project/mmh3/\"\nDocumentation = \"https://mmh3.readthedocs.io/\"\nRepository = \"https://github.com/hajimes/mmh3\"\nChangelog = \"https://github.com/hajimes/mmh3/blob/master/CHANGELOG.md\"\n\"Bug Tracker\" = \"https://github.com/hajimes/mmh3/issues\"\n\n[tool.codespell]\n# As of 2026-03-02, skip has an issue on super-linter\n# https://github.com/super-linter/super-linter/issues/7466\nskip = \"*/paper.bib,./build\"\n# Collet is a surname, Commun is an abbr for a journal name,\n# fo is used in several test strings, and Ines is also a surname.\nignore-words-list = \"Collet,Commun,fo,Ines\"\n\n[tool.ruff]\nsrc = [\"src/mmh3/__init__.pyi\", \"util\", \"tests\", \"benchmark\", \"docs\"]\n\n[tool.ruff.lint]\nselect = [\"E\", \"W\", \"F\", \"I\", \"UP\", \"B\", \"SIM\", \"C4\", \"ISC\", \"NPY\"]\n\n[tool.ruff.lint.isort]\nknown-first-party = [\"mmh3\"]\n\n[tool.setuptools]\ninclude-package-data = true\next-modules = [\n  {name = \"mmh3\", sources = [\"./src/mmh3/mmh3module.c\", \"./src/mmh3/murmurhash3.c\"]}\n]\n\n[tool.setuptools.package-data]\nmmh3 = [\"*.h\"]\n\n[tool.pylint]\nignore-paths = [\n  \"^build\",\n  \"^venv\",\n  \"^.venv\",\n  \"^.tox\",\n  \"^src/mmh3/__init__.pyi\"\n]\n# Use multiple processes to speed up Pylint.\n# The value 0 specifies the number of processors to be auto-detected.\n# This setting can be found in the template file of super-linter 7.0.0.\njobs = 0\n# import-error: An error tricky to resolve, especially on super-linter.\n# wrong-import-order: Respect Ruff's import order.\ndisable = [\n  \"import-error\",\n  \"wrong-import-order\"\n]\n"
  },
  {
    "path": "src/mmh3/__init__.pyi",
    "content": "import sys\nfrom typing import Any, final\n\nif sys.version_info >= (3, 12):\n    from collections.abc import Buffer\nelse:\n    from _typeshed import ReadableBuffer as Buffer\n\ndef hash(key: bytes | str, seed: int = 0, signed: Any = True) -> int: ...\ndef hash_from_buffer(key: Buffer | str, seed: int = 0, signed: Any = True) -> int: ...\ndef hash64(\n    key: bytes | str, seed: int = 0, x64arch: Any = True, signed: Any = True\n) -> tuple[int, int]: ...\ndef hash128(\n    key: bytes | str, seed: int = 0, x64arch: Any = True, signed: Any = False\n) -> int: ...\ndef hash_bytes(key: bytes | str, seed: int = 0, x64arch: Any = True) -> bytes: ...\ndef mmh3_32_digest(key: Buffer | str, seed: int = 0) -> bytes: ...\ndef mmh3_32_sintdigest(key: Buffer | str, seed: int = 0) -> int: ...\ndef mmh3_32_uintdigest(key: Buffer | str, seed: int = 0) -> int: ...\ndef mmh3_x64_128_digest(key: Buffer | str, seed: int = 0) -> bytes: ...\ndef mmh3_x64_128_sintdigest(key: Buffer | str, seed: int = 0) -> int: ...\ndef mmh3_x64_128_uintdigest(key: Buffer | str, seed: int = 0) -> int: ...\ndef mmh3_x64_128_stupledigest(key: Buffer | str, seed: int = 0) -> tuple[int, int]: ...\ndef mmh3_x64_128_utupledigest(key: Buffer | str, seed: int = 0) -> tuple[int, int]: ...\ndef mmh3_x86_128_digest(key: Buffer | str, seed: int = 0) -> bytes: ...\ndef mmh3_x86_128_sintdigest(key: Buffer | str, seed: int = 0) -> int: ...\ndef mmh3_x86_128_uintdigest(key: Buffer | str, seed: int = 0) -> int: ...\ndef mmh3_x86_128_stupledigest(key: Buffer | str, seed: int = 0) -> tuple[int, int]: ...\ndef mmh3_x86_128_utupledigest(key: Buffer | str, seed: int = 0) -> tuple[int, int]: ...\n\nclass Hasher:\n    def __init__(self, data: Buffer | None = None, seed: int = 0) -> None: ...\n    def update(self, data: Buffer) -> None: ...\n    def digest(self) -> bytes: ...\n    def sintdigest(self) -> int: ...\n    def uintdigest(self) -> int: ...\n    def copy(self) -> Hasher: ...\n    @property\n    def digest_size(self) -> int: ...\n    @property\n    def block_size(self) -> int: ...\n    @property\n    def name(self) -> str: ...\n\n@final\nclass mmh3_32(Hasher): ...\n\n@final\nclass mmh3_x64_128(Hasher):\n    def stupledigest(self) -> tuple[int, int]: ...\n    def utupledigest(self) -> tuple[int, int]: ...\n\n@final\nclass mmh3_x86_128(Hasher):\n    def stupledigest(self) -> tuple[int, int]: ...\n    def utupledigest(self) -> tuple[int, int]: ...\n"
  },
  {
    "path": "src/mmh3/hashlib.h",
    "content": "// This code was taken from a part of CPython's code base (Modules/hashlib.h)\n// at commit 9ce0f48e918860ffa32751a85b0fe7967723e2e3\n// Below is a copy of the license of CPython\n\n// PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2\n// --------------------------------------------\n//\n// 1. This LICENSE AGREEMENT is between the Python Software Foundation\n// (\"PSF\"), and the Individual or Organization (\"Licensee\") accessing and\n// otherwise using this software (\"Python\") in source or binary form and\n// its associated documentation.\n//\n// 2. Subject to the terms and conditions of this License Agreement, PSF hereby\n// grants Licensee a nonexclusive, royalty-free, world-wide license to\n// reproduce, analyze, test, perform and/or display publicly, prepare\n// derivative works, distribute, and otherwise use Python alone or in any\n// derivative version, provided, however, that PSF's License Agreement and\n// PSF's notice of copyright, i.e., \"Copyright (c) 2001, 2002, 2003, 2004,\n// 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016,\n// 2017, 2018, 2019, 2020, 2021, 2022, 2023 Python Software Foundation; All\n// Rights Reserved\" are retained in Python alone or in any derivative version\n// prepared by Licensee.\n//\n// 3. In the event Licensee prepares a derivative work that is based on\n// or incorporates Python or any part thereof, and wants to make\n// the derivative work available to others as provided herein, then\n// Licensee hereby agrees to include in any such work a brief summary of\n// the changes made to Python.\n//\n// 4. PSF is making Python available to Licensee on an \"AS IS\"\n// basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\n// IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND\n// DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\n// FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT\n// INFRINGE ANY THIRD PARTY RIGHTS.\n//\n// 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\n// FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\n// A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,\n// OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n//\n// 6. This License Agreement will automatically terminate upon a material\n// breach of its terms and conditions.\n//\n// 7. Nothing in this License Agreement shall be deemed to create any\n// relationship of agency, partnership, or joint venture between PSF and\n// Licensee.  This License Agreement does not grant permission to use PSF\n// trademarks or trade name in a trademark sense to endorse or promote\n// products or services of Licensee, or any third party.\n//\n// 8. By copying, installing or otherwise using Python, Licensee\n// agrees to be bound by the terms and conditions of this License\n// Agreement.\n\n/*\n * Given a PyObject* obj, fill in the Py_buffer* viewp with the result\n * of PyObject_GetBuffer.  Sets an exception and issues the erraction\n * on any errors, e.g. 'return NULL' or 'goto error'.\n */\n#define GET_BUFFER_VIEW_OR_ERROR(obj, viewp, erraction)                   \\\n    do {                                                                  \\\n        if (PyUnicode_Check((obj))) {                                     \\\n            PyErr_SetString(PyExc_TypeError,                              \\\n                            \"Strings must be encoded before hashing\");    \\\n            erraction;                                                    \\\n        }                                                                 \\\n        if (!PyObject_CheckBuffer((obj))) {                               \\\n            PyErr_SetString(PyExc_TypeError,                              \\\n                            \"object supporting the buffer API required\"); \\\n            erraction;                                                    \\\n        }                                                                 \\\n        if (PyObject_GetBuffer((obj), (viewp), PyBUF_SIMPLE) == -1) {     \\\n            erraction;                                                    \\\n        }                                                                 \\\n        if ((viewp)->ndim > 1) {                                          \\\n            PyErr_SetString(PyExc_BufferError,                            \\\n                            \"Buffer must be single dimension\");           \\\n            PyBuffer_Release((viewp));                                    \\\n            erraction;                                                    \\\n        }                                                                 \\\n    } while (0)\n\n#define GET_BUFFER_VIEW_OR_ERROUT(obj, viewp) \\\n    GET_BUFFER_VIEW_OR_ERROR(obj, viewp, return NULL)"
  },
  {
    "path": "src/mmh3/mmh3module.c",
    "content": "// To handle 64-bit data; see https://docs.python.org/3/c-api/arg.html\n#ifndef PY_SSIZE_T_CLEAN\n#define PY_SSIZE_T_CLEAN\n#endif\n\n#include <Python.h>\n#include <stdio.h>\n#include <string.h>\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n#include <byteswap.h>\n#endif\n\n#include \"hashlib.h\"\n#include \"murmurhash3.h\"\n\n#if defined(_MSC_VER)\ntypedef signed __int8 int8_t;\ntypedef signed __int32 int32_t;\ntypedef signed __int64 int64_t;\ntypedef unsigned __int8 uint8_t;\ntypedef unsigned __int32 uint32_t;\ntypedef unsigned __int64 uint64_t;\n// Other compilers\n#else  // defined(_MSC_VER)\n#include <stdint.h>\n#endif  // defined(_MSC_VER)\n\n#define MMH3_32_DIGESTSIZE 4\n#define MMH3_128_DIGESTSIZE 16\n\n#define MMH3_32_BLOCKSIZE 12\n#define MMH3_128_BLOCKSIZE 32\n\n#define MMH3_VALIDATE_SEED_RETURN_NULL(seed)                       \\\n    if (seed < 0 || seed > 0xFFFFFFFF) {                           \\\n        PyErr_SetString(PyExc_ValueError, \"seed is out of range\"); \\\n        return NULL;                                               \\\n    }\n\n#define MMH3_VALIDATE_SEED_RETURN_INT(seed, buf)                   \\\n    if (seed < 0 || seed > 0xFFFFFFFF) {                           \\\n        PyBuffer_Release(&buf);                                    \\\n        PyErr_SetString(PyExc_ValueError, \"seed is out of range\"); \\\n        return -1;                                                 \\\n    }\n\n// obj: PyObject*\n// target_str: const char *\n// len: Py_ssize_t\n#define MMH3_HASH_VALIDATE_AND_SET_BYTES(obj, target_str, len)          \\\n    if (PyBytes_Check(obj)) {                                           \\\n        target_str_len = PyBytes_Size(obj);                             \\\n        target_str = PyBytes_AS_STRING(obj);                            \\\n    }                                                                   \\\n    else if (PyUnicode_Check(obj)) {                                    \\\n        target_str_len = PyUnicode_GET_LENGTH(obj);                     \\\n        target_str = PyUnicode_AsUTF8AndSize(obj, &target_str_len);     \\\n    }                                                                   \\\n    else {                                                              \\\n        PyErr_Format(PyExc_TypeError,                                   \\\n                     \"argument 1 must be read-only bytes-like object, \" \\\n                     \"not '%s'\",                                        \\\n                     Py_TYPE(obj)->tp_name);                            \\\n        return NULL;                                                    \\\n    }\n\n// obj: PyObject*\n// seed: unsigned long\n#define MMH3_HASH_VALIDATE_AND_SET_SEED(obj, seed)                      \\\n    if (!PyLong_Check(obj)) {                                           \\\n        PyErr_Format(PyExc_TypeError,                                   \\\n                     \"'%s' object cannot be interpreted as an integer\", \\\n                     Py_TYPE(obj)->tp_name);                            \\\n        return NULL;                                                    \\\n    }                                                                   \\\n    seed = PyLong_AsUnsignedLong(obj);                                  \\\n    if (seed == (unsigned long)-1 && PyErr_Occurred()) {                \\\n        if (PyErr_ExceptionMatches(PyExc_OverflowError)) {              \\\n            PyErr_SetString(PyExc_ValueError, \"seed is out of range\");  \\\n            return NULL;                                                \\\n        }                                                               \\\n    }                                                                   \\\n    if (seed > 0xFFFFFFFF) {                                            \\\n        PyErr_SetString(PyExc_ValueError, \"seed is out of range\");      \\\n        return NULL;                                                    \\\n    }\n\n// nargs: Py_ssize_t\n// name: const char *\n// pos: int\n#define MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, name, pos) \\\n    if (nargs >= pos) {                                      \\\n        PyErr_Format(PyExc_TypeError,                        \\\n                     \"argument for function given by name \"  \\\n                     \"('%s') and position (%d)\",             \\\n                     name, pos);                             \\\n        return NULL;                                         \\\n    }\n\n#define MMH3_VALIDATE_ARGS_AND_SET_SEED(nargs, args, seed)                  \\\n    if (nargs < 1) {                                                        \\\n        PyErr_SetString(PyExc_TypeError,                                    \\\n                        \"function takes at least 1 argument (0 given)\");    \\\n        return NULL;                                                        \\\n    }                                                                       \\\n    if (nargs > 2) {                                                        \\\n        PyErr_Format(PyExc_TypeError,                                       \\\n                     \"function takes at most 2 arguments (%d given)\",       \\\n                     (int)nargs);                                           \\\n        return NULL;                                                        \\\n    }                                                                       \\\n    if (nargs == 2) {                                                       \\\n        if (!PyLong_Check(args[1])) {                                       \\\n            PyErr_Format(PyExc_TypeError,                                   \\\n                         \"'%s' object cannot be interpreted as an integer\", \\\n                         Py_TYPE(args[1])->tp_name);                        \\\n            return NULL;                                                    \\\n        }                                                                   \\\n        const unsigned long seed_tmp = PyLong_AsUnsignedLong(args[1]);      \\\n        if (seed_tmp == (unsigned long)-1 && PyErr_Occurred()) {            \\\n            if (PyErr_ExceptionMatches(PyExc_OverflowError)) {              \\\n                PyErr_SetString(PyExc_ValueError, \"seed is out of range\");  \\\n                return NULL;                                                \\\n            }                                                               \\\n        }                                                                   \\\n        if (seed_tmp > 0xFFFFFFFF) {                                        \\\n            PyErr_SetString(PyExc_ValueError, \"seed is out of range\");      \\\n            return NULL;                                                    \\\n        }                                                                   \\\n        seed = (uint32_t)seed_tmp;                                          \\\n    }\n\n//-----------------------------------------------------------------------------\n// Helpers for mutex manipulations for hashers\n\n#ifdef Py_GIL_DISABLED\n#define MMH3_HASHER_LOCK(obj) PyMutex_Lock(&(obj->mutex))\n#define MMH3_HASHER_UNLOCK(obj) PyMutex_Unlock(&(obj->mutex))\n#define MMH3_HASHER_INIT_MUTEX(obj) \\\n    PyMutex t = {0};                \\\n    obj->mutex = t;\n\n#else\n#define MMH3_HASHER_LOCK(obj) (void)0\n#define MMH3_HASHER_UNLOCK(obj) (void)0\n#define MMH3_HASHER_INIT_MUTEX(obj) (void)0\n#endif\n\n//-----------------------------------------------------------------------------\n// One shot functions\n\nPyDoc_STRVAR(\n    mmh3_hash_doc,\n    \"hash(key, seed=0, signed=True) -> int\\n\"\n    \"\\n\"\n    \"Return a hash as a 32-bit integer.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x86_32 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (bytes | str): The input data to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"    signed (Any): If True, return a signed integer. Otherwise, return\\n\"\n    \"        an unsigned integer.\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    int: The hash value as a 32-bit integer.\\n\"\n    \"\\n\"\n    \".. versionchanged:: 5.0.0\\n\"\n    \"    The ``seed`` argument is now strictly checked for valid range.\\n\"\n    \"    The type of the ``signed`` argument has been changed from\\n\"\n    \"    ``bool`` to ``Any``. Performance improvements have been made.\\n\");\n\nstatic PyObject *\nmmh3_hash(PyObject *self, PyObject *const *args, Py_ssize_t nargs,\n          PyObject *kwnames)\n{\n    const char *target_str;\n    Py_ssize_t target_str_len;\n    unsigned long seed = 0;\n    int32_t result[1];\n    long long_result = 0;\n    int is_signed = 1;\n\n#ifndef _MSC_VER\n#if __LONG_WIDTH__ == 64 || defined(__APPLE__)\n    static uint64_t mask[] = {0x0ffffffff, 0xffffffffffffffff};\n#endif\n#endif\n\n    if ((nargs < 1) && kwnames == NULL) {\n        PyErr_SetString(PyExc_TypeError,\n                        \"function missing required argument 'key' (pos 1)\");\n        return NULL;\n    }\n\n    if (nargs > 3) {\n        PyErr_Format(PyExc_TypeError,\n                     \"function takes at most 3 arguments (%d given)\",\n                     (int)nargs);\n        return NULL;\n    }\n\n    if (nargs >= 1) {\n        MMH3_HASH_VALIDATE_AND_SET_BYTES(args[0], target_str, target_str_len);\n    }\n\n    if (nargs >= 2) {\n        MMH3_HASH_VALIDATE_AND_SET_SEED(args[1], seed);\n    }\n\n    if (nargs >= 3) {\n        is_signed = PyObject_IsTrue(args[2]);\n    }\n\n    if (kwnames) {\n        for (Py_ssize_t i = 0; i < PyTuple_Size(kwnames); i++) {\n            const char *kwname = PyUnicode_AsUTF8(PyTuple_GetItem(kwnames, i));\n            if (strcmp(kwname, \"key\") == 0) {\n                MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, \"key\", 1);\n                MMH3_HASH_VALIDATE_AND_SET_BYTES(args[nargs + i], target_str,\n                                                 target_str_len);\n            }\n            else if (strcmp(kwname, \"seed\") == 0) {\n                MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, \"seed\", 2);\n                MMH3_HASH_VALIDATE_AND_SET_SEED(args[nargs + i], seed);\n            }\n            else if (strcmp(kwname, \"signed\") == 0) {\n                MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, \"signed\", 3);\n                is_signed = PyObject_IsTrue(args[nargs + i]);\n            }\n            else {\n                PyErr_Format(\n                    PyExc_TypeError,\n                    \"'%s' is an invalid keyword argument for this function\",\n                    kwname);\n                return NULL;\n            }\n        }\n    }\n\n    murmurhash3_x86_32(target_str, target_str_len, (uint32_t)seed, result);\n\n#if defined(_MSC_VER)\n    /* for Windows envs */\n    long_result = result[0];\n    if (is_signed == 1) {\n        return PyLong_FromLong(long_result);\n    }\n    else {\n        return PyLong_FromUnsignedLong(long_result);\n    }\n#else  // defined(_MSC_VER)\n    /* for standard envs */\n#if __LONG_WIDTH__ == 64 || defined(__APPLE__)\n    long_result = result[0] & mask[is_signed];\n    return PyLong_FromLong(long_result);\n#else   // __LONG_WIDTH__ == 64 || defined(__APPLE__)\n    long_result = result[0];\n    if (is_signed == 1) {\n        return PyLong_FromLong(long_result);\n    }\n    else {\n        return PyLong_FromUnsignedLong(long_result);\n    }\n#endif  // __LONG_WIDTH__ == 64 || defined(__APPLE__)\n#endif  // defined(_MSC_VER)\n}\n\nPyDoc_STRVAR(\n    mmh3_hash_from_buffer_doc,\n    \"hash_from_buffer(key, seed=0, signed=True) -> int\\n\"\n    \"\\n\"\n    \"Return a hash for the buffer as a 32-bit integer.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x86_32 algorithm. Designed for large \"\n    \"memory-views such as numpy arrays.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (Buffer | str): The buffer to hash. String inputs are also\\n\"\n    \"        supported and are automatically converted to `bytes` using\\n\"\n    \"        UTF-8 encoding before hashing.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"    signed (Any): If True, return a signed integer. Otherwise, return\\n\"\n    \"        an unsigned integer.\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    int: The hash value as a 32-bit integer.\\n\"\n    \"\\n\"\n    \".. deprecated:: 5.0.0\\n\"\n    \"    Use ``mmh3_32_sintdigest()`` or ``mmh3_32_uintdigest()`` instead.\\n\"\n    \"\\n\"\n    \".. versionchanged:: 5.0.0\\n\"\n    \"    The ``seed`` argument is now strictly checked for valid range.\\n\"\n    \"    The type of the ``signed`` argument has been changed from\\n\"\n    \"    ``bool`` to ``Any``.\\n\");\n\nstatic PyObject *\nmmh3_hash_from_buffer(PyObject *self, PyObject *args, PyObject *keywds)\n{\n    Py_buffer target_buf;\n    long long seed = 0;\n    int32_t result[1];\n    long long_result = 0;\n    int is_signed = 1;\n\n    static char *kwlist[] = {\"key\", \"seed\", \"signed\", NULL};\n\n#ifndef _MSC_VER\n#if __LONG_WIDTH__ == 64 || defined(__APPLE__)\n    static uint64_t mask[] = {0x0ffffffff, 0xffffffffffffffff};\n#endif\n#endif\n\n    if (!PyArg_ParseTupleAndKeywords(args, keywds, \"s*|Lp\", kwlist,\n                                     &target_buf, &seed, &is_signed)) {\n        return NULL;\n    }\n\n    MMH3_VALIDATE_SEED_RETURN_NULL(seed);\n\n    murmurhash3_x86_32(target_buf.buf, target_buf.len, (uint32_t)seed, result);\n\n    PyBuffer_Release(&target_buf);\n\n#if defined(_MSC_VER)\n    /* for Windows envs */\n    long_result = result[0];\n    if (is_signed == 1) {\n        return PyLong_FromLong(long_result);\n    }\n    else {\n        return PyLong_FromUnsignedLong(long_result);\n    }\n#else  // defined(_MSC_VER)\n/* for standard envs */\n#if __LONG_WIDTH__ == 64 || defined(__APPLE__)\n    long_result = result[0] & mask[is_signed];\n    return PyLong_FromLong(long_result);\n#else   // __LONG_WIDTH__ == 64 || defined(__APPLE__)\n    long_result = result[0];\n    if (is_signed == 1) {\n        return PyLong_FromLong(long_result);\n    }\n    else {\n        return PyLong_FromUnsignedLong(long_result);\n    }\n#endif  // __LONG_WIDTH__ == 64 || defined(__APPLE__)\n#endif  // defined(_MSC_VER)\n}\n\nPyDoc_STRVAR(\n    mmh3_hash64_doc,\n    \"hash64(key, seed=0, x64arch=True, signed=True) -> tuple[int, int]\\n\"\n    \"\\n\"\n    \"Return a hash as a tuple of two 64-bit integers.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x{64, 86}_128 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (bytes | str): The input data to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"    x64arch (Any): If True, use an algorithm optimized for 64-bit\\n\"\n    \"        architecture. Otherwise, use one optimized for 32-bit\\n\"\n    \"        architecture.\\n\"\n    \"    signed (Any): If True, return a signed integer. Otherwise, return\\n\"\n    \"        an unsigned integer.\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    tuple[int, int]: The hash value as a tuple of two 64-bit \"\n    \"integers.\\n\"\n    \"\\n\"\n    \".. versionchanged:: 5.1.0\\n\"\n    \"    Performance improvements.\\n\"\n    \"\\n\"\n    \".. versionchanged:: 5.0.0\\n\"\n    \"    The ``seed`` argument is now strictly checked for valid range.\\n\"\n    \"    The type of the ``x64arch`` and ``signed`` arguments has been\\n\"\n    \"    changed from ``bool`` to ``Any``.\\n\");\n\nstatic PyObject *\nmmh3_hash64(PyObject *self, PyObject *const *args, Py_ssize_t nargs,\n            PyObject *kwnames)\n{\n    const char *target_str;\n    Py_ssize_t target_str_len;\n    long long seed = 0;\n    uint64_t result[2];\n    int x64arch = 1;\n    int is_signed = 1;\n\n    static char *valflag[] = {\"KK\", \"LL\"};\n\n    if ((nargs < 1) && kwnames == NULL) {\n        PyErr_SetString(PyExc_TypeError,\n                        \"function missing required argument 'key' (pos 1)\");\n        return NULL;\n    }\n\n    if (nargs > 4) {\n        PyErr_Format(PyExc_TypeError,\n                     \"function takes at most 4 arguments (%d given)\",\n                     (int)nargs);\n        return NULL;\n    }\n\n    if (nargs >= 1) {\n        MMH3_HASH_VALIDATE_AND_SET_BYTES(args[0], target_str, target_str_len);\n    }\n\n    if (nargs >= 2) {\n        MMH3_HASH_VALIDATE_AND_SET_SEED(args[1], seed);\n    }\n\n    if (nargs >= 3) {\n        x64arch = PyObject_IsTrue(args[2]);\n    }\n\n    if (nargs >= 4) {\n        is_signed = PyObject_IsTrue(args[2]);\n    }\n\n    if (kwnames) {\n        for (Py_ssize_t i = 0; i < PyTuple_Size(kwnames); i++) {\n            const char *kwname = PyUnicode_AsUTF8(PyTuple_GetItem(kwnames, i));\n            if (strcmp(kwname, \"key\") == 0) {\n                MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, \"key\", 1);\n                MMH3_HASH_VALIDATE_AND_SET_BYTES(args[nargs + i], target_str,\n                                                 target_str_len);\n            }\n            else if (strcmp(kwname, \"seed\") == 0) {\n                MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, \"seed\", 2);\n                MMH3_HASH_VALIDATE_AND_SET_SEED(args[nargs + i], seed);\n            }\n            else if (strcmp(kwname, \"x64arch\") == 0) {\n                MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, \"x64arch\", 3);\n                x64arch = PyObject_IsTrue(args[nargs + i]);\n            }\n            else if (strcmp(kwname, \"signed\") == 0) {\n                MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, \"signed\", 4);\n                is_signed = PyObject_IsTrue(args[nargs + i]);\n            }\n            else {\n                PyErr_Format(\n                    PyExc_TypeError,\n                    \"'%s' is an invalid keyword argument for this function\",\n                    kwname);\n                return NULL;\n            }\n        }\n    }\n\n    if (x64arch == 1) {\n        murmurhash3_x64_128(target_str, target_str_len, (uint32_t)seed,\n                            result);\n    }\n    else {\n        murmurhash3_x86_128(target_str, target_str_len, (uint32_t)seed,\n                            result);\n    }\n\n    PyObject *retval = Py_BuildValue(valflag[is_signed], result[0], result[1]);\n    return retval;\n}\n\nPyDoc_STRVAR(\n    mmh3_hash128_doc,\n    \"hash128(key, seed=0, x64arch=True, signed=False) -> int\\n\"\n    \"\\n\"\n    \"Return a hash as a 128-bit integer.\\n\\n\"\n    \"Calculated by the MurmurHash3_x{64, 86}_128 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (bytes | str): The input data to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"    x64arch (Any): If True, use an algorithm optimized for 64-bit\\n\"\n    \"        architecture. Otherwise, use one optimized for 32-bit\\n\"\n    \"        architecture.\\n\"\n    \"    signed (Any): If True, return a signed integer. Otherwise, return\\n\"\n    \"        an unsigned integer.\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    int: The hash value as a 128-bit integer.\\n\"\n    \"\\n\"\n    \".. versionchanged:: 5.1.0\\n\"\n    \"    Performance improvements.\\n\"\n    \"\\n\"\n    \".. versionchanged:: 5.0.0\\n\"\n    \"    The ``seed`` argument is now strictly checked for valid range.\\n\"\n    \"    The type of the ``x64arch`` and ``signed`` arguments has been\\n\"\n    \"    changed from ``bool`` to ``Any``.\\n\");\n\nstatic PyObject *\nmmh3_hash128(PyObject *self, PyObject *const *args, Py_ssize_t nargs,\n             PyObject *kwnames)\n{\n    const char *target_str;\n    Py_ssize_t target_str_len;\n    long long seed = 0;\n    uint64_t result[2];\n    int x64arch = 1;\n    int is_signed = 0;\n\n    if ((nargs < 1) && kwnames == NULL) {\n        PyErr_SetString(PyExc_TypeError,\n                        \"function missing required argument 'key' (pos 1)\");\n        return NULL;\n    }\n\n    if (nargs > 4) {\n        PyErr_Format(PyExc_TypeError,\n                     \"function takes at most 4 arguments (%d given)\",\n                     (int)nargs);\n        return NULL;\n    }\n\n    if (nargs >= 1) {\n        MMH3_HASH_VALIDATE_AND_SET_BYTES(args[0], target_str, target_str_len);\n    }\n\n    if (nargs >= 2) {\n        MMH3_HASH_VALIDATE_AND_SET_SEED(args[1], seed);\n    }\n\n    if (nargs >= 3) {\n        x64arch = PyObject_IsTrue(args[2]);\n    }\n\n    if (nargs >= 4) {\n        is_signed = PyObject_IsTrue(args[2]);\n    }\n\n    if (kwnames) {\n        for (Py_ssize_t i = 0; i < PyTuple_Size(kwnames); i++) {\n            const char *kwname = PyUnicode_AsUTF8(PyTuple_GetItem(kwnames, i));\n            if (strcmp(kwname, \"key\") == 0) {\n                MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, \"key\", 1);\n                MMH3_HASH_VALIDATE_AND_SET_BYTES(args[nargs + i], target_str,\n                                                 target_str_len);\n            }\n            else if (strcmp(kwname, \"seed\") == 0) {\n                MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, \"seed\", 2);\n                MMH3_HASH_VALIDATE_AND_SET_SEED(args[nargs + i], seed);\n            }\n            else if (strcmp(kwname, \"x64arch\") == 0) {\n                MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, \"x64arch\", 3);\n                x64arch = PyObject_IsTrue(args[nargs + i]);\n            }\n            else if (strcmp(kwname, \"signed\") == 0) {\n                MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, \"signed\", 4);\n                is_signed = PyObject_IsTrue(args[nargs + i]);\n            }\n            else {\n                PyErr_Format(\n                    PyExc_TypeError,\n                    \"'%s' is an invalid keyword argument for this function\",\n                    kwname);\n                return NULL;\n            }\n        }\n    }\n\n    if (x64arch == 1) {\n        murmurhash3_x64_128(target_str, target_str_len, seed, result);\n    }\n    else {\n        murmurhash3_x86_128(target_str, target_str_len, seed, result);\n    }\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    result[0] = bswap_64(result[0]);\n    result[1] = bswap_64(result[1]);\n#endif\n\n    /**\n     * _PyLong_FromByteArray is not a part of the official Python/C API\n     * and may be removed in the future (although it is practically stable).\n     * cf.\n     * https://mail.python.org/pipermail/python-list/2006-August/372365.html\n     */\n    PyObject *retval = _PyLong_FromByteArray(\n        (unsigned char *)result, MMH3_128_DIGESTSIZE, 1, is_signed);\n\n    return retval;\n}\n\nPyDoc_STRVAR(\n    mmh3_hash_bytes_doc,\n    \"hash_bytes(key, seed=0, x64arch=True) -> bytes\\n\"\n    \"\\n\"\n    \"Return a 16-byte hash of the ``bytes`` type.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (bytes | str): The input data to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"    x64arch (Any): If True, use an algorithm optimized for 64-bit\\n\"\n    \"        architecture. Otherwise, use one optimized for 32-bit\\n\"\n    \"        architecture.\\n\"\n    \"Returns:\\n\"\n    \"    bytes: The hash value as the ``bytes`` type with a length of 16\\n\"\n    \"    bytes (128 bits).\\n\")\n    \"\\n\"\n    \".. versionchanged:: 5.1.0\\n\"\n    \"    Performance improvements.\\n\"\n    \"\\n\"\n    \".. versionchanged:: 5.0.0\\n\"\n    \"    The ``seed`` argument is now strictly checked for valid range.\\n\"\n    \"    The type of the ``x64arch`` argument has been changed from\\n\"\n    \"    ``bool`` to ``Any``.\\n\";\n\nstatic PyObject *\nmmh3_hash_bytes(PyObject *self, PyObject *const *args, Py_ssize_t nargs,\n                PyObject *kwnames)\n{\n    const char *target_str;\n    Py_ssize_t target_str_len;\n    long long seed = 0;\n    uint64_t result[2];\n    int x64arch = 1;\n\n    if ((nargs < 1) && kwnames == NULL) {\n        PyErr_SetString(PyExc_TypeError,\n                        \"function missing required argument 'key' (pos 1)\");\n        return NULL;\n    }\n\n    if (nargs > 3) {\n        PyErr_Format(PyExc_TypeError,\n                     \"function takes at most 3 arguments (%d given)\",\n                     (int)nargs);\n        return NULL;\n    }\n\n    if (nargs >= 1) {\n        MMH3_HASH_VALIDATE_AND_SET_BYTES(args[0], target_str, target_str_len);\n    }\n\n    if (nargs >= 2) {\n        MMH3_HASH_VALIDATE_AND_SET_SEED(args[1], seed);\n    }\n\n    if (nargs >= 3) {\n        x64arch = PyObject_IsTrue(args[2]);\n    }\n\n    if (kwnames) {\n        for (Py_ssize_t i = 0; i < PyTuple_Size(kwnames); i++) {\n            const char *kwname = PyUnicode_AsUTF8(PyTuple_GetItem(kwnames, i));\n            if (strcmp(kwname, \"key\") == 0) {\n                MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, \"key\", 1);\n                MMH3_HASH_VALIDATE_AND_SET_BYTES(args[nargs + i], target_str,\n                                                 target_str_len);\n            }\n            else if (strcmp(kwname, \"seed\") == 0) {\n                MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, \"seed\", 2);\n                MMH3_HASH_VALIDATE_AND_SET_SEED(args[nargs + i], seed);\n            }\n            else if (strcmp(kwname, \"x64arch\") == 0) {\n                MMH3_HASH_VALIDATE_ARG_DUPLICATION(nargs, \"x64arch\", 3);\n                x64arch = PyObject_IsTrue(args[nargs + i]);\n            }\n            else {\n                PyErr_Format(\n                    PyExc_TypeError,\n                    \"'%s' is an invalid keyword argument for this function\",\n                    kwname);\n                return NULL;\n            }\n        }\n    }\n\n    if (x64arch == 1) {\n        murmurhash3_x64_128(target_str, target_str_len, seed, result);\n    }\n    else {\n        murmurhash3_x86_128(target_str, target_str_len, seed, result);\n    }\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    result[0] = bswap_64(result[0]);\n    result[1] = bswap_64(result[1]);\n#endif\n\n    return PyBytes_FromStringAndSize((char *)result, MMH3_128_DIGESTSIZE);\n}\n\n//-----------------------------------------------------------------------------\n// Functions that accept a buffer\n\nPyDoc_STRVAR(\n    mmh3_mmh3_32_digest_doc,\n    \"mmh3_32_digest(key, seed=0, /) -> bytes\\n\"\n    \"\\n\"\n    \"Return a 4-byte hash of the ``bytes`` type for the buffer.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x86_32 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (Buffer): The input buffer to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    bytes: The hash value as the ``bytes`` type with a length of\\n\"\n    \"    4 bytes (32 bits).\\n\"\n    \"\\n\"\n    \".. versionadded:: 5.0.0\\n\");\n\nstatic PyObject *\nmmh3_mmh3_32_digest(PyObject *self, PyObject *const *args, Py_ssize_t nargs)\n{\n    Py_buffer target_buf;\n    uint32_t seed = 0;\n    char result[MMH3_32_DIGESTSIZE];\n\n    MMH3_VALIDATE_ARGS_AND_SET_SEED(nargs, args, seed);\n\n    GET_BUFFER_VIEW_OR_ERROUT(args[0], &target_buf);\n\n    murmurhash3_x86_32(target_buf.buf, target_buf.len, seed, result);\n    PyBuffer_Release(&target_buf);\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    ((uint32_t *)result)[0] = bswap_32(((uint32_t *)result)[0]);\n#endif\n\n    return PyBytes_FromStringAndSize((char *)result, MMH3_32_DIGESTSIZE);\n}\n\nPyDoc_STRVAR(\n    mmh3_mmh3_32_sintdigest_doc,\n    \"mmh3_32_sintdigest(key, seed=0, /) -> int\\n\"\n    \"\\n\"\n    \"Return a hash for the buffer as a 32-bit signed integer.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x86_32 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (Buffer): The input buffer to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    int: The hash value as a 32-bit signed integer.\\n\"\n    \"\\n\"\n    \".. versionadded:: 5.0.0\\n\");\n\nstatic PyObject *\nmmh3_mmh3_32_sintdigest(PyObject *self, PyObject *const *args,\n                        Py_ssize_t nargs)\n{\n    Py_buffer target_buf;\n    uint32_t seed = 0;\n    int32_t result[1];\n\n    MMH3_VALIDATE_ARGS_AND_SET_SEED(nargs, args, seed);\n\n    GET_BUFFER_VIEW_OR_ERROUT(args[0], &target_buf);\n\n    murmurhash3_x86_32(target_buf.buf, target_buf.len, seed, result);\n    PyBuffer_Release(&target_buf);\n\n    return PyLong_FromLong(result[0]);\n}\n\nPyDoc_STRVAR(\n    mmh3_mmh3_32_uintdigest_doc,\n    \"mmh3_32_uintdigest(key, seed=0, /) -> int\\n\"\n    \"\\n\"\n    \"Return a hash for the buffer as a 32-bit unsigned integer.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x86_32 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (Buffer): The input buffer to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    int: The hash value as a 32-bit unsigned integer.\\n\"\n    \"\\n\"\n    \".. versionadded:: 5.0.0\\n\");\n\nstatic PyObject *\nmmh3_mmh3_32_uintdigest(PyObject *self, PyObject *const *args,\n                        Py_ssize_t nargs)\n{\n    Py_buffer target_buf;\n    uint32_t seed = 0;\n    uint32_t result[1];\n\n    MMH3_VALIDATE_ARGS_AND_SET_SEED(nargs, args, seed);\n\n    GET_BUFFER_VIEW_OR_ERROUT(args[0], &target_buf);\n\n    murmurhash3_x86_32(target_buf.buf, target_buf.len, seed, result);\n    PyBuffer_Release(&target_buf);\n\n    return PyLong_FromUnsignedLong(result[0]);\n}\n\nPyDoc_STRVAR(\n    mmh3_mmh3_x64_128_digest_doc,\n    \"mmh3_x64_128_digest(key, seed=0, /) -> bytes\\n\"\n    \"\\n\"\n    \"Return a 16-byte hash of the ``bytes`` type for the buffer.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x64_128 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (Buffer): The input buffer to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    bytes: The hash value as the ``bytes`` type with a length of\\n\"\n    \"    16 bytes (128 bits).\\n\"\n    \"\\n\"\n    \".. versionadded:: 5.0.0\\n\");\n\nstatic PyObject *\nmmh3_mmh3_x64_128_digest(PyObject *self, PyObject *const *args,\n                         Py_ssize_t nargs)\n{\n    Py_buffer target_buf;\n    uint32_t seed = 0;\n    uint64_t result[2];\n\n    MMH3_VALIDATE_ARGS_AND_SET_SEED(nargs, args, seed);\n\n    GET_BUFFER_VIEW_OR_ERROUT(args[0], &target_buf);\n\n    murmurhash3_x64_128(target_buf.buf, target_buf.len, seed, result);\n    PyBuffer_Release(&target_buf);\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    result[0] = bswap_64(result[0]);\n    result[1] = bswap_64(result[1]);\n#endif\n\n    return PyBytes_FromStringAndSize((char *)result, MMH3_128_DIGESTSIZE);\n}\n\nPyDoc_STRVAR(\n    mmh3_mmh3_x64_128_sintdigest_doc,\n    \"mmh3_x64_128_sintdigest(key, seed=0, /) -> int\\n\"\n    \"\\n\"\n    \"Return a hash for the buffer as a 128-bit signed integer.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x64_128 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (Buffer): The input buffer to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    int: The hash value as a 128-bit signed integer.\\n\"\n    \"\\n\"\n    \".. versionadded:: 5.0.0\\n\");\n\nstatic PyObject *\nmmh3_mmh3_x64_128_sintdigest(PyObject *self, PyObject *const *args,\n                             Py_ssize_t nargs)\n{\n    Py_buffer target_buf;\n    uint32_t seed = 0;\n    uint64_t result[2];\n\n    MMH3_VALIDATE_ARGS_AND_SET_SEED(nargs, args, seed);\n\n    GET_BUFFER_VIEW_OR_ERROUT(args[0], &target_buf);\n\n    murmurhash3_x64_128(target_buf.buf, target_buf.len, seed, result);\n    PyBuffer_Release(&target_buf);\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    result[0] = bswap_64(result[0]);\n    result[1] = bswap_64(result[1]);\n#endif\n\n    /**\n     * _PyLong_FromByteArray is not a part of the official Python/C API\n     * and may be removed in the future (although it is practically stable).\n     * cf.\n     * https://mail.python.org/pipermail/python-list/2006-August/372365.html\n     */\n    PyObject *retval = _PyLong_FromByteArray((unsigned char *)result,\n                                             MMH3_128_DIGESTSIZE, 1, 1);\n\n    return retval;\n}\n\nPyDoc_STRVAR(\n    mmh3_mmh3_x64_128_uintdigest_doc,\n    \"mmh3_x64_128_uintdigest(key, seed=0, /) -> int\\n\"\n    \"\\n\"\n    \"Return a hash for the buffer as a 128-bit unsigned integer.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x64_128 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (Buffer): The input buffer to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    int: The hash value as a 128-bit unsigned integer.\\n\"\n    \"\\n\"\n    \".. versionadded:: 5.0.0\\n\");\n\nstatic PyObject *\nmmh3_mmh3_x64_128_uintdigest(PyObject *self, PyObject *const *args,\n                             Py_ssize_t nargs)\n{\n    Py_buffer target_buf;\n    uint32_t seed = 0;\n    uint64_t result[2];\n\n    MMH3_VALIDATE_ARGS_AND_SET_SEED(nargs, args, seed);\n\n    GET_BUFFER_VIEW_OR_ERROUT(args[0], &target_buf);\n\n    murmurhash3_x64_128(target_buf.buf, target_buf.len, seed, result);\n    PyBuffer_Release(&target_buf);\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    result[0] = bswap_64(result[0]);\n    result[1] = bswap_64(result[1]);\n#endif\n\n    /**\n     * _PyLong_FromByteArray is not a part of the official Python/C API\n     * and may be removed in the future (although it is practically stable).\n     * cf.\n     * https://mail.python.org/pipermail/python-list/2006-August/372365.html\n     */\n    PyObject *retval = _PyLong_FromByteArray((unsigned char *)result,\n                                             MMH3_128_DIGESTSIZE, 1, 0);\n\n    return retval;\n}\n\nPyDoc_STRVAR(\n    mmh3_mmh3_x64_128_stupledigest_doc,\n    \"mmh3_x64_128_stupledigest(key, seed=0, /) -> tuple[int, int]\\n\"\n    \"\\n\"\n    \"Return a hash for the buffer as a tuple of two 64-bit signed integers.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x64_128 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (Buffer): The input buffer to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    tuple[int, int]: The hash value as a tuple of two 64-bit signed\\n\"\n    \"    integers.\\n\"\n    \"\\n\"\n    \".. versionadded:: 5.0.0\\n\");\n\nstatic PyObject *\nmmh3_mmh3_x64_128_stupledigest(PyObject *self, PyObject *const *args,\n                               Py_ssize_t nargs)\n{\n    Py_buffer target_buf;\n    uint32_t seed = 0;\n    uint64_t result[2];\n\n    MMH3_VALIDATE_ARGS_AND_SET_SEED(nargs, args, seed);\n\n    GET_BUFFER_VIEW_OR_ERROUT(args[0], &target_buf);\n\n    murmurhash3_x64_128(target_buf.buf, target_buf.len, seed, result);\n    PyBuffer_Release(&target_buf);\n\n    PyObject *retval = Py_BuildValue(\"LL\", result[0], result[1]);\n    return retval;\n}\n\nPyDoc_STRVAR(\n    mmh3_mmh3_x64_128_utupledigest_doc,\n    \"mmh3_x64_128_utupledigest(key, seed=0, /) -> tuple[int, int]\\n\"\n    \"\\n\"\n    \"Return a hash for the buffer as a tuple of two 64-bit unsigned \"\n    \"integers.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x64_128 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (Buffer): The input buffer to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    tuple[int, int]: The hash value as a tuple of two 64-bit unsigned\\n\"\n    \"    integers.\\n\"\n    \"\\n\"\n    \".. versionadded:: 5.0.0\\n\");\n\nstatic PyObject *\nmmh3_mmh3_x64_128_utupledigest(PyObject *self, PyObject *const *args,\n                               Py_ssize_t nargs)\n{\n    Py_buffer target_buf;\n    uint32_t seed = 0;\n    uint64_t result[2];\n\n    MMH3_VALIDATE_ARGS_AND_SET_SEED(nargs, args, seed);\n\n    GET_BUFFER_VIEW_OR_ERROUT(args[0], &target_buf);\n\n    murmurhash3_x64_128(target_buf.buf, target_buf.len, seed, result);\n    PyBuffer_Release(&target_buf);\n\n    PyObject *retval = Py_BuildValue(\"KK\", result[0], result[1]);\n    return retval;\n}\n\nPyDoc_STRVAR(\n    mmh3_mmh3_x86_128_digest_doc,\n    \"mmh3_x86_128_digest(key, seed=0, /) -> bytes\\n\"\n    \"\\n\"\n    \"Return a 16-byte hash of the ``bytes`` type for the buffer.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x86_128 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (Buffer): The input buffer to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    bytes: The hash value as the ``bytes`` type with a length of\\n\"\n    \"    16 bytes (128 bits).\\n\"\n    \"\\n\"\n    \".. versionadded:: 5.0.0\\n\");\n\nstatic PyObject *\nmmh3_mmh3_x86_128_digest(PyObject *self, PyObject *const *args,\n                         Py_ssize_t nargs)\n{\n    Py_buffer target_buf;\n    uint32_t seed = 0;\n    uint64_t result[2];\n\n    MMH3_VALIDATE_ARGS_AND_SET_SEED(nargs, args, seed);\n\n    GET_BUFFER_VIEW_OR_ERROUT(args[0], &target_buf);\n\n    murmurhash3_x86_128(target_buf.buf, target_buf.len, seed, result);\n    PyBuffer_Release(&target_buf);\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    result[0] = bswap_64(result[0]);\n    result[1] = bswap_64(result[1]);\n#endif\n\n    return PyBytes_FromStringAndSize((char *)result, MMH3_128_DIGESTSIZE);\n}\n\nPyDoc_STRVAR(\n    mmh3_mmh3_x86_128_sintdigest_doc,\n    \"mmh3_x86_128_sintdigest(key, seed=0, /) -> int\\n\"\n    \"\\n\"\n    \"Return a hash for the buffer as a 128-bit signed integer.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x86_128 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (Buffer): The input buffer to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    int: The hash value as an signed 128-bit integer.\\n\"\n    \"\\n\"\n    \".. versionadded:: 5.0.0\\n\");\n\nstatic PyObject *\nmmh3_mmh3_x86_128_sintdigest(PyObject *self, PyObject *const *args,\n                             Py_ssize_t nargs)\n{\n    Py_buffer target_buf;\n    uint32_t seed = 0;\n    uint64_t result[2];\n\n    MMH3_VALIDATE_ARGS_AND_SET_SEED(nargs, args, seed);\n\n    GET_BUFFER_VIEW_OR_ERROUT(args[0], &target_buf);\n\n    murmurhash3_x86_128(target_buf.buf, target_buf.len, seed, result);\n    PyBuffer_Release(&target_buf);\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    result[0] = bswap_64(result[0]);\n    result[1] = bswap_64(result[1]);\n#endif\n\n    /**\n     * _PyLong_FromByteArray is not a part of the official Python/C API\n     * and may be removed in the future (although it is practically stable).\n     * cf.\n     * https://mail.python.org/pipermail/python-list/2006-August/372365.html\n     */\n    PyObject *retval = _PyLong_FromByteArray((unsigned char *)result,\n                                             MMH3_128_DIGESTSIZE, 1, 1);\n\n    return retval;\n}\n\nPyDoc_STRVAR(\n    mmh3_mmh3_x86_128_uintdigest_doc,\n    \"mmh3_x86_128_uintdigest(key, seed=0, /) -> int\\n\"\n    \"\\n\"\n    \"Return a hash for the buffer as a 128-bit unsigned integer.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x86_128 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (Buffer): The input buffer to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    int: The hash value as a 128-bit unsigned integer.\\n\"\n    \"\\n\"\n    \".. versionadded:: 5.0.0\\n\");\n\nstatic PyObject *\nmmh3_mmh3_x86_128_uintdigest(PyObject *self, PyObject *const *args,\n                             Py_ssize_t nargs)\n{\n    Py_buffer target_buf;\n    uint32_t seed = 0;\n    uint64_t result[2];\n\n    MMH3_VALIDATE_ARGS_AND_SET_SEED(nargs, args, seed);\n\n    GET_BUFFER_VIEW_OR_ERROUT(args[0], &target_buf);\n\n    murmurhash3_x86_128(target_buf.buf, target_buf.len, seed, result);\n    PyBuffer_Release(&target_buf);\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    result[0] = bswap_64(result[0]);\n    result[1] = bswap_64(result[1]);\n#endif\n\n    /**\n     * _PyLong_FromByteArray is not a part of the official Python/C API\n     * and may be removed in the future (although it is practically stable).\n     * cf.\n     * https://mail.python.org/pipermail/python-list/2006-August/372365.html\n     */\n    PyObject *retval = _PyLong_FromByteArray((unsigned char *)result,\n                                             MMH3_128_DIGESTSIZE, 1, 0);\n\n    return retval;\n}\n\nPyDoc_STRVAR(\n    mmh3_mmh3_x86_128_stupledigest_doc,\n    \"mmh3_x86_128_stupledigest(key, seed=0, /) -> tuple[int, int]\\n\"\n    \"\\n\"\n    \"Return a hash for the buffer as a tuple of two 64-bit signed integers.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x86_128 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (Buffer): The input buffer to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    tuple[int, int]: The hash value as a tuple of two 64-bit signed\\n\"\n    \"    integers.\\n\"\n    \"\\n\"\n    \".. versionadded:: 5.0.0\\n\");\n\nstatic PyObject *\nmmh3_mmh3_x86_128_stupledigest(PyObject *self, PyObject *const *args,\n                               Py_ssize_t nargs)\n{\n    Py_buffer target_buf;\n    uint32_t seed = 0;\n    uint64_t result[2];\n\n    MMH3_VALIDATE_ARGS_AND_SET_SEED(nargs, args, seed);\n\n    GET_BUFFER_VIEW_OR_ERROUT(args[0], &target_buf);\n\n    murmurhash3_x86_128(target_buf.buf, target_buf.len, seed, result);\n    PyBuffer_Release(&target_buf);\n\n    PyObject *retval = Py_BuildValue(\"LL\", result[0], result[1]);\n    return retval;\n}\n\nPyDoc_STRVAR(\n    mmh3_mmh3_x86_128_utupledigest_doc,\n    \"mmh3_x86_128_utupledigest(key, seed=0, /) -> tuple[int, int]\\n\"\n    \"\\n\"\n    \"Return a hash for the buffer as a tuple of two 64-bit unsigned \"\n    \"integers.\\n\"\n    \"\\n\"\n    \"Calculated by the MurmurHash3_x86_128 algorithm.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    key (Buffer): The input buffer to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    tuple[int, int]: The hash value as a tuple of two 64-bit unsigned\\n\"\n    \"    integers.\\n\"\n    \"\\n\"\n    \".. versionadded:: 5.0.0\\n\");\n\nstatic PyObject *\nmmh3_mmh3_x86_128_utupledigest(PyObject *self, PyObject *const *args,\n                               Py_ssize_t nargs)\n{\n    Py_buffer target_buf;\n    uint32_t seed = 0;\n    uint64_t result[2];\n\n    MMH3_VALIDATE_ARGS_AND_SET_SEED(nargs, args, seed);\n\n    GET_BUFFER_VIEW_OR_ERROUT(args[0], &target_buf);\n\n    murmurhash3_x86_128(target_buf.buf, target_buf.len, seed, result);\n    PyBuffer_Release(&target_buf);\n\n    PyObject *retval = Py_BuildValue(\"KK\", result[0], result[1]);\n    return retval;\n}\n\n// Casting to PyCFunction is mandatory for\n//   METH_VARARGS | METH_KEYWORDS functions.\n// See\n// https://docs.python.org/3/extending/extending.html#keyword-parameters-for-extension-functions\nstatic PyMethodDef Mmh3Methods[] = {\n    {\"hash\", (PyCFunction)mmh3_hash, METH_FASTCALL | METH_KEYWORDS,\n     mmh3_hash_doc},\n    {\"hash_from_buffer\", (PyCFunction)mmh3_hash_from_buffer,\n     METH_VARARGS | METH_KEYWORDS, mmh3_hash_from_buffer_doc},\n    {\"hash64\", (PyCFunction)mmh3_hash64, METH_FASTCALL | METH_KEYWORDS,\n     mmh3_hash64_doc},\n    {\"hash128\", (PyCFunction)mmh3_hash128, METH_FASTCALL | METH_KEYWORDS,\n     mmh3_hash128_doc},\n    {\"hash_bytes\", (PyCFunction)mmh3_hash_bytes, METH_FASTCALL | METH_KEYWORDS,\n     mmh3_hash_bytes_doc},\n    {\"mmh3_32_digest\", (PyCFunction)mmh3_mmh3_32_digest, METH_FASTCALL,\n     mmh3_mmh3_32_digest_doc},\n    {\"mmh3_32_sintdigest\", (PyCFunction)mmh3_mmh3_32_sintdigest, METH_FASTCALL,\n     mmh3_mmh3_32_sintdigest_doc},\n    {\"mmh3_32_uintdigest\", (PyCFunction)mmh3_mmh3_32_uintdigest, METH_FASTCALL,\n     mmh3_mmh3_32_uintdigest_doc},\n    {\"mmh3_x64_128_digest\", (PyCFunction)mmh3_mmh3_x64_128_digest,\n     METH_FASTCALL, mmh3_mmh3_x64_128_digest_doc},\n    {\"mmh3_x64_128_sintdigest\", (PyCFunction)mmh3_mmh3_x64_128_sintdigest,\n     METH_FASTCALL, mmh3_mmh3_x64_128_sintdigest_doc},\n    {\"mmh3_x64_128_uintdigest\", (PyCFunction)mmh3_mmh3_x64_128_uintdigest,\n     METH_FASTCALL, mmh3_mmh3_x64_128_uintdigest_doc},\n    {\"mmh3_x64_128_stupledigest\", (PyCFunction)mmh3_mmh3_x64_128_stupledigest,\n     METH_FASTCALL, mmh3_mmh3_x64_128_stupledigest_doc},\n    {\"mmh3_x64_128_utupledigest\", (PyCFunction)mmh3_mmh3_x64_128_utupledigest,\n     METH_FASTCALL, mmh3_mmh3_x64_128_utupledigest_doc},\n    {\"mmh3_x86_128_digest\", (PyCFunction)mmh3_mmh3_x86_128_digest,\n     METH_FASTCALL, mmh3_mmh3_x86_128_digest_doc},\n    {\"mmh3_x86_128_sintdigest\", (PyCFunction)mmh3_mmh3_x86_128_sintdigest,\n     METH_FASTCALL, mmh3_mmh3_x86_128_sintdigest_doc},\n    {\"mmh3_x86_128_uintdigest\", (PyCFunction)mmh3_mmh3_x86_128_uintdigest,\n     METH_FASTCALL, mmh3_mmh3_x86_128_uintdigest_doc},\n    {\"mmh3_x86_128_stupledigest\", (PyCFunction)mmh3_mmh3_x86_128_stupledigest,\n     METH_FASTCALL, mmh3_mmh3_x86_128_stupledigest_doc},\n    {\"mmh3_x86_128_utupledigest\", (PyCFunction)mmh3_mmh3_x86_128_utupledigest,\n     METH_FASTCALL, mmh3_mmh3_x86_128_utupledigest_doc},\n    {NULL, NULL, 0, NULL}};\n\n//-----------------------------------------------------------------------------\n// Hasher classes\n//\n// The design of hasher classes are loosely based on the Google Guava\n// implementation (Java)\n\n//-----------------------------------------------------------------------------\n// Hasher for murmurhash3_x86_32\ntypedef struct {\n    PyObject_HEAD uint32_t h;\n    uint64_t buffer;\n    uint8_t shift;\n    Py_ssize_t length;\n#ifdef Py_GIL_DISABLED\n    PyMutex mutex;\n#endif\n} MMH3Hasher32;\n\nstatic PyTypeObject MMH3Hasher32Type;\n\nstatic FORCE_INLINE void\nupdate32_impl(MMH3Hasher32 *self, Py_buffer *buf)\n{\n    Py_ssize_t i = 0;\n    uint32_t h1 = 0;\n    uint32_t k1 = 0;\n    const uint32_t c1 = 0xe6546b64;\n    const uint64_t mask = 0xffffffffUL;\n\n    MMH3_HASHER_LOCK(self);\n    h1 = self->h;\n\n    for (; i + 4 <= buf->len; i += 4) {\n        k1 = getblock32(buf->buf, i / 4);\n        self->buffer |= (k1 & mask) << self->shift;\n        self->length += 4;\n\n        h1 ^= mixK1(self->buffer);\n        h1 = mixH1(h1, 0, 13, c1);\n        self->buffer >>= 32;\n    }\n\n    for (; i < buf->len; i++) {\n        k1 = ((uint8_t *)buf->buf)[i];\n        self->buffer |= (k1 & mask) << self->shift;\n        self->shift += 8;\n        self->length += 1;\n\n        if (self->shift >= 32) {\n            h1 ^= mixK1(self->buffer);\n            h1 = mixH1(h1, 0, 13, c1);\n            self->buffer >>= 32;\n            self->shift -= 32;\n        }\n    }\n\n    self->h = h1;\n\n    MMH3_HASHER_UNLOCK(self);\n\n    PyBuffer_Release(buf);\n\n    return;\n}\n\nstatic void\nMMH3Hasher32_dealloc(MMH3Hasher32 *self)\n{\n    Py_TYPE(self)->tp_free((PyObject *)self);\n}\n\nstatic PyObject *\nMMH3Hasher32_new(PyTypeObject *type, PyObject *args, PyObject *kwds)\n{\n    MMH3Hasher32 *self;\n    self = (MMH3Hasher32 *)type->tp_alloc(type, 0);\n    if (self != NULL) {\n        self->h = 0;\n        self->buffer = 0;\n        self->shift = 0;\n        self->length = 0;\n        MMH3_HASHER_INIT_MUTEX(self);\n    }\n    return (PyObject *)self;\n}\n\n/* It is impossible to add docstring for __init__ in Python C extension.\n  Therefore, the constructor docstring should be described in the class\n  docstring. See also https://stackoverflow.com/q/11913492 */\nstatic int\nMMH3Hasher32_init(MMH3Hasher32 *self, PyObject *args, PyObject *kwds)\n{\n    Py_buffer target_buf = {0};\n    long long seed = 0;\n    static char *kwlist[] = {\"data\", \"seed\", NULL};\n\n    if (!PyArg_ParseTupleAndKeywords(args, kwds, \"|y*L\", kwlist, &target_buf,\n                                     &seed))\n        return -1;\n\n    MMH3_VALIDATE_SEED_RETURN_INT(seed, target_buf);\n\n    self->h = (uint32_t)seed;\n\n    if (target_buf.buf != NULL) {\n        // target_buf will be released in update32_impl\n        update32_impl(self, &target_buf);\n    }\n\n    return 0;\n}\n\nPyDoc_STRVAR(\n    MMH3Hasher_update_doc,\n    \"update(data)\\n\"\n    \"\\n\"\n    \"Update this hash object's state with the provided bytes-like object.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    data (Buffer): The buffer to hash.\\n\");\n\nstatic PyObject *\nMMH3Hasher32_update(MMH3Hasher32 *self, PyObject *obj)\n{\n    Py_buffer buf;\n\n    GET_BUFFER_VIEW_OR_ERROUT(obj, &buf);\n\n    // buf will be released in update32_impl\n    update32_impl(self, &buf);\n\n    Py_RETURN_NONE;\n}\n\nstatic FORCE_INLINE uint32_t\ndigest32_impl(uint32_t h, uint64_t k1, Py_ssize_t length)\n{\n    h ^= mixK1(k1);\n    h ^= length;\n    h = fmix32(h);\n    return h;\n}\n\nPyDoc_STRVAR(MMH3Hasher_digest_doc,\n             \"digest() -> bytes\\n\"\n             \"\\n\"\n             \"Return the digest value as a ``bytes`` object.\\n\"\n             \"\\n\"\n             \"Returns:\\n\"\n             \"    bytes: The digest value.\\n\");\n\nstatic PyObject *\nMMH3Hasher32_digest(MMH3Hasher32 *self, PyObject *Py_UNUSED(ignored))\n{\n    MMH3_HASHER_LOCK(self);\n    uint32_t h = digest32_impl(self->h, self->buffer, self->length);\n    MMH3_HASHER_UNLOCK(self);\n\n    char out[MMH3_32_DIGESTSIZE];\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    ((uint32_t *)out)[0] = bswap_32(h);\n#else\n    ((uint32_t *)out)[0] = h;\n#endif\n\n    return PyBytes_FromStringAndSize(out, MMH3_32_DIGESTSIZE);\n}\n\nPyDoc_STRVAR(MMH3Hasher_sintdigest_doc,\n             \"sintdigest() -> int\\n\"\n             \"\\n\"\n             \"Return the digest value as a signed integer.\\n\"\n             \"\\n\"\n             \"Returns:\\n\"\n             \"    int: The digest value as a signed integer.\\n\");\n\nstatic PyObject *\nMMH3Hasher32_sintdigest(MMH3Hasher32 *self, PyObject *Py_UNUSED(ignored))\n{\n    MMH3_HASHER_LOCK(self);\n    uint32_t h = digest32_impl(self->h, self->buffer, self->length);\n    MMH3_HASHER_UNLOCK(self);\n\n    // Note that simple casting (\"(int32_t) h\") is an undefined behavior\n    int32_t result = *(int32_t *)&h;\n\n    return PyLong_FromLong(result);\n}\n\nPyDoc_STRVAR(MMH3Hasher_uintdigest_doc,\n             \"uintdigest() -> int\\n\"\n             \"\\n\"\n             \"Return the digest value as an unsigned integer.\\n\"\n             \"\\n\"\n             \"Returns:\\n\"\n             \"    int: The digest value as an unsigned integer.\\n\");\n\nstatic PyObject *\nMMH3Hasher32_uintdigest(MMH3Hasher32 *self, PyObject *Py_UNUSED(ignored))\n{\n    MMH3_HASHER_LOCK(self);\n    uint32_t h = digest32_impl(self->h, self->buffer, self->length);\n    MMH3_HASHER_UNLOCK(self);\n\n    return PyLong_FromUnsignedLong(h);\n}\n\nPyDoc_STRVAR(MMH3Hasher32_copy_doc,\n             \"copy() -> mmh3_32\\n\"\n             \"\\n\"\n             \"Return a copy of the hash object..\\n\"\n             \"\\n\"\n             \"Returns:\\n\"\n             \"    mmh3_32: A copy of this hash object.\\n\");\n\nstatic PyObject *\nMMH3Hasher32_copy(MMH3Hasher32 *self, PyObject *Py_UNUSED(ignored))\n{\n    MMH3Hasher32 *p;\n\n    if ((p = PyObject_New(MMH3Hasher32, &MMH3Hasher32Type)) == NULL) {\n        return NULL;\n    }\n\n    MMH3_HASHER_LOCK(self);\n    p->h = self->h;\n    p->buffer = self->buffer;\n    p->shift = self->shift;\n    p->length = self->length;\n    MMH3_HASHER_INIT_MUTEX(p);\n    MMH3_HASHER_UNLOCK(self);\n\n    return (PyObject *)p;\n}\n\nstatic PyMethodDef MMH3Hasher32_methods[] = {\n    {\"update\", (PyCFunction)MMH3Hasher32_update, METH_O,\n     MMH3Hasher_update_doc},\n    {\n        \"digest\",\n        (PyCFunction)MMH3Hasher32_digest,\n        METH_NOARGS,\n        MMH3Hasher_digest_doc,\n    },\n    {\"sintdigest\", (PyCFunction)MMH3Hasher32_sintdigest, METH_NOARGS,\n     MMH3Hasher_sintdigest_doc},\n    {\"uintdigest\", (PyCFunction)MMH3Hasher32_uintdigest, METH_NOARGS,\n     MMH3Hasher_uintdigest_doc},\n    {\"copy\", (PyCFunction)MMH3Hasher32_copy, METH_NOARGS,\n     MMH3Hasher32_copy_doc},\n    {NULL} /* Sentinel */\n};\n\nstatic PyObject *\nMMH3Hasher32_get_digest_size(PyObject *self, void *closure)\n{\n    return PyLong_FromLong(MMH3_32_DIGESTSIZE);\n}\n\nstatic PyObject *\nMMH3Hasher32_get_block_size(PyObject *self, void *closure)\n{\n    return PyLong_FromLong(MMH3_32_BLOCKSIZE);\n}\n\nstatic PyObject *\nMMH3Hasher32_get_name(PyObject *self, void *closure)\n{\n    return PyUnicode_FromStringAndSize(\"mmh3_32\", 7);\n}\n\nstatic PyGetSetDef MMH3Hasher32_getsetters[] = {\n    {\"digest_size\", (getter)MMH3Hasher32_get_digest_size, NULL,\n     \"int: Number of bytes in this hashes output\", NULL},\n    {\"block_size\", (getter)MMH3Hasher32_get_block_size, NULL,\n     \"int: Number of bytes of the internal block of this algorithm\", NULL},\n    {\"name\", (getter)MMH3Hasher32_get_name, NULL,\n     \"str: The hash algorithm being used by this object\", NULL},\n    {NULL} /* Sentinel */\n};\n\nPyDoc_STRVAR(\n    MMH3Hasher32Type_doc,\n    \"__init__(data=None, seed=0)\\n\"\n    \"\\n\"\n    \"Hasher for incrementally calculating the murmurhash3_x86_32 hash.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    data (Buffer | None): The initial data to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \".. versionchanged:: 5.2.0\\n\"\n    \"    Experimental no-GIL support; thread safety not fully verified.\\n\"\n    \"\\n\"\n    \".. versionchanged:: 5.0.0\\n\"\n    \"    Added the optional ``data`` parameter as the first argument.\\n\"\n    \"    The ``seed`` argument is now strictly checked for valid range.\\n\");\n\nstatic PyTypeObject MMH3Hasher32Type = {\n    PyVarObject_HEAD_INIT(NULL, 0).tp_name = \"mmh3.mmh3_32\",\n    .tp_doc = MMH3Hasher32Type_doc,\n    .tp_basicsize = sizeof(MMH3Hasher32),\n    .tp_itemsize = 0,\n    .tp_flags = Py_TPFLAGS_DEFAULT,\n    .tp_new = MMH3Hasher32_new,\n    .tp_init = (initproc)MMH3Hasher32_init,\n    .tp_dealloc = (destructor)MMH3Hasher32_dealloc,\n    .tp_methods = MMH3Hasher32_methods,\n    .tp_getset = MMH3Hasher32_getsetters,\n};\n\n//-----------------------------------------------------------------------------\n// Hasher for murmurhash3_x64_128\ntypedef struct {\n    PyObject_HEAD uint64_t h1;\n    uint64_t h2;\n    uint64_t buffer1;\n    uint64_t buffer2;\n    uint8_t shift;\n    Py_ssize_t length;\n#ifdef Py_GIL_DISABLED\n    PyMutex mutex;\n#endif\n} MMH3Hasher128x64;\n\nstatic PyTypeObject MMH3Hasher128x64Type;\n\nstatic FORCE_INLINE void\nupdate_x64_128_impl(MMH3Hasher128x64 *self, Py_buffer *buf)\n{\n    Py_ssize_t i = 0;\n    uint64_t h1 = 0;\n    uint64_t h2 = 0;\n    uint64_t k1 = 0;\n    uint64_t k2 = 0;\n\n    MMH3_HASHER_LOCK(self);\n    h1 = self->h1;\n    h2 = self->h2;\n\n    for (; i + 16 <= buf->len; i += 16) {\n        k1 = getblock64(buf->buf, (i / 16) * 2);\n        k2 = getblock64(buf->buf, (i / 16) * 2 + 1);\n\n        if (self->shift == 0) {  // TODO: use bit ops\n            self->buffer1 = k1;\n            self->buffer2 = k2;\n        }\n        else if (self->shift < 64) {\n            self->buffer1 |= k1 << self->shift;\n            self->buffer2 = (k1 >> (64 - self->shift)) | (k2 << self->shift);\n        }\n        else if (self->shift == 64) {\n            self->buffer2 = k1;\n        }\n        else {\n            self->buffer2 |= k1 << (self->shift - 64);\n        }\n\n        h1 ^= mixK1_x64_128(self->buffer1);\n        h1 = mixH_x64_128(h1, h2, 27, 0x52dce729UL);\n        h2 ^= mixK2_x64_128(self->buffer2);\n        h2 = mixH_x64_128(h2, h1, 31, 0x38495ab5UL);\n\n        self->length += 16;\n        if (self->shift == 0) {  // TODO: use bit ops\n            self->buffer1 = 0;\n            self->buffer2 = 0;\n        }\n        else if (self->shift < 64) {\n            self->buffer1 = k2 >> (64 - self->shift);\n            self->buffer2 = 0;\n        }\n        else if (self->shift == 64) {\n            self->buffer1 = k2;\n            self->buffer2 = 0;\n        }\n        else {\n            self->buffer1 =\n                k1 >> (128 - self->shift) | (k2 << (self->shift - 64));\n            self->buffer2 = k2 >> (128 - self->shift);\n        }\n    }\n\n    for (; i < buf->len; i++) {\n        k1 = ((uint8_t *)buf->buf)[i];\n        if (self->shift < 64) {  // TODO: use bit ops\n            self->buffer1 |= k1 << self->shift;\n        }\n        else {\n            self->buffer2 |= k1 << (self->shift - 64);\n        }\n        self->shift += 8;\n        self->length += 1;\n\n        if (self->shift >= 128) {\n            h1 ^= mixK1_x64_128(self->buffer1);\n            h1 = mixH_x64_128(h1, h2, 27, 0x52dce729UL);\n            h2 ^= mixK2_x64_128(self->buffer2);\n            h2 = mixH_x64_128(h2, h1, 31, 0x38495ab5UL);\n\n            self->buffer1 = 0;\n            self->buffer2 = 0;\n            self->shift -= 128;\n        }\n    }\n\n    self->h1 = h1;\n    self->h2 = h2;\n    MMH3_HASHER_UNLOCK(self);\n\n    PyBuffer_Release(buf);\n}\n\nstatic void\nMMH3Hasher128x64_dealloc(MMH3Hasher128x64 *self)\n{\n    Py_TYPE(self)->tp_free((PyObject *)self);\n}\n\nstatic PyObject *\nMMH3Hasher128x64_new(PyTypeObject *type, PyObject *args, PyObject *kwds)\n{\n    MMH3Hasher128x64 *self;\n    self = (MMH3Hasher128x64 *)type->tp_alloc(type, 0);\n    if (self != NULL) {\n        self->h1 = 0;\n        self->h2 = 0;\n        self->buffer1 = 0;\n        self->buffer2 = 0;\n        self->shift = 0;\n        self->length = 0;\n        MMH3_HASHER_INIT_MUTEX(self);\n    }\n    return (PyObject *)self;\n}\n\nstatic int\nMMH3Hasher128x64_init(MMH3Hasher128x64 *self, PyObject *args, PyObject *kwds)\n{\n    Py_buffer target_buf = {0};\n    long long seed = 0;\n    static char *kwlist[] = {\"data\", \"seed\", NULL};\n\n    if (!PyArg_ParseTupleAndKeywords(args, kwds, \"|y*L\", kwlist, &target_buf,\n                                     &seed))\n        return -1;\n\n    MMH3_VALIDATE_SEED_RETURN_INT(seed, target_buf);\n\n    self->h1 = (uint64_t)seed;\n    self->h2 = self->h1;\n\n    if (target_buf.buf != NULL) {\n        // target_buf will be released in update_x64_128_impl\n        update_x64_128_impl(self, &target_buf);\n    }\n\n    return 0;\n}\n\nstatic PyObject *\nMMH3Hasher128x64_update(MMH3Hasher128x64 *self, PyObject *obj)\n{\n    Py_buffer buf;\n\n    GET_BUFFER_VIEW_OR_ERROUT(obj, &buf);\n\n    // buf will be released in update_x64_128_impl\n    update_x64_128_impl(self, &buf);\n\n    Py_RETURN_NONE;\n}\n\nstatic PyObject *\nMMH3Hasher128x64_digest(MMH3Hasher128x64 *self, PyObject *Py_UNUSED(ignored))\n{\n    const char out[MMH3_128_DIGESTSIZE];\n    MMH3_HASHER_LOCK(self);\n    digest_x64_128_impl(self->h1, self->h2, self->buffer1, self->buffer2,\n                        self->length, out);\n    MMH3_HASHER_UNLOCK(self);\n    return PyBytes_FromStringAndSize(out, MMH3_128_DIGESTSIZE);\n}\n\nstatic PyObject *\nMMH3Hasher128x64_sintdigest(MMH3Hasher128x64 *self,\n                            PyObject *Py_UNUSED(ignored))\n{\n    const char out[MMH3_128_DIGESTSIZE];\n    MMH3_HASHER_LOCK(self);\n    digest_x64_128_impl(self->h1, self->h2, self->buffer1, self->buffer2,\n                        self->length, out);\n    MMH3_HASHER_UNLOCK(self);\n    const int little_endian = 1;\n    const int is_signed = 1;\n\n    /**\n     * _PyLong_FromByteArray is not a part of the official Python/C API\n     * and may be removed in the future (although it is practically stable).\n     * cf.\n     * https://mail.python.org/pipermail/python-list/2006-August/372365.html\n     */\n    PyObject *retval = _PyLong_FromByteArray(\n        (unsigned char *)out, MMH3_128_DIGESTSIZE, little_endian, is_signed);\n\n    return retval;\n}\n\nstatic PyObject *\nMMH3Hasher128x64_uintdigest(MMH3Hasher128x64 *self,\n                            PyObject *Py_UNUSED(ignored))\n{\n    const char out[MMH3_128_DIGESTSIZE];\n    MMH3_HASHER_LOCK(self);\n    digest_x64_128_impl(self->h1, self->h2, self->buffer1, self->buffer2,\n                        self->length, out);\n    MMH3_HASHER_UNLOCK(self);\n    const int little_endian = 1;\n    const int is_signed = 0;\n\n    /**\n     * _PyLong_FromByteArray is not a part of the official Python/C API\n     * and may be removed in the future (although it is practically stable).\n     * cf.\n     * https://mail.python.org/pipermail/python-list/2006-August/372365.html\n     */\n    PyObject *retval = _PyLong_FromByteArray(\n        (unsigned char *)out, MMH3_128_DIGESTSIZE, little_endian, is_signed);\n\n    return retval;\n}\n\nPyDoc_STRVAR(MMH3Hasher128_stupledigest_doc,\n             \"stupledigest() -> tuple[int, int]\\n\"\n             \"\\n\"\n             \"Return the digest value as a tuple of two signed integers.\\n\"\n             \"\\n\"\n             \"Returns:\\n\"\n             \"    tuple[int, int]: The digest value as a tuple of two signed\\n\"\n             \"    integers.\\n\");\n\nstatic PyObject *\nMMH3Hasher128x64_stupledigest(MMH3Hasher128x64 *self,\n                              PyObject *Py_UNUSED(ignored))\n{\n    const char out[MMH3_128_DIGESTSIZE];\n    MMH3_HASHER_LOCK(self);\n    digest_x64_128_impl(self->h1, self->h2, self->buffer1, self->buffer2,\n                        self->length, out);\n    MMH3_HASHER_UNLOCK(self);\n\n    const char *valflag = \"LL\";\n    uint64_t result1 = ((uint64_t *)out)[0];\n    uint64_t result2 = ((uint64_t *)out)[1];\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    result1 = bswap_64(result1);\n    result2 = bswap_64(result2);\n#endif\n\n    return Py_BuildValue(valflag, result1, result2);\n}\n\nPyDoc_STRVAR(\n    MMH3Hasher128_utupledigest_doc,\n    \"utupledigest() -> tuple[int, int]\\n\"\n    \"\\n\"\n    \"Return the digest value as a tuple of two unsigned integers.\\n\"\n    \"\\n\"\n    \"Returns:\\n\"\n    \"    tuple[int, int]: The digest value as a tuple of two unsigned\\n\"\n    \"    integers.\\n\");\n\nstatic PyObject *\nMMH3Hasher128x64_utupledigest(MMH3Hasher128x64 *self,\n                              PyObject *Py_UNUSED(ignored))\n{\n    const char out[MMH3_128_DIGESTSIZE];\n    MMH3_HASHER_LOCK(self);\n    digest_x64_128_impl(self->h1, self->h2, self->buffer1, self->buffer2,\n                        self->length, out);\n    MMH3_HASHER_UNLOCK(self);\n\n    const char *valflag = \"KK\";\n    uint64_t result1 = ((uint64_t *)out)[0];\n    uint64_t result2 = ((uint64_t *)out)[1];\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    result1 = bswap_64(result1);\n    result2 = bswap_64(result2);\n#endif\n\n    return Py_BuildValue(valflag, result1, result2);\n}\n\nPyDoc_STRVAR(MMH3Hasher128x64_copy_doc,\n             \"copy() -> mmh3_128x64\\n\"\n             \"\\n\"\n             \"Return a copy of the hash object..\\n\"\n             \"\\n\"\n             \"Returns:\\n\"\n             \"    mmh3_128x64: A copy of this hash object.\\n\");\n\nstatic PyObject *\nMMH3Hasher128x64_copy(MMH3Hasher128x64 *self, PyObject *Py_UNUSED(ignored))\n{\n    MMH3Hasher128x64 *p;\n\n    if ((p = PyObject_New(MMH3Hasher128x64, &MMH3Hasher128x64Type)) == NULL) {\n        return NULL;\n    }\n\n    MMH3_HASHER_LOCK(self);\n    p->h1 = self->h1;\n    p->h2 = self->h2;\n    p->buffer1 = self->buffer1;\n    p->buffer2 = self->buffer2;\n    p->shift = self->shift;\n    p->length = self->length;\n    MMH3_HASHER_INIT_MUTEX(p);\n    MMH3_HASHER_UNLOCK(self);\n\n    return (PyObject *)p;\n}\n\nstatic PyMethodDef MMH3Hasher128x64_methods[] = {\n    {\"update\", (PyCFunction)MMH3Hasher128x64_update, METH_O,\n     MMH3Hasher_update_doc},\n    {\"digest\", (PyCFunction)MMH3Hasher128x64_digest, METH_NOARGS,\n     MMH3Hasher_digest_doc},\n    {\"sintdigest\", (PyCFunction)MMH3Hasher128x64_sintdigest, METH_NOARGS,\n     MMH3Hasher_sintdigest_doc},\n    {\"uintdigest\", (PyCFunction)MMH3Hasher128x64_uintdigest, METH_NOARGS,\n     MMH3Hasher_uintdigest_doc},\n    {\"stupledigest\", (PyCFunction)MMH3Hasher128x64_stupledigest, METH_NOARGS,\n     MMH3Hasher128_stupledigest_doc},\n    {\"utupledigest\", (PyCFunction)MMH3Hasher128x64_utupledigest, METH_NOARGS,\n     MMH3Hasher128_utupledigest_doc},\n    {\"copy\", (PyCFunction)MMH3Hasher128x64_copy, METH_NOARGS,\n     MMH3Hasher128x64_copy_doc},\n    {NULL} /* Sentinel */\n};\n\nstatic PyObject *\nMMH3Hasher128x64_get_digest_size(PyObject *self, void *closure)\n{\n    return PyLong_FromLong(MMH3_128_DIGESTSIZE);\n}\n\nstatic PyObject *\nMMH3Hasher128x64_get_block_size(PyObject *self, void *closure)\n{\n    return PyLong_FromLong(MMH3_128_BLOCKSIZE);\n}\n\nstatic PyObject *\nMMH3Hasher128x64_get_name(PyObject *self, void *closure)\n{\n    return PyUnicode_FromStringAndSize(\"mmh3_x64_128\", 12);\n}\n\nstatic PyGetSetDef MMH3Hasher128x64_getsetters[] = {\n    {\"digest_size\", (getter)MMH3Hasher128x64_get_digest_size, NULL,\n     \"int: Number of bytes in this hashes output.\", NULL},\n    {\"block_size\", (getter)MMH3Hasher128x64_get_block_size, NULL,\n     \"int: Number of bytes of the internal block of this algorithm.\", NULL},\n    {\"name\", (getter)MMH3Hasher128x64_get_name, NULL,\n     \"str: The hash algorithm being used by this object.\", NULL},\n    {NULL} /* Sentinel */\n};\n\nPyDoc_STRVAR(\n    MMH3Hasher128x64Type_doc,\n    \"__init__(data=None, seed=0)\\n\"\n    \"\\n\"\n    \"Hasher for incrementally calculating the murmurhash3_x64_128 hash.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    data (Buffer | None): The initial data to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range\\n\"\n    \"        [0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \".. versionchanged:: 5.2.0\\n\"\n    \"    Experimental no-GIL support; thread safety not fully verified.\\n\"\n    \"\\n\"\n    \".. versionchanged:: 5.0.0\\n\"\n    \"    Added the optional ``data`` parameter as the first argument.\\n\"\n    \"    The ``seed`` argument is now strictly checked for valid range.\\n\");\n\nstatic PyTypeObject MMH3Hasher128x64Type = {\n    PyVarObject_HEAD_INIT(NULL, 0).tp_name = \"mmh3.mmh3_x64_128\",\n    .tp_doc = MMH3Hasher128x64Type_doc,\n    .tp_basicsize = sizeof(MMH3Hasher128x64),\n    .tp_itemsize = 0,\n    .tp_flags = Py_TPFLAGS_DEFAULT,\n    .tp_new = MMH3Hasher128x64_new,\n    .tp_init = (initproc)MMH3Hasher128x64_init,\n    .tp_dealloc = (destructor)MMH3Hasher128x64_dealloc,\n    .tp_methods = MMH3Hasher128x64_methods,\n    .tp_getset = MMH3Hasher128x64_getsetters,\n};\n\n//-----------------------------------------------------------------------------\n// Hasher for murmurhash3_x86_128\ntypedef struct {\n    PyObject_HEAD uint32_t h1;\n    uint32_t h2;\n    uint32_t h3;\n    uint32_t h4;\n    uint32_t buffer1;\n    uint32_t buffer2;\n    uint32_t buffer3;\n    uint32_t buffer4;\n    uint8_t shift;\n    Py_ssize_t length;\n#ifdef Py_GIL_DISABLED\n    PyMutex mutex;\n#endif\n} MMH3Hasher128x86;\n\nstatic PyTypeObject MMH3Hasher128x86Type;\n\nstatic FORCE_INLINE void\nupdate_x86_128_impl(MMH3Hasher128x86 *self, Py_buffer *buf)\n{\n    Py_ssize_t i = 0;\n    uint32_t h1 = 0;\n    uint32_t h2 = 0;\n    uint32_t h3 = 0;\n    uint32_t h4 = 0;\n    uint32_t k1 = 0;\n\n    MMH3_HASHER_LOCK(self);\n    h1 = self->h1;\n    h2 = self->h2;\n    h3 = self->h3;\n    h4 = self->h4;\n\n    for (; i < buf->len; i++) {\n        k1 = ((uint8_t *)buf->buf)[i];\n        if (self->shift < 32) {  // TODO: use bit ops\n            self->buffer1 |= k1 << self->shift;\n        }\n        else if (self->shift < 64) {\n            self->buffer2 |= k1 << (self->shift - 32);\n        }\n        else if (self->shift < 96) {\n            self->buffer3 |= k1 << (self->shift - 64);\n        }\n        else {\n            self->buffer4 |= k1 << (self->shift - 96);\n        }\n        self->shift += 8;\n        self->length += 1;\n\n        if (self->shift >= 128) {\n            const uint32_t c1 = 0x239b961b;\n            const uint32_t c2 = 0xab0e9789;\n            const uint32_t c3 = 0x38b34ae5;\n            const uint32_t c4 = 0xa1e38b93;\n\n            h1 ^= mixK_x86_128(self->buffer1, 15, c1, c2);\n            h1 = mixH1(h1, h2, 19, 0x561ccd1bUL);\n\n            h2 ^= mixK_x86_128(self->buffer2, 16, c2, c3);\n            h2 = mixH1(h2, h3, 17, 0x0bcaa747UL);\n\n            h3 ^= mixK_x86_128(self->buffer3, 17, c3, c4);\n            h3 = mixH1(h3, h4, 15, 0x96cd1c35UL);\n\n            h4 ^= mixK_x86_128(self->buffer4, 18, c4, c1);\n            h4 = mixH1(h4, h1, 13, 0x32ac3b17UL);\n\n            self->buffer1 = 0;\n            self->buffer2 = 0;\n            self->buffer3 = 0;\n            self->buffer4 = 0;\n            self->shift -= 128;\n        }\n    }\n\n    self->h1 = h1;\n    self->h2 = h2;\n    self->h3 = h3;\n    self->h4 = h4;\n    MMH3_HASHER_UNLOCK(self);\n\n    PyBuffer_Release(buf);\n}\n\nstatic void\nMMH3Hasher128x86_dealloc(MMH3Hasher128x86 *self)\n{\n    Py_TYPE(self)->tp_free((PyObject *)self);\n}\n\nstatic PyObject *\nMMH3Hasher128x86_new(PyTypeObject *type, PyObject *args, PyObject *kwds)\n{\n    MMH3Hasher128x86 *self;\n    self = (MMH3Hasher128x86 *)type->tp_alloc(type, 0);\n    if (self != NULL) {\n        self->h1 = 0;\n        self->h2 = 0;\n        self->h3 = 0;\n        self->h4 = 0;\n        self->buffer1 = 0;\n        self->buffer2 = 0;\n        self->buffer3 = 0;\n        self->buffer4 = 0;\n        self->shift = 0;\n        self->length = 0;\n        MMH3_HASHER_INIT_MUTEX(self);\n    }\n    return (PyObject *)self;\n}\n\nstatic int\nMMH3Hasher128x86_init(MMH3Hasher128x86 *self, PyObject *args, PyObject *kwds)\n{\n    Py_buffer target_buf = {0};\n    long long seed = 0;\n    static char *kwlist[] = {\"data\", \"seed\", NULL};\n\n    if (!PyArg_ParseTupleAndKeywords(args, kwds, \"|y*L\", kwlist, &target_buf,\n                                     &seed))\n        return -1;\n\n    MMH3_VALIDATE_SEED_RETURN_INT(seed, target_buf);\n    self->h1 = (uint32_t)seed;\n    self->h2 = self->h1;\n    self->h3 = self->h1;\n    self->h4 = self->h1;\n\n    if (target_buf.buf != NULL) {\n        // target_buf will be released in update_x86_128_impl\n        update_x86_128_impl(self, &target_buf);\n    }\n\n    return 0;\n}\n\nstatic PyObject *\nMMH3Hasher128x86_update(MMH3Hasher128x86 *self, PyObject *obj)\n{\n    Py_buffer buf;\n\n    GET_BUFFER_VIEW_OR_ERROUT(obj, &buf);\n\n    // buf will be released in update_x86_128_impl\n    update_x86_128_impl(self, &buf);\n\n    Py_RETURN_NONE;\n}\n\nstatic PyObject *\nMMH3Hasher128x86_digest(MMH3Hasher128x86 *self, PyObject *Py_UNUSED(ignored))\n{\n    char out[MMH3_128_DIGESTSIZE];\n    MMH3_HASHER_LOCK(self);\n    digest_x86_128_impl(self->h1, self->h2, self->h3, self->h4, self->buffer1,\n                        self->buffer2, self->buffer3, self->buffer4,\n                        self->length, out);\n    MMH3_HASHER_UNLOCK(self);\n    return PyBytes_FromStringAndSize(out, MMH3_128_DIGESTSIZE);\n}\n\nstatic PyObject *\nMMH3Hasher128x86_sintdigest(MMH3Hasher128x86 *self,\n                            PyObject *Py_UNUSED(ignored))\n{\n    const char out[MMH3_128_DIGESTSIZE];\n    MMH3_HASHER_LOCK(self);\n    digest_x86_128_impl(self->h1, self->h2, self->h3, self->h4, self->buffer1,\n                        self->buffer2, self->buffer3, self->buffer4,\n                        self->length, out);\n    MMH3_HASHER_UNLOCK(self);\n    const int little_endian = 1;\n    const int is_signed = 1;\n\n    /**\n     * _PyLong_FromByteArray is not a part of the official Python/C API\n     * and may be removed in the future (although it is practically stable).\n     * cf.\n     * https://mail.python.org/pipermail/python-list/2006-August/372365.html\n     */\n    PyObject *retval = _PyLong_FromByteArray(\n        (unsigned char *)out, MMH3_128_DIGESTSIZE, little_endian, is_signed);\n\n    return retval;\n}\n\nstatic PyObject *\nMMH3Hasher128x86_uintdigest(MMH3Hasher128x86 *self,\n                            PyObject *Py_UNUSED(ignored))\n{\n    const char out[MMH3_128_DIGESTSIZE];\n    MMH3_HASHER_LOCK(self);\n    digest_x86_128_impl(self->h1, self->h2, self->h3, self->h4, self->buffer1,\n                        self->buffer2, self->buffer3, self->buffer4,\n                        self->length, out);\n    MMH3_HASHER_UNLOCK(self);\n    const int little_endian = 1;\n    const int is_signed = 0;\n\n    /**\n     * _PyLong_FromByteArray is not a part of the official Python/C API\n     * and may be removed in the future (although it is practically stable).\n     * cf.\n     * https://mail.python.org/pipermail/python-list/2006-August/372365.html\n     */\n    PyObject *retval = _PyLong_FromByteArray(\n        (unsigned char *)out, MMH3_128_DIGESTSIZE, little_endian, is_signed);\n\n    return retval;\n}\n\nstatic PyObject *\nMMH3Hasher128x86_stupledigest(MMH3Hasher128x86 *self,\n                              PyObject *Py_UNUSED(ignored))\n{\n    const char out[MMH3_128_DIGESTSIZE];\n    MMH3_HASHER_LOCK(self);\n    digest_x86_128_impl(self->h1, self->h2, self->h3, self->h4, self->buffer1,\n                        self->buffer2, self->buffer3, self->buffer4,\n                        self->length, out);\n    MMH3_HASHER_UNLOCK(self);\n\n    const char *valflag = \"LL\";\n    uint64_t result1 = ((uint64_t *)out)[0];\n    uint64_t result2 = ((uint64_t *)out)[1];\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    result1 = bswap_64(result1);\n    result2 = bswap_64(result2);\n#endif\n\n    return Py_BuildValue(valflag, result1, result2);\n}\n\nstatic PyObject *\nMMH3Hasher128x86_utupledigest(MMH3Hasher128x86 *self,\n                              PyObject *Py_UNUSED(ignored))\n{\n    const char out[MMH3_128_DIGESTSIZE];\n    MMH3_HASHER_LOCK(self);\n    digest_x86_128_impl(self->h1, self->h2, self->h3, self->h4, self->buffer1,\n                        self->buffer2, self->buffer3, self->buffer4,\n                        self->length, out);\n    MMH3_HASHER_UNLOCK(self);\n\n    const char *valflag = \"KK\";\n    uint64_t result1 = ((uint64_t *)out)[0];\n    uint64_t result2 = ((uint64_t *)out)[1];\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    result1 = bswap_64(result1);\n    result2 = bswap_64(result2);\n#endif\n\n    return Py_BuildValue(valflag, result1, result2);\n}\n\nPyDoc_STRVAR(MMH3Hasher128x86_copy_doc,\n             \"copy() -> mmh3_128x86\\n\"\n             \"\\n\"\n             \"Return a copy of the hash object..\\n\"\n             \"\\n\"\n             \"Returns:\\n\"\n             \"    mmh3_128x86: A copy of this hash object.\\n\");\n\nstatic PyObject *\nMMH3Hasher128x86_copy(MMH3Hasher128x86 *self, PyObject *Py_UNUSED(ignored))\n{\n    MMH3Hasher128x86 *p;\n\n    if ((p = PyObject_New(MMH3Hasher128x86, &MMH3Hasher128x86Type)) == NULL) {\n        return NULL;\n    }\n\n    MMH3_HASHER_LOCK(self);\n    p->h1 = self->h1;\n    p->h2 = self->h2;\n    p->h3 = self->h3;\n    p->h4 = self->h4;\n    p->buffer1 = self->buffer1;\n    p->buffer2 = self->buffer2;\n    p->buffer3 = self->buffer3;\n    p->buffer4 = self->buffer4;\n    p->shift = self->shift;\n    p->length = self->length;\n    MMH3_HASHER_INIT_MUTEX(p);\n    MMH3_HASHER_UNLOCK(self);\n\n    return (PyObject *)p;\n}\n\nstatic PyMethodDef MMH3Hasher128x86_methods[] = {\n    {\"update\", (PyCFunction)MMH3Hasher128x86_update, METH_O,\n     MMH3Hasher_update_doc},\n    {\"digest\", (PyCFunction)MMH3Hasher128x86_digest, METH_NOARGS,\n     MMH3Hasher_digest_doc},\n    {\"sintdigest\", (PyCFunction)MMH3Hasher128x86_sintdigest, METH_NOARGS,\n     MMH3Hasher_sintdigest_doc},\n    {\"uintdigest\", (PyCFunction)MMH3Hasher128x86_uintdigest, METH_NOARGS,\n     MMH3Hasher_uintdigest_doc},\n    {\"stupledigest\", (PyCFunction)MMH3Hasher128x86_stupledigest, METH_NOARGS,\n     MMH3Hasher128_stupledigest_doc},\n    {\"utupledigest\", (PyCFunction)MMH3Hasher128x86_utupledigest, METH_NOARGS,\n     MMH3Hasher128_utupledigest_doc},\n    {\"copy\", (PyCFunction)MMH3Hasher128x86_copy, METH_NOARGS,\n     MMH3Hasher128x86_copy_doc},\n    {NULL} /* Sentinel */\n};\n\nstatic PyObject *\nMMH3Hasher128x86_get_digest_size(PyObject *self, void *closure)\n{\n    return PyLong_FromLong(MMH3_128_DIGESTSIZE);\n}\n\nstatic PyObject *\nMMH3Hasher128x86_get_block_size(PyObject *self, void *closure)\n{\n    return PyLong_FromLong(MMH3_128_BLOCKSIZE);\n}\n\nstatic PyObject *\nMMH3Hasher128x86_get_name(PyObject *self, void *closure)\n{\n    return PyUnicode_FromStringAndSize(\"mmh3_x86_128\", 12);\n}\n\nstatic PyGetSetDef MMH3Hasher128x86_getsetters[] = {\n    {\"digest_size\", (getter)MMH3Hasher128x86_get_digest_size, NULL,\n     \"int: Number of bytes in this hashes output\", NULL},\n    {\"block_size\", (getter)MMH3Hasher128x86_get_block_size, NULL,\n     \"int: Number of bytes of the internal block of this algorithm\", NULL},\n    {\"name\", (getter)MMH3Hasher128x86_get_name, NULL,\n     \"str: The hash algorithm being used by this object\", NULL},\n    {NULL} /* Sentinel */\n};\n\nPyDoc_STRVAR(\n    MMH3Hasher128x86Type_doc,\n    \"__init__(data=None, seed=0)\\n\"\n    \"\\n\"\n    \"Hasher for incrementally calculating the murmurhash3_x86_128 hash.\\n\"\n    \"\\n\"\n    \"Args:\\n\"\n    \"    data (Buffer | None): The initial data to hash.\\n\"\n    \"    seed (int): The seed value. Must be an integer in the range \"\n    \"[0, 0xFFFFFFFF].\\n\"\n    \"\\n\"\n    \".. versionchanged:: 5.2.0\\n\"\n    \"    Experimental no-GIL support; thread safety not fully verified.\\n\"\n    \"\\n\"\n    \".. versionchanged:: 5.0.0\\n\"\n    \"    Added the optional ``data`` parameter as the first argument.\\n\"\n    \"    The ``seed`` argument is now strictly checked for valid range.\\n\");\n\nstatic PyTypeObject MMH3Hasher128x86Type = {\n    PyVarObject_HEAD_INIT(NULL, 0).tp_name = \"mmh3.mmh3_x86_128\",\n    .tp_doc = MMH3Hasher128x86Type_doc,\n    .tp_basicsize = sizeof(MMH3Hasher128x86),\n    .tp_itemsize = 0,\n    .tp_flags = Py_TPFLAGS_DEFAULT,\n    .tp_new = MMH3Hasher128x86_new,\n    .tp_init = (initproc)MMH3Hasher128x86_init,\n    .tp_dealloc = (destructor)MMH3Hasher128x86_dealloc,\n    .tp_methods = MMH3Hasher128x86_methods,\n    .tp_getset = MMH3Hasher128x86_getsetters,\n};\n\n//-----------------------------------------------------------------------------\n// Module\n\nstatic struct PyModuleDef mmh3module = {\n    PyModuleDef_HEAD_INIT,\n    \"mmh3\",\n    \"A Python front-end to MurmurHash3.\\n\"\n    \"\\n\"\n    \"A Python front-end to MurmurHash3, \"\n    \"a fast and robust non-cryptographic hash library \"\n    \"created by Austin Appleby (http://code.google.com/p/smhasher/).\\n\"\n    \"\\n\"\n    \"Ported by Hajime Senuma <hajime.senuma@gmail.com>. \"\n    \"If you find any bugs, please submit an issue via \"\n    \"https://github.com/hajimes/mmh3.\\n\"\n    \"\\n\"\n    \"Typical usage example:\\n\"\n    \"\\n\"\n    \"  mmh3.hash(\\\"foobar\\\", 42)\",\n    -1,\n    Mmh3Methods,\n    NULL,\n    NULL,\n    NULL,\n    NULL};\n\nPyMODINIT_FUNC\nPyInit_mmh3(void)\n{\n    if (PyType_Ready(&MMH3Hasher32Type) < 0)\n        return NULL;\n\n    if (PyType_Ready(&MMH3Hasher128x64Type) < 0)\n        return NULL;\n\n    if (PyType_Ready(&MMH3Hasher128x86Type) < 0)\n        return NULL;\n\n    PyObject *module = PyModule_Create(&mmh3module);\n\n    if (module == NULL)\n        return NULL;\n\n#ifdef Py_GIL_DISABLED\n    PyUnstable_Module_SetGIL(module, Py_MOD_GIL_NOT_USED);\n#endif\n\n    Py_INCREF(&MMH3Hasher32Type);\n    if (PyModule_AddObject(module, \"mmh3_32\", (PyObject *)&MMH3Hasher32Type) <\n        0) {\n        Py_DECREF(&MMH3Hasher32Type);\n        Py_DECREF(module);\n        return NULL;\n    }\n\n    Py_INCREF(&MMH3Hasher128x64Type);\n    if (PyModule_AddObject(module, \"mmh3_x64_128\",\n                           (PyObject *)&MMH3Hasher128x64Type) < 0) {\n        Py_DECREF(&MMH3Hasher128x64Type);\n        Py_DECREF(module);\n        return NULL;\n    }\n\n    Py_INCREF(&MMH3Hasher128x86Type);\n    if (PyModule_AddObject(module, \"mmh3_x86_128\",\n                           (PyObject *)&MMH3Hasher128x86Type) < 0) {\n        Py_DECREF(&MMH3Hasher128x86Type);\n        Py_DECREF(module);\n        return NULL;\n    }\n\n    return module;\n}"
  },
  {
    "path": "src/mmh3/murmurhash3.c",
    "content": "/***\n * This file is under MIT <year> Hajime Senuma, just like other files.\n * See LICENSE for details.\n *\n * It was originally written by Austin Appleby in C++ under the public domain,\n * but ported to PEP 7 C for Python 3.6 and later by the mmh3 project.\n *\n * Any issues should be reported to https://github.com/hajimes/mmh3/issues.\n *\n * The following is the original public domain notice by Austin Appleby.\n */\n\n//-----------------------------------------------------------------------------\n// MurmurHash3 was written by Austin Appleby, and is placed in the public\n// domain. The author hereby disclaims copyright to this source code.\n\n// Note - The x86 and x64 versions do _not_ produce the same results, as the\n// algorithms are optimized for their respective platforms. You can still\n// compile and run any of them on any platform, but your performance with the\n// non-native version will be less than optimal.\n\n#include \"murmurhash3.h\"\n\n//-----------------------------------------------------------------------------\n\nvoid\nmurmurhash3_x86_32(const void *key, Py_ssize_t len, uint32_t seed, void *out)\n{\n    const uint8_t *data = (const uint8_t *)key;\n    const Py_ssize_t nblocks = len / 4;\n\n    uint32_t h1 = seed;\n\n    const uint32_t c1 = 0xcc9e2d51;\n    const uint32_t c2 = 0x1b873593;\n\n    //----------\n    // body\n\n    const uint32_t *blocks = (const uint32_t *)(data + nblocks * 4);\n\n    for (Py_ssize_t i = -nblocks; i; i++) {\n        uint32_t k1 = getblock32(blocks, i);\n\n        k1 *= c1;\n        k1 = ROTL32(k1, 15);\n        k1 *= c2;\n\n        h1 ^= k1;\n        h1 = ROTL32(h1, 13);\n        h1 = h1 * 5 + 0xe6546b64;\n    }\n\n    //----------\n    // tail\n\n    const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);\n\n    uint32_t k1 = 0;\n\n    switch (len & 3) {\n        case 3:\n            k1 ^= tail[2] << 16;\n        case 2:\n            k1 ^= tail[1] << 8;\n        case 1:\n            k1 ^= tail[0];\n            k1 *= c1;\n            k1 = ROTL32(k1, 15);\n            k1 *= c2;\n            h1 ^= k1;\n    };\n\n    //----------\n    // finalization\n\n    h1 ^= len;\n\n    h1 = fmix32(h1);\n\n    *(uint32_t *)out = h1;\n}\n\n//-----------------------------------------------------------------------------\n\nvoid\nmurmurhash3_x86_128(const void *key, const Py_ssize_t len, uint32_t seed,\n                    void *out)\n{\n    const uint8_t *data = (const uint8_t *)key;\n    const Py_ssize_t nblocks = len / 16;\n\n    uint32_t h1 = seed;\n    uint32_t h2 = seed;\n    uint32_t h3 = seed;\n    uint32_t h4 = seed;\n\n    const uint32_t c1 = 0x239b961b;\n    const uint32_t c2 = 0xab0e9789;\n    const uint32_t c3 = 0x38b34ae5;\n    const uint32_t c4 = 0xa1e38b93;\n\n    //----------\n    // body\n\n    const uint32_t *blocks = (const uint32_t *)(data + nblocks * 16);\n\n    for (Py_ssize_t i = -nblocks; i; i++) {\n        uint32_t k1 = getblock32(blocks, i * 4 + 0);\n        uint32_t k2 = getblock32(blocks, i * 4 + 1);\n        uint32_t k3 = getblock32(blocks, i * 4 + 2);\n        uint32_t k4 = getblock32(blocks, i * 4 + 3);\n\n        k1 *= c1;\n        k1 = ROTL32(k1, 15);\n        k1 *= c2;\n        h1 ^= k1;\n\n        h1 = ROTL32(h1, 19);\n        h1 += h2;\n        h1 = h1 * 5 + 0x561ccd1b;\n\n        k2 *= c2;\n        k2 = ROTL32(k2, 16);\n        k2 *= c3;\n        h2 ^= k2;\n\n        h2 = ROTL32(h2, 17);\n        h2 += h3;\n        h2 = h2 * 5 + 0x0bcaa747;\n\n        k3 *= c3;\n        k3 = ROTL32(k3, 17);\n        k3 *= c4;\n        h3 ^= k3;\n\n        h3 = ROTL32(h3, 15);\n        h3 += h4;\n        h3 = h3 * 5 + 0x96cd1c35;\n\n        k4 *= c4;\n        k4 = ROTL32(k4, 18);\n        k4 *= c1;\n        h4 ^= k4;\n\n        h4 = ROTL32(h4, 13);\n        h4 += h1;\n        h4 = h4 * 5 + 0x32ac3b17;\n    }\n\n    //----------\n    // tail\n\n    const uint8_t *tail = (const uint8_t *)(data + nblocks * 16);\n\n    uint32_t k1 = 0;\n    uint32_t k2 = 0;\n    uint32_t k3 = 0;\n    uint32_t k4 = 0;\n\n    switch (len & 15) {\n        case 15:\n            k4 ^= tail[14] << 16;\n        case 14:\n            k4 ^= tail[13] << 8;\n        case 13:\n            k4 ^= tail[12] << 0;\n            k4 *= c4;\n            k4 = ROTL32(k4, 18);\n            k4 *= c1;\n            h4 ^= k4;\n\n        case 12:\n            k3 ^= tail[11] << 24;\n        case 11:\n            k3 ^= tail[10] << 16;\n        case 10:\n            k3 ^= tail[9] << 8;\n        case 9:\n            k3 ^= tail[8] << 0;\n            k3 *= c3;\n            k3 = ROTL32(k3, 17);\n            k3 *= c4;\n            h3 ^= k3;\n\n        case 8:\n            k2 ^= tail[7] << 24;\n        case 7:\n            k2 ^= tail[6] << 16;\n        case 6:\n            k2 ^= tail[5] << 8;\n        case 5:\n            k2 ^= tail[4] << 0;\n            k2 *= c2;\n            k2 = ROTL32(k2, 16);\n            k2 *= c3;\n            h2 ^= k2;\n\n        case 4:\n            k1 ^= tail[3] << 24;\n        case 3:\n            k1 ^= tail[2] << 16;\n        case 2:\n            k1 ^= tail[1] << 8;\n        case 1:\n            k1 ^= tail[0] << 0;\n            k1 *= c1;\n            k1 = ROTL32(k1, 15);\n            k1 *= c2;\n            h1 ^= k1;\n    };\n\n    //----------\n    // finalization\n\n    h1 ^= len;\n    h2 ^= len;\n    h3 ^= len;\n    h4 ^= len;\n\n    h1 += h2;\n    h1 += h3;\n    h1 += h4;\n    h2 += h1;\n    h3 += h1;\n    h4 += h1;\n\n    h1 = fmix32(h1);\n    h2 = fmix32(h2);\n    h3 = fmix32(h3);\n    h4 = fmix32(h4);\n\n    h1 += h2;\n    h1 += h3;\n    h1 += h4;\n    h2 += h1;\n    h3 += h1;\n    h4 += h1;\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    ((uint32_t *)out)[0] = h2;\n    ((uint32_t *)out)[1] = h1;\n    ((uint32_t *)out)[2] = h4;\n    ((uint32_t *)out)[3] = h3;\n#else\n    ((uint32_t *)out)[0] = h1;\n    ((uint32_t *)out)[1] = h2;\n    ((uint32_t *)out)[2] = h3;\n    ((uint32_t *)out)[3] = h4;\n#endif\n}\n\n//-----------------------------------------------------------------------------\n\nvoid\nmurmurhash3_x64_128(const void *key, const Py_ssize_t len, const uint32_t seed,\n                    void *out)\n{\n    const uint8_t *data = (const uint8_t *)key;\n    const Py_ssize_t nblocks = len / 16;\n\n    uint64_t h1 = seed;\n    uint64_t h2 = seed;\n\n    const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);\n    const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);\n\n    //----------\n    // body\n\n    const uint64_t *blocks = (const uint64_t *)(data);\n\n    for (Py_ssize_t i = 0; i < nblocks; i++) {\n        uint64_t k1 = getblock64(blocks, i * 2 + 0);\n        uint64_t k2 = getblock64(blocks, i * 2 + 1);\n\n        k1 *= c1;\n        k1 = ROTL64(k1, 31);\n        k1 *= c2;\n        h1 ^= k1;\n\n        h1 = ROTL64(h1, 27);\n        h1 += h2;\n        h1 = h1 * 5 + 0x52dce729;\n\n        k2 *= c2;\n        k2 = ROTL64(k2, 33);\n        k2 *= c1;\n        h2 ^= k2;\n\n        h2 = ROTL64(h2, 31);\n        h2 += h1;\n        h2 = h2 * 5 + 0x38495ab5;\n    }\n\n    //----------\n    // tail\n\n    const uint8_t *tail = (const uint8_t *)(data + nblocks * 16);\n\n    uint64_t k1 = 0;\n    uint64_t k2 = 0;\n\n    switch (len & 15) {\n        case 15:\n            k2 ^= ((uint64_t)tail[14]) << 48;\n        case 14:\n            k2 ^= ((uint64_t)tail[13]) << 40;\n        case 13:\n            k2 ^= ((uint64_t)tail[12]) << 32;\n        case 12:\n            k2 ^= ((uint64_t)tail[11]) << 24;\n        case 11:\n            k2 ^= ((uint64_t)tail[10]) << 16;\n        case 10:\n            k2 ^= ((uint64_t)tail[9]) << 8;\n        case 9:\n            k2 ^= ((uint64_t)tail[8]) << 0;\n            k2 *= c2;\n            k2 = ROTL64(k2, 33);\n            k2 *= c1;\n            h2 ^= k2;\n\n        case 8:\n            k1 ^= ((uint64_t)tail[7]) << 56;\n        case 7:\n            k1 ^= ((uint64_t)tail[6]) << 48;\n        case 6:\n            k1 ^= ((uint64_t)tail[5]) << 40;\n        case 5:\n            k1 ^= ((uint64_t)tail[4]) << 32;\n        case 4:\n            k1 ^= ((uint64_t)tail[3]) << 24;\n        case 3:\n            k1 ^= ((uint64_t)tail[2]) << 16;\n        case 2:\n            k1 ^= ((uint64_t)tail[1]) << 8;\n        case 1:\n            k1 ^= ((uint64_t)tail[0]) << 0;\n            k1 *= c1;\n            k1 = ROTL64(k1, 31);\n            k1 *= c2;\n            h1 ^= k1;\n    };\n\n    //----------\n    // finalization\n\n    h1 ^= len;\n    h2 ^= len;\n\n    h1 += h2;\n    h2 += h1;\n\n    h1 = fmix64(h1);\n    h2 = fmix64(h2);\n\n    h1 += h2;\n    h2 += h1;\n\n    ((uint64_t *)out)[0] = h1;\n    ((uint64_t *)out)[1] = h2;\n}\n\n//-----------------------------------------------------------------------------\n"
  },
  {
    "path": "src/mmh3/murmurhash3.h",
    "content": "/***\n * This file is under MIT <year> Hajime Senuma, just like other files.\n * See LICENSE for details.\n *\n * It was originally written by Austin Appleby in C++ under the public domain,\n * but ported to PEP 7 C for Python 3.6 and later by the mmh3 project.\n *\n * Any issues should be reported to https://github.com/hajimes/mmh3/issues.\n *\n * The following is the original public domain notice by Austin Appleby.\n */\n\n//-----------------------------------------------------------------------------\n// MurmurHash3 was written by Austin Appleby, and is placed in the public\n// domain. The author hereby disclaims copyright to this source code.\n\n#ifndef _MURMURHASH3_H_\n#define _MURMURHASH3_H_\n\n// To handle 64-bit data; see https://docs.python.org/3/c-api/arg.html\n#ifndef PY_SSIZE_T_CLEAN\n#define PY_SSIZE_T_CLEAN\n#endif\n#include <Python.h>\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n#include <byteswap.h>\n#endif\n\n//-----------------------------------------------------------------------------\n// Platform-specific functions and macros\n\n// Microsoft Visual Studio\n\n#if defined(_MSC_VER) && (_MSC_VER < 1600)\n\ntypedef signed __int8 int8_t;\ntypedef signed __int32 int32_t;\ntypedef signed __int64 int64_t;\ntypedef unsigned __int8 uint8_t;\ntypedef unsigned __int32 uint32_t;\ntypedef unsigned __int64 uint64_t;\n\n// Other compilers\n\n#else  // defined(_MSC_VER)\n\n#include <stdint.h>\n\n#endif  // !defined(_MSC_VER)\n\n//-----------------------------------------------------------------------------\n// Platform-specific functions and macros\n\n// Microsoft Visual Studio\n\n#if defined(_MSC_VER)\n\n#define FORCE_INLINE __forceinline\n\n#include <stdlib.h>\n\n#define ROTL32(x, y) _rotl(x, y)\n#define ROTL64(x, y) _rotl64(x, y)\n\n#define BIG_CONSTANT(x) (x)\n\n// Other compilers\n\n#else  // defined(_MSC_VER)\n\n#if ((__GNUC__ > 4) || (__GNUC__ == 4 && GNUC_MINOR >= 4))\n/* gcc version >= 4.4 4.1 = RHEL 5, 4.4 = RHEL 6. Don't inline for RHEL 5 gcc\n * which is 4.1*/\n#define FORCE_INLINE inline __attribute__((always_inline))\n#else\n#define FORCE_INLINE\n#endif\n\nstatic FORCE_INLINE uint32_t\nrotl32(uint32_t x, int8_t r)\n{\n    return (x << r) | (x >> (32 - r));\n}\n\nstatic FORCE_INLINE uint64_t\nrotl64(uint64_t x, int8_t r)\n{\n    return (x << r) | (x >> (64 - r));\n}\n\n#define ROTL32(x, y) rotl32(x, y)\n#define ROTL64(x, y) rotl64(x, y)\n\n#define BIG_CONSTANT(x) (x##LLU)\n\n#endif  // !defined(_MSC_VER)\n\n//-----------------------------------------------------------------------------\n// Block read - if your platform needs to do endian-swapping or can only\n// handle aligned reads, do the conversion here\n\nstatic FORCE_INLINE uint32_t\ngetblock32(const uint32_t *p, Py_ssize_t i)\n{\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    return bswap_32(p[i]);\n#else\n    return p[i];\n#endif\n}\n\nstatic FORCE_INLINE uint64_t\ngetblock64(const uint64_t *p, Py_ssize_t i)\n{\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    return bswap_64(p[i]);\n#else\n    return p[i];\n#endif\n}\n\n//-----------------------------------------------------------------------------\n// Building blocks for multiply and rotate (MUR) operations.\n// Names are taken from Google Guava's implementation\n\nstatic FORCE_INLINE uint32_t\nmixK1(uint32_t k1)\n{\n    const uint32_t c1 = 0xcc9e2d51;\n    const uint32_t c2 = 0x1b873593;\n\n    k1 *= c1;\n    k1 = ROTL32(k1, 15);\n    k1 *= c2;\n\n    return k1;\n}\nstatic FORCE_INLINE uint32_t\nmixH1(uint32_t h1, const uint32_t h2, const uint8_t shift, const uint32_t c1)\n{\n    h1 = ROTL32(h1, shift);\n    h1 += h2;\n    h1 = h1 * 5 + c1;\n\n    return h1;\n}\nstatic FORCE_INLINE uint64_t\nmixK_x64_128(uint64_t k1, const uint8_t shift, const uint64_t c1,\n             const uint64_t c2)\n{\n    k1 *= c1;\n    k1 = ROTL64(k1, shift);\n    k1 *= c2;\n\n    return k1;\n}\nstatic FORCE_INLINE uint64_t\nmixK1_x64_128(uint64_t k1)\n{\n    const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);\n    const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);\n\n    k1 *= c1;\n    k1 = ROTL64(k1, 31);\n    k1 *= c2;\n\n    return k1;\n}\nstatic FORCE_INLINE uint64_t\nmixK2_x64_128(uint64_t k2)\n{\n    const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);\n    const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);\n\n    k2 *= c2;\n    k2 = ROTL64(k2, 33);\n    k2 *= c1;\n\n    return k2;\n}\nstatic FORCE_INLINE uint64_t\nmixH_x64_128(uint64_t h1, uint64_t h2, const uint8_t shift, const uint32_t c)\n{\n    h1 = ROTL64(h1, shift);\n    h1 += h2;\n    h1 = h1 * 5 + c;\n\n    return h1;\n}\nstatic FORCE_INLINE uint64_t\nmixK_x86_128(uint32_t k, const uint8_t shift, const uint32_t c1,\n             const uint32_t c2)\n{\n    k *= c1;\n    k = ROTL32(k, shift);\n    k *= c2;\n\n    return k;\n}\n\n//-----------------------------------------------------------------------------\n// Finalization mix - force all bits of a hash block to avalanche\n\nstatic FORCE_INLINE uint32_t\nfmix32(uint32_t h)\n{\n    h ^= h >> 16;\n    h *= 0x85ebca6b;\n    h ^= h >> 13;\n    h *= 0xc2b2ae35;\n    h ^= h >> 16;\n\n    return h;\n}\n\n//----------\n\nstatic FORCE_INLINE uint64_t\nfmix64(uint64_t k)\n{\n    k ^= k >> 33;\n    k *= BIG_CONSTANT(0xff51afd7ed558ccd);\n    k ^= k >> 33;\n    k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);\n    k ^= k >> 33;\n\n    return k;\n}\n\n//-----------------------------------------------------------------------------\n// Finalization function\n\nstatic FORCE_INLINE void\ndigest_x64_128_impl(uint64_t h1, uint64_t h2, const uint64_t k1,\n                    const uint64_t k2, const Py_ssize_t len, const char *out)\n{\n    h1 ^= mixK1_x64_128(k1);\n    h2 ^= mixK2_x64_128(k2);\n    h1 ^= len;\n    h2 ^= len;\n\n    h1 += h2;\n    h2 += h1;\n\n    h1 = fmix64(h1);\n    h2 = fmix64(h2);\n\n    h1 += h2;\n    h2 += h1;\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    ((uint64_t *)out)[0] = bswap_64(h1);\n    ((uint64_t *)out)[1] = bswap_64(h2);\n#else\n    ((uint64_t *)out)[0] = h1;\n    ((uint64_t *)out)[1] = h2;\n#endif\n}\n\nstatic FORCE_INLINE void\ndigest_x86_128_impl(uint32_t h1, uint32_t h2, uint32_t h3, uint32_t h4,\n                    const uint32_t k1, const uint32_t k2, const uint32_t k3,\n                    const uint32_t k4, const Py_ssize_t len, const char *out)\n{\n    const uint32_t c1 = 0x239b961b;\n    const uint32_t c2 = 0xab0e9789;\n    const uint32_t c3 = 0x38b34ae5;\n    const uint32_t c4 = 0xa1e38b93;\n\n    h1 ^= mixK_x86_128(k1, 15, c1, c2);\n    h2 ^= mixK_x86_128(k2, 16, c2, c3);\n    h3 ^= mixK_x86_128(k3, 17, c3, c4);\n    h4 ^= mixK_x86_128(k4, 18, c4, c1);\n    h1 ^= len;\n    h2 ^= len;\n    h3 ^= len;\n    h4 ^= len;\n\n    h1 += h2;\n    h1 += h3;\n    h1 += h4;\n    h2 += h1;\n    h3 += h1;\n    h4 += h1;\n\n    h1 = fmix32(h1);\n    h2 = fmix32(h2);\n    h3 = fmix32(h3);\n    h4 = fmix32(h4);\n\n    h1 += h2;\n    h1 += h3;\n    h1 += h4;\n    h2 += h1;\n    h3 += h1;\n    h4 += h1;\n\n#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n    ((uint32_t *)out)[0] = bswap_32(h1);\n    ((uint32_t *)out)[1] = bswap_32(h2);\n    ((uint32_t *)out)[2] = bswap_32(h3);\n    ((uint32_t *)out)[3] = bswap_32(h4);\n#else\n    ((uint32_t *)out)[0] = h1;\n    ((uint32_t *)out)[1] = h2;\n    ((uint32_t *)out)[2] = h3;\n    ((uint32_t *)out)[3] = h4;\n#endif\n}\n\n//-----------------------------------------------------------------------------\n\nvoid\nmurmurhash3_x86_32(const void *key, Py_ssize_t len, uint32_t seed, void *out);\n\nvoid\nmurmurhash3_x86_128(const void *key, Py_ssize_t len, uint32_t seed, void *out);\n\nvoid\nmurmurhash3_x64_128(const void *key, Py_ssize_t len, uint32_t seed, void *out);\n\n//-----------------------------------------------------------------------------\n\n#endif  // _MURMURHASH3_H_\n"
  },
  {
    "path": "src/mmh3/py.typed",
    "content": ""
  },
  {
    "path": "tests/helper.py",
    "content": "\"\"\"Helper functions for tests.\"\"\"\n\n\n# see also https://stackoverflow.com/a/1375939\ndef u32_to_s32(v: int) -> int:\n    \"\"\"Convert unsigned 32-bit integer to signed 32-bit integer.\n\n    Args:\n        v: Unsigned 32-bit integer.\n\n    Returns:\n        Signed 32-bit representation of the input.\n    \"\"\"\n    if v & 0x80000000:\n        return -0x100000000 + v\n    return v\n"
  },
  {
    "path": "tests/test_doctrings.py",
    "content": "# pylint: disable=missing-module-docstring,missing-function-docstring\nimport mmh3\n\n\ndef test_function_docstrings() -> None:\n    assert \"__doc__\" in dir(mmh3.hash)\n    assert mmh3.hash.__doc__ is not None\n    assert mmh3.hash.__doc__.startswith(\"hash(key, seed=0, signed=True) -> int\\n\\n\")\n\n    assert \"__doc__\" in dir(mmh3.hash_from_buffer)\n    assert mmh3.hash_from_buffer.__doc__ is not None\n    assert mmh3.hash_from_buffer.__doc__.startswith(\n        \"hash_from_buffer(key, seed=0, signed=True) -> int\\n\\n\"\n    )\n\n    assert \"__doc__\" in dir(mmh3.hash64)\n    assert mmh3.hash64.__doc__ is not None\n    assert mmh3.hash64.__doc__.startswith(\n        \"hash64(key, seed=0, x64arch=True, signed=True) -> tuple[int, int]\\n\\n\"\n    )\n\n    assert \"__doc__\" in dir(mmh3.hash128)\n    assert mmh3.hash128.__doc__ is not None\n    assert mmh3.hash128.__doc__.startswith(\n        \"hash128(key, seed=0, x64arch=True, signed=False) -> int\\n\\n\"\n    )\n\n    assert \"__doc__\" in dir(mmh3.hash_bytes)\n    assert mmh3.hash_bytes.__doc__ is not None\n    assert mmh3.hash_bytes.__doc__.startswith(\n        \"hash_bytes(key, seed=0, x64arch=True) -> bytes\\n\\n\"\n    )\n\n\ndef test_module_docstring() -> None:\n    assert \"__doc__\" in dir(mmh3)\n    assert mmh3.__doc__ is not None\n    assert mmh3.__doc__.startswith(\"A Python front-end to MurmurHash3\")\n"
  },
  {
    "path": "tests/test_free_threading.py",
    "content": "# pylint: disable=missing-module-docstring,missing-function-docstring\nfrom collections.abc import Callable\nfrom concurrent.futures import ThreadPoolExecutor\nfrom typing import Any\n\nimport mmh3\n\n\ndef run_threaded(func: Callable[..., Any], num_threads: int = 8) -> None:\n    with ThreadPoolExecutor(max_workers=num_threads) as executor:\n        futures = [executor.submit(func) for _ in range(num_threads)]\n        for future in futures:\n            future.result()  # wait for all threads to complete\n\n\ndef test_parallel_hasher_mmh3_32_update() -> None:\n    hasher = mmh3.mmh3_32()\n\n    def closure() -> None:\n        for _ in range(1000):\n            hasher.update(b\"foo\")\n\n    run_threaded(closure, num_threads=8)\n\n    assert hasher.sintdigest() == mmh3.hash(b\"foo\" * 8000)\n\n\ndef test_parallel_hasher_mmh3_x64_128_update() -> None:\n    hasher = mmh3.mmh3_x64_128()\n\n    def closure() -> None:\n        for _ in range(1000):\n            hasher.update(b\"foo\")\n\n    run_threaded(closure, num_threads=8)\n\n    assert hasher.sintdigest() == mmh3.hash128(b\"foo\" * 8000, x64arch=True, signed=True)\n\n\ndef test_parallel_hasher_mmh3_x86_128_update() -> None:\n    hasher = mmh3.mmh3_x86_128()\n\n    def closure() -> None:\n        for _ in range(1000):\n            hasher.update(b\"foo\")\n\n    run_threaded(closure, num_threads=8)\n\n    assert hasher.sintdigest() == mmh3.hash128(\n        b\"foo\" * 8000, x64arch=False, signed=True\n    )\n"
  },
  {
    "path": "tests/test_invalid_inputs.py",
    "content": "# pylint: disable=missing-module-docstring, missing-function-docstring\n# pylint: disable=no-value-for-parameter, too-many-function-args\nfrom typing import no_type_check\n\nimport pytest\n\nimport mmh3\n\n\n@no_type_check\ndef test_hash_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.hash()\n    with pytest.raises(TypeError):\n        mmh3.hash(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.hash(b\"hello, world\", 42, True, 1234)\n    with pytest.raises(TypeError):\n        mmh3.hash(b\"hello, world\", seed=\"42\")\n    with pytest.raises(TypeError):\n        mmh3.hash([1, 2, 3], 42)\n    # pylint: disable=redundant-keyword-arg\n    with pytest.raises(TypeError):\n        mmh3.hash(b\"hello, world\", key=b\"42\")\n\n\n@no_type_check\ndef test_hash_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.hash(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.hash(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_hash128_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.hash128()\n    with pytest.raises(TypeError):\n        mmh3.hash128(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.hash128(b\"hello, world\", 42, True, False, 1234)\n    with pytest.raises(TypeError):\n        mmh3.hash128(b\"hello, world\", seed=\"42\")\n    with pytest.raises(TypeError):\n        mmh3.hash128([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_hash128_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.hash128(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.hash128(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_hash64_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.hash64()\n    with pytest.raises(TypeError):\n        mmh3.hash64(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.hash64(b\"hello, world\", 42, True, False, 1234)\n    with pytest.raises(TypeError):\n        mmh3.hash64(b\"hello, world\", seed=\"42\")\n    with pytest.raises(TypeError):\n        mmh3.hash64([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_hash64_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.hash64(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.hash64(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_hash_bytes_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.hash_bytes()\n    with pytest.raises(TypeError):\n        mmh3.hash_bytes(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.hash_bytes(b\"hello, world\", 42, True, 1234)\n    with pytest.raises(TypeError):\n        mmh3.hash_bytes(b\"hello, world\", seed=\"42\")\n    with pytest.raises(TypeError):\n        mmh3.hash_bytes([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_hash_bytes_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.hash_bytes(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.hash_bytes(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_hash_from_buffer_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.hash_from_buffer()\n    with pytest.raises(TypeError):\n        mmh3.hash_from_buffer(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.hash_from_buffer(b\"hello, world\", 42, True, 1234)\n    with pytest.raises(TypeError):\n        mmh3.hash_from_buffer(b\"hello, world\", seed=\"42\")\n    with pytest.raises(TypeError):\n        mmh3.hash_from_buffer([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_hash_from_buffer_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.hash_from_buffer(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.hash_from_buffer(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_32_digest_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_digest()\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_digest(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_digest(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_digest(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_digest([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_32_digest_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_32_digest(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_32_digest(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_32_sintdigest_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_sintdigest()\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_sintdigest(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_sintdigest(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_sintdigest(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_sintdigest([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_32_sintdigest_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_32_sintdigest(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_32_sintdigest(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_32_uintdigest_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_uintdigest()\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_uintdigest(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_uintdigest(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_uintdigest(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32_uintdigest([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_32_uintdigest_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_32_uintdigest(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_32_uintdigest(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_x64_128_digest_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_digest()\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_digest(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_digest(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_digest(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_digest([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_x64_128_digest_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x64_128_digest(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x64_128_digest(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_x64_128_sintdigest_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_sintdigest()\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_sintdigest(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_sintdigest(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_sintdigest(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_sintdigest([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_x64_128_sintdigest_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x64_128_sintdigest(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x64_128_sintdigest(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_x64_128_uintdigest_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_uintdigest()\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_uintdigest(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_uintdigest(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_uintdigest(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_uintdigest([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_x64_128_uintdigest_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x64_128_uintdigest(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x64_128_uintdigest(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_x64_128_stupledigest_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_stupledigest()\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_stupledigest(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_stupledigest(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_stupledigest(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_stupledigest([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_x64_128_stupledigest_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x64_128_stupledigest(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x64_128_stupledigest(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_x64_128_utupledigest_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_utupledigest()\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_utupledigest(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_utupledigest(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_utupledigest(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128_utupledigest([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_x64_128_utupledigest_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x64_128_utupledigest(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x64_128_utupledigest(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_x86_128_digest_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_digest()\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_digest(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_digest(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_digest(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_digest([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_x86_128_digest_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x86_128_digest(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x86_128_digest(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_x86_128_sintdigest_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_sintdigest()\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_sintdigest(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_sintdigest(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_sintdigest(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_sintdigest([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_x86_128_sintdigest_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x86_128_sintdigest(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x86_128_sintdigest(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_x86_128_uintdigest_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_uintdigest()\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_uintdigest(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_uintdigest(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_uintdigest(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_uintdigest([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_x86_128_uintdigest_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x86_128_uintdigest(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x86_128_uintdigest(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_x86_128_stupledigest_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_stupledigest()\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_stupledigest(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_stupledigest(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_stupledigest(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_stupledigest([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_x86_128_stupledigest_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x86_128_stupledigest(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x86_128_stupledigest(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_x86_128_utupledigest_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_utupledigest()\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_utupledigest(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_utupledigest(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_utupledigest(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128_utupledigest([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_x86_128_utupledigest_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x86_128_utupledigest(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x86_128_utupledigest(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_32_init_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_32([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_32_init_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_32(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_32(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_x64_128_init_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x64_128([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_x64_128_init_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x64_128(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x64_128(b\"hello, world\", 2**32)\n\n\n@no_type_check\ndef test_mmh3_x86_128_init_raises_typeerror() -> None:\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128(b\"hello, world\", 42, 1234)\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128(\"hello, world\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128(b\"hello, world\", \"42\")\n    with pytest.raises(TypeError):\n        mmh3.mmh3_x86_128([1, 2, 3], 42)\n\n\n@no_type_check\ndef test_mmh3_x86_128_init_raises_valueerror() -> None:\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x86_128(b\"hello, world\", -1)\n    with pytest.raises(ValueError):\n        mmh3.mmh3_x86_128(b\"hello, world\", 2**32)\n"
  },
  {
    "path": "tests/test_mmh3.py",
    "content": "# pylint: disable=missing-module-docstring,missing-function-docstring\nimport sys\n\nimport mmh3\nfrom helper import u32_to_s32\n\n\ndef test_hash() -> None:\n    assert mmh3.hash(\"foo\") == -156908512\n\n    # Test vectors devised by Ian Boyd\n    # https://stackoverflow.com/a/31929528\n    assert mmh3.hash(b\"\", seed=0) == 0\n    assert mmh3.hash(b\"\", seed=1) == 0x514E28B7\n    assert mmh3.hash(b\"\", seed=0xFFFFFFFF) == u32_to_s32(0x81F16F39)\n    assert mmh3.hash(b\"\\x21\\x43\\x65\\x87\", 0) == u32_to_s32(0xF55B516B)\n    assert mmh3.hash(b\"\\x21\\x43\\x65\\x87\", 0x5082EDEE) == u32_to_s32(0x2362F9DE)\n    assert mmh3.hash(b\"\\x21\\x43\\x65\", 0) == u32_to_s32(0x7E4A8634)\n    assert mmh3.hash(b\"\\x21\\x43\", 0) == u32_to_s32(0xA0F7B07A)\n    assert mmh3.hash(b\"\\x21\", 0) == u32_to_s32(0x72661CF4)\n    assert mmh3.hash(b\"\\xff\\xff\\xff\\xff\", 0) == u32_to_s32(0x76293B50)\n    assert mmh3.hash(b\"\\x00\\x00\\x00\\x00\", 0) == u32_to_s32(0x2362F9DE)\n    assert mmh3.hash(b\"\\x00\\x00\\x00\", 0) == u32_to_s32(0x85F0B427)\n    assert mmh3.hash(b\"\\x00\\x00\", 0) == u32_to_s32(0x30F4C306)\n    assert mmh3.hash(b\"\\x00\", 0) == u32_to_s32(0x514E28B7)\n\n    assert mmh3.hash(\"aaaa\", 0x9747B28C) == u32_to_s32(0x5A97808A)\n    assert mmh3.hash(\"aaa\", 0x9747B28C) == u32_to_s32(0x283E0130)\n    assert mmh3.hash(\"aa\", 0x9747B28C) == u32_to_s32(0x5D211726)\n    assert mmh3.hash(\"a\", 0x9747B28C) == u32_to_s32(0x7FA09EA6)\n\n    assert mmh3.hash(\"abcd\", 0x9747B28C) == u32_to_s32(0xF0478627)\n    assert mmh3.hash(\"abc\", 0x9747B28C) == u32_to_s32(0xC84A62DD)\n    assert mmh3.hash(\"ab\", 0x9747B28C) == u32_to_s32(0x74875592)\n    assert mmh3.hash(\"a\", 0x9747B28C) == u32_to_s32(0x7FA09EA6)\n\n    assert mmh3.hash(\"Hello, world!\", 0x9747B28C) == u32_to_s32(0x24884CBA)\n\n    assert mmh3.hash(\"ππππππππ\".encode(), 0x9747B28C) == u32_to_s32(0xD58063C1)\n\n    assert mmh3.hash(\"a\" * 256, 0x9747B28C) == u32_to_s32(0x37405BDC)\n\n    assert mmh3.hash(\"abc\", 0) == u32_to_s32(0xB3DD93FA)\n    assert mmh3.hash(\n        \"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq\", 0\n    ) == u32_to_s32(0xEE925B90)\n\n    assert mmh3.hash(\n        \"The quick brown fox jumps over the lazy dog\", 0x9747B28C\n    ) == u32_to_s32(0x2FA826CD)\n\n\ndef test_hash_unsigned() -> None:\n    assert mmh3.hash(\"foo\", signed=False) == 4138058784\n\n    # Test vectors devised by Ian Boyd\n    # https://stackoverflow.com/a/31929528\n    assert mmh3.hash(b\"\", seed=0, signed=False) == 0\n    assert mmh3.hash(b\"\", seed=1, signed=False) == 0x514E28B7\n    assert mmh3.hash(b\"\", seed=0xFFFFFFFF, signed=False) == 0x81F16F39\n    assert mmh3.hash(b\"\\x21\\x43\\x65\\x87\", 0, signed=False) == 0xF55B516B\n    assert mmh3.hash(b\"\\x21\\x43\\x65\\x87\", 0x5082EDEE, signed=False) == 0x2362F9DE\n    assert mmh3.hash(b\"\\x21\\x43\\x65\", 0, signed=False) == 0x7E4A8634\n    assert mmh3.hash(b\"\\x21\\x43\", 0, signed=False) == 0xA0F7B07A\n    assert mmh3.hash(b\"\\x21\", 0, signed=False) == 0x72661CF4\n    assert mmh3.hash(b\"\\xff\\xff\\xff\\xff\", 0, signed=False) == 0x76293B50\n    assert mmh3.hash(b\"\\x00\\x00\\x00\\x00\", 0, signed=False) == 0x2362F9DE\n    assert mmh3.hash(b\"\\x00\\x00\\x00\", 0, signed=False) == 0x85F0B427\n    assert mmh3.hash(b\"\\x00\\x00\", 0, signed=False) == 0x30F4C306\n    assert mmh3.hash(b\"\\x00\", 0, signed=False) == 0x514E28B7\n\n    assert mmh3.hash(\"aaaa\", 0x9747B28C, signed=False) == 0x5A97808A\n    assert mmh3.hash(\"aaa\", 0x9747B28C, signed=False) == 0x283E0130\n    assert mmh3.hash(\"aa\", 0x9747B28C, signed=False) == 0x5D211726\n    assert mmh3.hash(\"a\", 0x9747B28C, signed=False) == 0x7FA09EA6\n\n    assert mmh3.hash(\"abcd\", 0x9747B28C, signed=False) == 0xF0478627\n    assert mmh3.hash(\"abc\", 0x9747B28C, signed=False) == 0xC84A62DD\n    assert mmh3.hash(\"ab\", 0x9747B28C, signed=False) == 0x74875592\n    assert mmh3.hash(\"a\", 0x9747B28C, signed=False) == 0x7FA09EA6\n\n    assert mmh3.hash(\"Hello, world!\", 0x9747B28C, signed=False) == 0x24884CBA\n\n    assert mmh3.hash(\"ππππππππ\".encode(), 0x9747B28C, signed=False) == 0xD58063C1\n\n    assert mmh3.hash(\"a\" * 256, 0x9747B28C, signed=False) == 0x37405BDC\n\n    assert mmh3.hash(\"abc\", 0, signed=False) == 0xB3DD93FA\n    assert (\n        mmh3.hash(\n            \"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq\", 0, signed=False\n        )\n        == 0xEE925B90\n    )\n\n    assert (\n        mmh3.hash(\n            \"The quick brown fox jumps over the lazy dog\", 0x9747B28C, signed=False\n        )\n        == 0x2FA826CD\n    )\n\n    assert (\n        mmh3.hash(\n            \"The quick brown fox jumps over the lazy dog\", 0x9747B28C, signed=False\n        )\n        == 0x2FA826CD\n    )\n\n\ndef test_hash2() -> None:\n    assert mmh3.hash(\"foo\") == -156908512\n\n    # Test vectors devised by Ian Boyd\n    # https://stackoverflow.com/a/31929528\n    assert mmh3.hash(b\"\", seed=0) == 0\n    assert mmh3.hash(b\"\", seed=1) == 0x514E28B7\n    assert mmh3.hash(b\"\", seed=0xFFFFFFFF) == u32_to_s32(0x81F16F39)\n    assert mmh3.hash(b\"\\x21\\x43\\x65\\x87\", 0) == u32_to_s32(0xF55B516B)\n    assert mmh3.hash(b\"\\x21\\x43\\x65\\x87\", 0x5082EDEE) == u32_to_s32(0x2362F9DE)\n    assert mmh3.hash(b\"\\x21\\x43\\x65\", 0) == u32_to_s32(0x7E4A8634)\n    assert mmh3.hash(b\"\\x21\\x43\", 0) == u32_to_s32(0xA0F7B07A)\n    assert mmh3.hash(b\"\\x21\", 0) == u32_to_s32(0x72661CF4)\n    assert mmh3.hash(b\"\\xff\\xff\\xff\\xff\", 0) == u32_to_s32(0x76293B50)\n    assert mmh3.hash(b\"\\x00\\x00\\x00\\x00\", 0) == u32_to_s32(0x2362F9DE)\n    assert mmh3.hash(b\"\\x00\\x00\\x00\", 0) == u32_to_s32(0x85F0B427)\n    assert mmh3.hash(b\"\\x00\\x00\", 0) == u32_to_s32(0x30F4C306)\n    assert mmh3.hash(b\"\\x00\", 0) == u32_to_s32(0x514E28B7)\n\n    assert mmh3.hash(\"aaaa\", 0x9747B28C) == u32_to_s32(0x5A97808A)\n    assert mmh3.hash(\"aaa\", 0x9747B28C) == u32_to_s32(0x283E0130)\n    assert mmh3.hash(\"aa\", 0x9747B28C) == u32_to_s32(0x5D211726)\n    assert mmh3.hash(\"a\", 0x9747B28C) == u32_to_s32(0x7FA09EA6)\n\n    assert mmh3.hash(\"abcd\", 0x9747B28C) == u32_to_s32(0xF0478627)\n    assert mmh3.hash(\"abc\", 0x9747B28C) == u32_to_s32(0xC84A62DD)\n    assert mmh3.hash(\"ab\", 0x9747B28C) == u32_to_s32(0x74875592)\n    assert mmh3.hash(\"a\", 0x9747B28C) == u32_to_s32(0x7FA09EA6)\n\n    assert mmh3.hash(\"Hello, world!\", 0x9747B28C) == u32_to_s32(0x24884CBA)\n\n    assert mmh3.hash(\"ππππππππ\".encode(), 0x9747B28C) == u32_to_s32(0xD58063C1)\n\n    assert mmh3.hash(\"a\" * 256, 0x9747B28C) == u32_to_s32(0x37405BDC)\n\n    assert mmh3.hash(\"abc\", 0) == u32_to_s32(0xB3DD93FA)\n    assert mmh3.hash(\n        \"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq\", 0\n    ) == u32_to_s32(0xEE925B90)\n\n    assert mmh3.hash(\n        \"The quick brown fox jumps over the lazy dog\", 0x9747B28C\n    ) == u32_to_s32(0x2FA826CD)\n\n\ndef test_hash_from_buffer() -> None:\n    mview = memoryview(b\"foo\")\n    assert mmh3.hash_from_buffer(mview) == -156908512\n    assert mmh3.hash_from_buffer(mview, signed=False) == 4138058784\n\n\ndef test_hash_bytes() -> None:\n    assert mmh3.hash_bytes(\"foo\") == b\"aE\\xf5\\x01W\\x86q\\xe2\\x87}\\xba+\\xe4\\x87\\xaf~\"\n    assert (\n        mmh3.hash_bytes(\"foo\", 0, True)\n        == b\"aE\\xf5\\x01W\\x86q\\xe2\\x87}\\xba+\\xe4\\x87\\xaf~\"\n    )\n\n    # Test vectors from https://github.com/PeterScott/murmur3/blob/master/test.c\n    assert mmh3.hash_bytes(\"Hello, world!\", 123, x64arch=False) == (\n        0x9E37C886A41621625A1AACD761C9129E\n    ).to_bytes(16, \"little\")\n    assert mmh3.hash_bytes(\"\", 123, x64arch=False) == (\n        0x26F3E79926F3E79926F3E799FEDC5245\n    ).to_bytes(16, \"little\")\n\n\ndef test_hash64() -> None:\n    assert mmh3.hash64(\"foo\") == (-2129773440516405919, 9128664383759220103)\n    assert mmh3.hash64(\"foo\", signed=False) == (\n        16316970633193145697,\n        9128664383759220103,\n    )\n\n    assert mmh3.hash64(\"The quick brown fox jumps over the lazy dog\", 0x9747B28C) == (\n        8325606756057297185,\n        -484854449282476315,\n    )\n    assert mmh3.hash64(\n        \"The quick brown fox jumps over the lazy dog\", 0x9747B28C, signed=False\n    ) == (\n        8325606756057297185,\n        17961889624427075301,\n    )\n    assert mmh3.hash64(\"foo\", signed=False, x64arch=True) == (\n        16316970633193145697,\n        9128664383759220103,\n    )\n\n    # Test vectors from https://github.com/PeterScott/murmur3/blob/master/test.c\n    assert mmh3.hash64(\"Hello, world!\", 123, signed=False, x64arch=False) == (\n        0x5A1AACD761C9129E,\n        0x9E37C886A4162162,\n    )\n\n    assert mmh3.hash64(\"\", 123, False, False) == (\n        0x26F3E799FEDC5245,\n        0x26F3E79926F3E799,\n    )\n\n\ndef test_hash128() -> None:\n    assert mmh3.hash128(\"foo\") == 168394135621993849475852668931176482145\n    assert mmh3.hash128(\"foo\", 42) == 215966891540331383248189432718888555506\n    assert (\n        mmh3.hash128(\"foo\", 42, signed=False) == 215966891540331383248189432718888555506\n    )\n    assert (\n        mmh3.hash128(\"foo\", 42, signed=True) == -124315475380607080215185174712879655950\n    )\n    # Test vectors from https://github.com/PeterScott/murmur3/blob/master/test.c\n    assert (\n        mmh3.hash128(\"Hello, world!\", 123, signed=False, x64arch=False)\n        == 0x9E37C886A41621625A1AACD761C9129E\n    )\n    assert mmh3.hash128(\"\", 123, False, False) == 0x26F3E79926F3E79926F3E799FEDC5245\n\n\ndef test_mmh3_32_digest() -> None:\n    assert mmh3.mmh3_32_digest(b\"\") == b\"\\0\\0\\0\\0\"\n    assert mmh3.mmh3_32_digest(b\"\", 0) == b\"\\0\\0\\0\\0\"\n    assert mmh3.mmh3_32_digest(b\"\\x21\\x43\\x65\\x87\", 0) == (0xF55B516B).to_bytes(\n        4, \"little\"\n    )\n    assert mmh3.mmh3_32_digest(b\"\\x21\\x43\\x65\\x87\", u32_to_s32(0x5082EDEE)) == (\n        0x2362F9DE\n    ).to_bytes(4, \"little\")\n    assert mmh3.mmh3_32_digest(b\"\\x21\\x43\\x65\", 0) == (0x7E4A8634).to_bytes(4, \"little\")\n    assert mmh3.mmh3_32_digest(b\"\\x21\\x43\", 0) == (0xA0F7B07A).to_bytes(4, \"little\")\n    assert mmh3.mmh3_32_digest(b\"\\x21\", 0) == (0x72661CF4).to_bytes(4, \"little\")\n    assert mmh3.mmh3_32_digest(b\"\\xff\\xff\\xff\\xff\", 0) == (0x76293B50).to_bytes(\n        4, \"little\"\n    )\n    assert mmh3.mmh3_32_digest(b\"\\x00\\x00\\x00\\x00\", 0) == (0x2362F9DE).to_bytes(\n        4, \"little\"\n    )\n    assert mmh3.mmh3_32_digest(b\"\\x00\\x00\\x00\", 0) == (0x85F0B427).to_bytes(4, \"little\")\n    assert mmh3.mmh3_32_digest(b\"\\x00\\x00\", 0) == (0x30F4C306).to_bytes(4, \"little\")\n    assert mmh3.mmh3_32_digest(b\"\\x00\", 0) == (0x514E28B7).to_bytes(4, \"little\")\n\n    assert mmh3.mmh3_32_digest(b\"aaaa\", 0x9747B28C) == (0x5A97808A).to_bytes(\n        4, \"little\"\n    )\n    assert mmh3.mmh3_32_digest(b\"aaa\", 0x9747B28C) == (0x283E0130).to_bytes(4, \"little\")\n    assert mmh3.mmh3_32_digest(b\"aa\", 0x9747B28C) == (0x5D211726).to_bytes(4, \"little\")\n    assert mmh3.mmh3_32_digest(b\"a\", 0x9747B28C) == (0x7FA09EA6).to_bytes(4, \"little\")\n\n    assert mmh3.mmh3_32_digest(b\"abcd\", 0x9747B28C) == (0xF0478627).to_bytes(\n        4, \"little\"\n    )\n    assert mmh3.mmh3_32_digest(b\"abc\", 0x9747B28C) == (0xC84A62DD).to_bytes(4, \"little\")\n    assert mmh3.mmh3_32_digest(b\"ab\", 0x9747B28C) == (0x74875592).to_bytes(4, \"little\")\n    assert mmh3.mmh3_32_digest(b\"a\", 0x9747B28C) == (0x7FA09EA6).to_bytes(4, \"little\")\n\n    assert mmh3.mmh3_32_digest(b\"Hello, world!\", 0x9747B28C) == (0x24884CBA).to_bytes(\n        4, \"little\"\n    )\n\n    assert mmh3.mmh3_32_digest(\"ππππππππ\".encode(), 0x9747B28C) == (\n        0xD58063C1\n    ).to_bytes(4, \"little\")\n\n    assert mmh3.mmh3_32_digest(b\"a\" * 256, 0x9747B28C) == (0x37405BDC).to_bytes(\n        4, \"little\"\n    )\n\n    assert mmh3.mmh3_32_digest(b\"abc\", 0) == (0xB3DD93FA).to_bytes(4, \"little\")\n    assert mmh3.mmh3_32_digest(\n        b\"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq\", 0\n    ) == (0xEE925B90).to_bytes(4, \"little\")\n\n    assert mmh3.mmh3_32_digest(\n        b\"The quick brown fox jumps over the lazy dog\", 0x9747B28C\n    ) == (0x2FA826CD).to_bytes(4, \"little\")\n\n    assert mmh3.mmh3_32_digest(bytearray(b\"aaaa\"), 0x9747B28C) == (0x5A97808A).to_bytes(\n        4, \"little\"\n    )\n    v = memoryview(b\"aaaa\")\n    assert mmh3.mmh3_32_digest(v, 0x9747B28C) == (0x5A97808A).to_bytes(4, \"little\")\n    assert mmh3.mmh3_32_digest(v[1:3], 0x9747B28C) == (0x5D211726).to_bytes(4, \"little\")\n\n\ndef test_mmh3_sintdigest() -> None:\n    assert mmh3.mmh3_32_sintdigest(b\"foo\") == -156908512\n    assert mmh3.mmh3_32_sintdigest(bytearray(b\"foo\")) == -156908512\n    assert mmh3.mmh3_32_sintdigest(memoryview(b\"foobar\")[0:3]) == -156908512\n\n    # Test vectors devised by Ian Boyd\n    # https://stackoverflow.com/a/31929528\n    assert mmh3.mmh3_32_sintdigest(b\"\", 0) == 0\n    assert mmh3.mmh3_32_sintdigest(b\"\", 1) == 0x514E28B7\n    assert mmh3.mmh3_32_sintdigest(b\"\", 0xFFFFFFFF) == u32_to_s32(0x81F16F39)\n    assert mmh3.mmh3_32_sintdigest(b\"\\x21\\x43\\x65\\x87\", 0) == u32_to_s32(0xF55B516B)\n    assert mmh3.mmh3_32_sintdigest(\n        b\"\\x21\\x43\\x65\\x87\", u32_to_s32(0x5082EDEE)\n    ) == u32_to_s32(0x2362F9DE)\n    assert mmh3.mmh3_32_sintdigest(b\"\\x21\\x43\\x65\", 0) == u32_to_s32(0x7E4A8634)\n    assert mmh3.mmh3_32_sintdigest(b\"\\x21\\x43\", 0) == u32_to_s32(0xA0F7B07A)\n    assert mmh3.mmh3_32_sintdigest(b\"\\x21\", 0) == u32_to_s32(0x72661CF4)\n    assert mmh3.mmh3_32_sintdigest(b\"\\xff\\xff\\xff\\xff\", 0) == u32_to_s32(0x76293B50)\n    assert mmh3.mmh3_32_sintdigest(b\"\\x00\\x00\\x00\\x00\", 0) == u32_to_s32(0x2362F9DE)\n    assert mmh3.mmh3_32_sintdigest(b\"\\x00\\x00\\x00\", 0) == u32_to_s32(0x85F0B427)\n    assert mmh3.mmh3_32_sintdigest(b\"\\x00\\x00\", 0) == u32_to_s32(0x30F4C306)\n    assert mmh3.mmh3_32_sintdigest(b\"\\x00\", 0) == u32_to_s32(0x514E28B7)\n\n    assert mmh3.mmh3_32_sintdigest(b\"aaaa\", 0x9747B28C) == u32_to_s32(0x5A97808A)\n    assert mmh3.mmh3_32_sintdigest(b\"aaa\", 0x9747B28C) == u32_to_s32(0x283E0130)\n    assert mmh3.mmh3_32_sintdigest(b\"aa\", 0x9747B28C) == u32_to_s32(0x5D211726)\n    assert mmh3.mmh3_32_sintdigest(b\"a\", 0x9747B28C) == u32_to_s32(0x7FA09EA6)\n\n    assert mmh3.mmh3_32_sintdigest(b\"abcd\", 0x9747B28C) == u32_to_s32(0xF0478627)\n    assert mmh3.mmh3_32_sintdigest(b\"abc\", 0x9747B28C) == u32_to_s32(0xC84A62DD)\n    assert mmh3.mmh3_32_sintdigest(b\"ab\", 0x9747B28C) == u32_to_s32(0x74875592)\n    assert mmh3.mmh3_32_sintdigest(b\"a\", 0x9747B28C) == u32_to_s32(0x7FA09EA6)\n\n    assert mmh3.mmh3_32_sintdigest(b\"Hello, world!\", 0x9747B28C) == u32_to_s32(\n        0x24884CBA\n    )\n\n    assert mmh3.mmh3_32_sintdigest(\"ππππππππ\".encode(), 0x9747B28C) == u32_to_s32(\n        0xD58063C1\n    )\n\n    assert mmh3.mmh3_32_sintdigest(b\"a\" * 256, 0x9747B28C) == u32_to_s32(0x37405BDC)\n\n    assert mmh3.mmh3_32_sintdigest(b\"abc\", 0) == u32_to_s32(0xB3DD93FA)\n    assert mmh3.mmh3_32_sintdigest(\n        b\"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq\", 0\n    ) == u32_to_s32(0xEE925B90)\n\n    assert mmh3.mmh3_32_sintdigest(\n        b\"The quick brown fox jumps over the lazy dog\", 0x9747B28C\n    ) == u32_to_s32(0x2FA826CD)\n\n\ndef test_mmh3_uintdigest() -> None:\n    assert mmh3.mmh3_32_uintdigest(b\"foo\") == 4138058784\n    assert mmh3.mmh3_32_uintdigest(bytearray(b\"foo\")) == 4138058784\n    assert mmh3.mmh3_32_uintdigest(memoryview(b\"foobar\")[0:3]) == 4138058784\n\n    # Test vectors devised by Ian Boyd\n    # https://stackoverflow.com/a/31929528\n    assert mmh3.mmh3_32_uintdigest(b\"\") == 0\n    assert mmh3.mmh3_32_uintdigest(b\"\", 0) == 0\n    assert mmh3.mmh3_32_uintdigest(b\"\", 1) == 0x514E28B7\n    assert mmh3.mmh3_32_uintdigest(b\"\", 0xFFFFFFFF) == 0x81F16F39\n    assert mmh3.mmh3_32_uintdigest(b\"\\x21\\x43\\x65\\x87\", 0) == 0xF55B516B\n    assert mmh3.mmh3_32_uintdigest(b\"\\x21\\x43\\x65\\x87\", 0x5082EDEE) == 0x2362F9DE\n    assert mmh3.mmh3_32_uintdigest(b\"\\x21\\x43\\x65\", 0) == 0x7E4A8634\n    assert mmh3.mmh3_32_uintdigest(b\"\\x21\\x43\", 0) == 0xA0F7B07A\n    assert mmh3.mmh3_32_uintdigest(b\"\\x21\", 0) == 0x72661CF4\n    assert mmh3.mmh3_32_uintdigest(b\"\\xff\\xff\\xff\\xff\", 0) == 0x76293B50\n    assert mmh3.mmh3_32_uintdigest(b\"\\x00\\x00\\x00\\x00\", 0) == 0x2362F9DE\n    assert mmh3.mmh3_32_uintdigest(b\"\\x00\\x00\\x00\", 0) == 0x85F0B427\n    assert mmh3.mmh3_32_uintdigest(b\"\\x00\\x00\", 0) == 0x30F4C306\n    assert mmh3.mmh3_32_uintdigest(b\"\\x00\", 0) == 0x514E28B7\n\n    assert mmh3.mmh3_32_uintdigest(b\"aaaa\", 0x9747B28C) == 0x5A97808A\n    assert mmh3.mmh3_32_uintdigest(b\"aaa\", 0x9747B28C) == 0x283E0130\n    assert mmh3.mmh3_32_uintdigest(b\"aa\", 0x9747B28C) == 0x5D211726\n    assert mmh3.mmh3_32_uintdigest(b\"a\", 0x9747B28C) == 0x7FA09EA6\n\n    assert mmh3.mmh3_32_uintdigest(b\"abcd\", 0x9747B28C) == 0xF0478627\n    assert mmh3.mmh3_32_uintdigest(b\"abc\", 0x9747B28C) == 0xC84A62DD\n    assert mmh3.mmh3_32_uintdigest(b\"ab\", 0x9747B28C) == 0x74875592\n    assert mmh3.mmh3_32_uintdigest(b\"a\", 0x9747B28C) == 0x7FA09EA6\n\n    assert mmh3.mmh3_32_uintdigest(b\"Hello, world!\", 0x9747B28C) == 0x24884CBA\n\n    assert mmh3.mmh3_32_uintdigest(\"ππππππππ\".encode(), 0x9747B28C) == 0xD58063C1\n\n    assert mmh3.mmh3_32_uintdigest(b\"a\" * 256, 0x9747B28C) == 0x37405BDC\n\n    assert mmh3.mmh3_32_uintdigest(b\"abc\", 0) == 0xB3DD93FA\n    assert (\n        mmh3.mmh3_32_uintdigest(\n            b\"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq\", 0\n        )\n        == 0xEE925B90\n    )\n\n    assert (\n        mmh3.mmh3_32_uintdigest(\n            b\"The quick brown fox jumps over the lazy dog\", 0x9747B28C\n        )\n        == 0x2FA826CD\n    )\n\n    assert (\n        mmh3.mmh3_32_uintdigest(\n            b\"The quick brown fox jumps over the lazy dog\", 0x9747B28C\n        )\n        == 0x2FA826CD\n    )\n\n\ndef test_mmh3_x64_128_digest() -> None:\n    assert (\n        mmh3.mmh3_x64_128_digest(b\"foo\")\n        == b\"aE\\xf5\\x01W\\x86q\\xe2\\x87}\\xba+\\xe4\\x87\\xaf~\"\n    )\n\n    assert (\n        mmh3.mmh3_x64_128_digest(\n            b\"The quick brown fox jumps over the lazy dog\", 0x9747B28C\n        )\n        == b\"!1c\\xd2;\\x7f\\x8as\\xe5\\x16\\xc0~rsE\\xf9\"\n    )\n\n    v = bytearray(b\"bar boo bar\")\n    mv = memoryview(v)\n    v[4] = ord(\"f\")\n\n    assert (\n        mmh3.mmh3_x64_128_digest(mv[4:7])\n        == b\"aE\\xf5\\x01W\\x86q\\xe2\\x87}\\xba+\\xe4\\x87\\xaf~\"\n    )\n\n\ndef test_mmh3_x64_128_sintdigest() -> None:\n    assert mmh3.mmh3_x64_128_sintdigest(b\"\") == 0\n\n    assert (\n        mmh3.mmh3_x64_128_sintdigest(\n            b\"The quick brown fox jumps over the lazy dog\", 0x9747B28C\n        )\n        == -8943985938913228316176695348732677855\n    )\n\n\ndef test_mmh3_x64_128_uintdigest() -> None:\n    assert mmh3.mmh3_x64_128_uintdigest(b\"\") == 0\n\n    assert (\n        mmh3.mmh3_x64_128_uintdigest(b\"foo\", 42)\n        == 215966891540331383248189432718888555506\n    )\n\n\ndef test_mmh3_x64_128_stupledigest() -> None:\n    assert mmh3.mmh3_x64_128_stupledigest(b\"\") == (0, 0)\n\n    assert mmh3.mmh3_x64_128_stupledigest(\n        memoryview(b\"The quick brown fox jumps over the lazy dog\"), 0x9747B28C\n    ) == (\n        8325606756057297185,\n        -484854449282476315,\n    )\n\n\ndef test_mmh3_x64_128_utupledigest() -> None:\n    assert mmh3.mmh3_x64_128_utupledigest(b\"\") == (0, 0)\n\n    assert mmh3.mmh3_x64_128_utupledigest(memoryview(b\"foo\")) == (\n        16316970633193145697,\n        9128664383759220103,\n    )\n\n\ndef test_mmh3_x86_128_digest() -> None:\n    assert mmh3.mmh3_x86_128_digest(b\"\", 123) == (\n        0x26F3E79926F3E79926F3E799FEDC5245\n    ).to_bytes(16, \"little\")\n\n    assert mmh3.mmh3_x86_128_digest(b\"Hello, world!\", 123) == (\n        0x9E37C886A41621625A1AACD761C9129E\n    ).to_bytes(16, \"little\")\n\n    assert mmh3.mmh3_x86_128_digest(bytearray(b\"Hello, world!\"), 123) == (\n        0x9E37C886A41621625A1AACD761C9129E\n    ).to_bytes(16, \"little\")\n\n    v = bytearray(b\"hello, world!!!\")\n    mv = memoryview(v)\n    v[0] = ord(\"H\")\n\n    assert mmh3.mmh3_x86_128_digest(mv[0:13], 123) == (\n        0x9E37C886A41621625A1AACD761C9129E\n    ).to_bytes(16, \"little\")\n\n\ndef test_mmh3_x86_128_sintdigest() -> None:\n    assert mmh3.mmh3_x64_128_sintdigest(b\"\") == 0\n\n    assert (\n        mmh3.mmh3_x64_128_sintdigest(\n            b\"The quick brown fox jumps over the lazy dog\", 0x9747B28C\n        )\n        == -8943985938913228316176695348732677855\n    )\n\n\ndef test_mmh3_x86_128_uintdigest() -> None:\n    assert mmh3.mmh3_x64_128_uintdigest(b\"\", 0) == 0\n\n    # Test vector from https://github.com/PeterScott/murmur3/blob/master/test.c\n    assert (\n        mmh3.mmh3_x86_128_uintdigest(b\"Hello, world!\", 123)\n        == 0x9E37C886A41621625A1AACD761C9129E\n    )\n\n\ndef test_mmh3_x86_128_stupledigest() -> None:\n    assert mmh3.mmh3_x86_128_stupledigest(b\"\", 0) == (0, 0)\n\n    assert mmh3.mmh3_x86_128_stupledigest(\n        memoryview(b\"The quick brown fox jumps over the lazy dog\"), 0x9747B28C\n    ) == (\n        5528275682885686622,\n        -3623575540584727908,\n    )\n\n\ndef test_mmh3_x86_128_utupledigest() -> None:\n    assert mmh3.mmh3_x86_128_utupledigest(b\"\", 0) == (0, 0)\n\n    # Test vector from https://github.com/PeterScott/murmur3/blob/master/test.c\n    assert mmh3.mmh3_x86_128_utupledigest(memoryview(b\"Hello, world!\"), 123) == (\n        0x5A1AACD761C9129E,\n        0x9E37C886A4162162,\n    )\n\n\ndef test_64bit() -> None:\n    if sys.maxsize < (1 << 32):  # Skip this test under 32-bit environments\n        return\n    a = bytes(2**32 + 1)\n    assert mmh3.hash(a) == -1710109261\n    assert (\n        mmh3.hash_bytes(a) == b\"\\x821\\x93\\x0c\\xe7\\xa8\\x02\\x9d\\xe5 \\xa6\\xf9\\xeb8\\xd6\\x0e\"\n    )\n\n\n# from hex string \"0xff00de\" to integer\ndef hex_to_int(hex_str: str) -> int:\n    return int(hex_str, 16)\n"
  },
  {
    "path": "tests/test_mmh3_hasher.py",
    "content": "# pylint: disable=missing-module-docstring,missing-function-docstring\nimport mmh3\nfrom helper import u32_to_s32\n\n\ndef test_mmh3_32_digest() -> None:\n    hasher = mmh3.mmh3_32()\n    hasher.update(b\"\")\n    assert hasher.digest() == b\"\\x00\\x00\\x00\\x00\"\n\n    # Test vectors devised by Ian Boyd\n    # https://stackoverflow.com/a/31929528\n    hasher = mmh3.mmh3_32(seed=0x9747B28C)\n    hasher.update(b\"Hello, world!\")\n    assert hasher.digest() == b\"\\xba\\x4c\\x88\\x24\"\n\n    hasher = mmh3.mmh3_32(seed=0x9747B28C)\n    hasher.update(b\"Hello,\")\n    hasher.update(b\" world!\")\n    assert hasher.digest() == b\"\\xba\\x4c\\x88\\x24\"\n\n    hasher = mmh3.mmh3_32(b\"\", 0x9747B28C)\n    hasher.update(b\"Hello,\")\n    hasher.update(b\" world!\")\n    assert hasher.digest() == b\"\\xba\\x4c\\x88\\x24\"\n\n    hasher = mmh3.mmh3_32(b\"Hello,\", 0x9747B28C)\n    hasher.update(b\" world!\")\n    assert hasher.digest() == b\"\\xba\\x4c\\x88\\x24\"\n\n    hasher = mmh3.mmh3_32(b\"Hello,\", seed=0x9747B28C)\n    hasher.update(b\" world!\")\n    assert hasher.digest() == b\"\\xba\\x4c\\x88\\x24\"\n\n\ndef test_mmh3_32_sintdigest() -> None:\n    hasher = mmh3.mmh3_32()\n    hasher.update(b\"foo\")\n    assert hasher.sintdigest() == -156908512\n\n    # Test vectors devised by Ian Boyd\n    # https://stackoverflow.com/a/31929528\n    hasher = mmh3.mmh3_32()\n    hasher.update(b\"\")\n    assert hasher.sintdigest() == 0\n\n    hasher = mmh3.mmh3_32(seed=1)\n    hasher.update(b\"\")\n    assert hasher.sintdigest() == 0x514E28B7\n\n    hasher = mmh3.mmh3_32()\n    hasher.update(b\"\\x21\\x43\")\n    hasher.update(b\"\\x65\")\n    assert hasher.sintdigest() == u32_to_s32(0x7E4A8634)\n\n    hasher = mmh3.mmh3_32()\n    hasher.update(b\"\\x21\\x43\\x65\\x87\")\n    assert hasher.sintdigest() == u32_to_s32(0xF55B516B)\n\n    hasher = mmh3.mmh3_32()\n    hasher.update(b\"\\x21\\x43\")\n    hasher.update(b\"\\x65\\x87\")\n    assert hasher.sintdigest() == u32_to_s32(0xF55B516B)\n\n    hasher = mmh3.mmh3_32(seed=0x9747B28C)\n    hasher.update(b\"Hello, world!\")\n    assert hasher.sintdigest() == u32_to_s32(0x24884CBA)\n\n    hasher = mmh3.mmh3_32(seed=0x9747B28C)\n    hasher.update(b\"Hello,\")\n    hasher.update(b\" world!\")\n    assert hasher.sintdigest() == u32_to_s32(0x24884CBA)\n\n    hasher = mmh3.mmh3_32(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fo\")\n    hasher.update(b\"x jumps over the lazy dog\")\n    assert hasher.sintdigest() == u32_to_s32(0x2FA826CD)\n\n\ndef test_mmh3_32_uintdigest() -> None:\n    hasher = mmh3.mmh3_32()\n    hasher.update(b\"foo\")\n    assert hasher.uintdigest() == 4138058784\n\n    # Test vectors devised by Ian Boyd\n    # https://stackoverflow.com/a/31929528\n    hasher = mmh3.mmh3_32()\n    hasher.update(b\"\")\n    assert hasher.uintdigest() == 0\n\n    hasher = mmh3.mmh3_32(seed=1)\n    hasher.update(b\"\")\n    assert hasher.uintdigest() == 0x514E28B7\n\n    hasher = mmh3.mmh3_32()\n    hasher.update(b\"\\x21\\x43\")\n    hasher.update(b\"\\x65\")\n    assert hasher.uintdigest() == 0x7E4A8634\n\n    hasher = mmh3.mmh3_32()\n    hasher.update(b\"\\x21\\x43\\x65\\x87\")\n    assert hasher.uintdigest() == 0xF55B516B\n\n    hasher = mmh3.mmh3_32()\n    hasher.update(b\"\\x21\\x43\")\n    hasher.update(b\"\\x65\\x87\")\n    assert hasher.uintdigest() == 0xF55B516B\n\n    hasher = mmh3.mmh3_32(seed=0x9747B28C)\n    hasher.update(b\"Hello, world!\")\n    assert hasher.uintdigest() == 0x24884CBA\n\n    hasher = mmh3.mmh3_32(seed=0x9747B28C)\n    hasher.update(b\"Hello,\")\n    hasher.update(b\" world!\")\n    assert hasher.uintdigest() == 0x24884CBA\n\n    hasher = mmh3.mmh3_32(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fo\")\n    hasher.update(b\"x jumps over the lazy dog\")\n    assert hasher.uintdigest() == 0x2FA826CD\n\n\ndef test_mmh3_32_copy() -> None:\n    hasher = mmh3.mmh3_32(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fox\")\n\n    hasher2 = hasher.copy()\n\n    hasher.update(b\" jumps over the lazy dog\")\n    assert hasher.uintdigest() == 0x2FA826CD\n\n    hasher2.update(b\" jumps over the lazy dog\")\n    assert hasher2.uintdigest() == 0x2FA826CD\n\n\ndef test_mmh3_x64_128_basic_ops() -> None:\n    hasher = mmh3.mmh3_x64_128()\n    assert hasher.digest_size == 16\n    assert hasher.block_size == 32\n    assert hasher.name == \"mmh3_x64_128\"\n\n\ndef test_mmh3_x64_128_digest() -> None:\n    hasher = mmh3.mmh3_x64_128()\n    hasher.update(b\"foo\")\n    assert hasher.digest() == b\"aE\\xf5\\x01W\\x86q\\xe2\\x87}\\xba+\\xe4\\x87\\xaf~\"\n\n    hasher = mmh3.mmh3_x64_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fox jumps over the lazy dog\")\n    assert hasher.digest() == b\"!1c\\xd2;\\x7f\\x8as\\xe5\\x16\\xc0~rsE\\xf9\"\n\n    hasher = mmh3.mmh3_x64_128(b\"\", 0x9747B28C)\n    hasher.update(b\"The quick brown fox jumps over the lazy dog\")\n    assert hasher.digest() == b\"!1c\\xd2;\\x7f\\x8as\\xe5\\x16\\xc0~rsE\\xf9\"\n\n    hasher = mmh3.mmh3_x64_128(b\"The quick brown \", seed=0x9747B28C)\n    hasher.update(b\"fox jumps over the lazy dog\")\n    assert hasher.digest() == b\"!1c\\xd2;\\x7f\\x8as\\xe5\\x16\\xc0~rsE\\xf9\"\n\n    hasher = mmh3.mmh3_x64_128(b\"The quick brown \", 0x9747B28C)\n    hasher.update(b\"fox jumps over the lazy dog\")\n    assert hasher.digest() == b\"!1c\\xd2;\\x7f\\x8as\\xe5\\x16\\xc0~rsE\\xf9\"\n\n\ndef test_mmh3_x64_128_sintdigest() -> None:\n    hasher = mmh3.mmh3_x64_128()\n    hasher.update(b\"\")\n    assert hasher.sintdigest() == 0\n\n    hasher = mmh3.mmh3_x64_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fox jumps over the lazy dog\")\n    assert hasher.sintdigest() == -8943985938913228316176695348732677855\n\n    hasher = mmh3.mmh3_x64_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fox j\")\n    hasher.update(b\"umps over the lazy dog\")\n    assert hasher.sintdigest() == -8943985938913228316176695348732677855\n\n\ndef test_mmh3_x64_128_uintdigest() -> None:\n    hasher = mmh3.mmh3_x64_128()\n    hasher.update(b\"\")\n    assert hasher.uintdigest() == 0\n\n    hasher = mmh3.mmh3_x64_128(seed=1)\n    hasher.update(b\"\")\n    assert hasher.uintdigest() == 108177238965372658051732455265379769525\n\n    hasher = mmh3.mmh3_x64_128()\n    hasher.update(b\"foo\")\n    assert hasher.uintdigest() == 168394135621993849475852668931176482145\n\n    hasher = mmh3.mmh3_x64_128()\n    hasher.update(b\"fo\")\n    hasher.update(b\"o\")\n    assert hasher.uintdigest() == 168394135621993849475852668931176482145\n\n    hasher = mmh3.mmh3_x64_128()\n    hasher.update(b\"fooo\")\n    assert hasher.uintdigest() == 93757880664175803030724836966881520758\n\n    hasher = mmh3.mmh3_x64_128()\n    hasher.update(b\"fooofooo\")\n    assert hasher.uintdigest() == 211983152696995059280678248292944636041\n\n    hasher = mmh3.mmh3_x64_128()\n    hasher.update(b\"fooo\")\n    hasher.update(b\"fooo\")\n    assert hasher.uintdigest() == 211983152696995059280678248292944636041\n\n    hasher = mmh3.mmh3_x64_128()\n    hasher.update(b\"fooofoooo\")\n    assert hasher.uintdigest() == 338423359992422647011971677127905553798\n\n    hasher = mmh3.mmh3_x64_128()\n    hasher.update(b\"fooo\")\n    hasher.update(b\"foooo\")\n    assert hasher.uintdigest() == 338423359992422647011971677127905553798\n\n    hasher = mmh3.mmh3_x64_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fox jumps over the lazy dog\")\n    assert hasher.uintdigest() == 331338380982025235147197912083035533601\n\n    hasher = mmh3.mmh3_x64_128(seed=0x9747B28C)\n    hasher.update(b\"T\")\n    hasher.update(b\"he quick brown fox jumps over the lazy dog\")\n    assert hasher.uintdigest() == 331338380982025235147197912083035533601\n\n    hasher = mmh3.mmh3_x64_128(seed=0x9747B28C)\n    hasher.update(b\"The quic\")  # 8 bytes\n    hasher.update(b\"k brown fox jumps over the lazy dog\")\n    assert hasher.uintdigest() == 331338380982025235147197912083035533601\n\n    hasher = mmh3.mmh3_x64_128(seed=0x9747B28C)\n    hasher.update(b\"The quick\")\n    hasher.update(b\" brown fox jumps over the lazy dog\")\n    assert hasher.uintdigest() == 331338380982025235147197912083035533601\n\n\ndef test_mmh3_x64_128_stupledigest() -> None:\n    hasher = mmh3.mmh3_x64_128()\n    hasher.update(b\"\")\n    assert hasher.stupledigest() == (0, 0)\n\n    hasher = mmh3.mmh3_x64_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fox jumps over the lazy dog\")\n    assert hasher.stupledigest() == (8325606756057297185, -484854449282476315)\n\n    hasher = mmh3.mmh3_x64_128(seed=0x9747B28C)\n    hasher.update(b\"The quic\")\n    hasher.update(b\"k brown fox jumps over the lazy dog\")\n    assert hasher.stupledigest() == (8325606756057297185, -484854449282476315)\n\n\ndef test_mmh3_x64_128_utupledigest() -> None:\n    hasher = mmh3.mmh3_x64_128()\n    hasher.update(b\"\")\n    assert hasher.utupledigest() == (0, 0)\n\n    hasher = mmh3.mmh3_x64_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fox jumps over the lazy dog\")\n    assert hasher.utupledigest() == (8325606756057297185, 17961889624427075301)\n\n    hasher = mmh3.mmh3_x64_128(seed=0x9747B28C)\n    hasher.update(b\"The quic\")\n    hasher.update(b\"k brown fox jumps over the lazy dog\")\n    assert hasher.utupledigest() == (8325606756057297185, 17961889624427075301)\n\n\ndef test_mmh3_x64_128_copy() -> None:\n    hasher = mmh3.mmh3_x64_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fox\")\n\n    hasher2 = hasher.copy()\n\n    hasher.update(b\" jumps over the lazy dog\")\n    assert hasher.digest() == b\"!1c\\xd2;\\x7f\\x8as\\xe5\\x16\\xc0~rsE\\xf9\"\n\n    hasher2.update(b\" jumps over the lazy dog\")\n    assert hasher2.digest() == b\"!1c\\xd2;\\x7f\\x8as\\xe5\\x16\\xc0~rsE\\xf9\"\n\n\ndef test_mmh3_x86_128_basic_ops() -> None:\n    hasher = mmh3.mmh3_x86_128()\n    assert hasher.digest_size == 16\n    assert hasher.block_size == 32\n    assert hasher.name == \"mmh3_x86_128\"\n\n\ndef test_mmh3_x86_128_digest() -> None:\n    hasher = mmh3.mmh3_x86_128()\n    hasher.update(b\"\")\n    assert (\n        hasher.digest()\n        == b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n    )\n\n    hasher = mmh3.mmh3_x86_128(seed=1)\n    hasher.update(b\"\")\n    assert hasher.digest() == b\"\\xec\\xad\\xc4\\x88\\xb9\\x01\\xd2T\\xb9\\x01\\xd2T\\xb9\\x01\\xd2T\"\n\n    hasher = mmh3.mmh3_x86_128()\n    hasher.update(b\"foo\")\n    assert hasher.digest() == b\"%\\x1b|We%\\xb6`e%\\xb6`e%\\xb6`\"\n\n    hasher = mmh3.mmh3_x86_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown\")  # 15 bytes\n    assert (\n        hasher.digest() == b\"2\\xc3\\n\\xdaW\\xc2\\xcb\\xa9\\xc4\\xbe\\x12\\xb9\\xdc\\x01\\xe1\\x8e\"\n    )\n\n    hasher = mmh3.mmh3_x86_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown \")  # 16 bytes\n    assert hasher.digest() == b\"u\\xb6\\xf9\\x07\\xf5|\\x93,\\x0e\\xf5\\xf1\\xf0k\\x98\\x83\\x19\"\n\n    hasher = mmh3.mmh3_x86_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown\")  # 15 bytes\n    hasher.update(b\" fox jumps over the lazy dog\")\n    assert hasher.digest() == b\"^\\xd5\\xd4\\x8aqa\\xb8L\\x9c:\\xa7\\x8e>y\\xb6\\xcd\"\n\n    hasher = mmh3.mmh3_x86_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown \")  # 16 bytes\n    hasher.update(b\"fox jumps over the lazy dog\")\n    assert hasher.digest() == b\"^\\xd5\\xd4\\x8aqa\\xb8L\\x9c:\\xa7\\x8e>y\\xb6\\xcd\"\n\n    hasher = mmh3.mmh3_x86_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fox jumps over the lazy dog\")\n    assert hasher.digest() == b\"^\\xd5\\xd4\\x8aqa\\xb8L\\x9c:\\xa7\\x8e>y\\xb6\\xcd\"\n\n    hasher = mmh3.mmh3_x86_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fox ju\")\n    hasher.update(b\"mps ove\")\n    hasher.update(b\"r the la\")\n    hasher.update(b\"zy dog\")\n    assert hasher.digest() == b\"^\\xd5\\xd4\\x8aqa\\xb8L\\x9c:\\xa7\\x8e>y\\xb6\\xcd\"\n\n    hasher = mmh3.mmh3_x86_128(b\"\", 0x9747B28C)\n    hasher.update(b\"The quick brown fox ju\")\n    hasher.update(b\"mps ove\")\n    hasher.update(b\"r the la\")\n    hasher.update(b\"zy dog\")\n    assert hasher.digest() == b\"^\\xd5\\xd4\\x8aqa\\xb8L\\x9c:\\xa7\\x8e>y\\xb6\\xcd\"\n\n    hasher = mmh3.mmh3_x86_128(b\"The quick brown fox ju\", seed=0x9747B28C)\n    hasher.update(b\"mps ove\")\n    hasher.update(b\"r the la\")\n    hasher.update(b\"zy dog\")\n    assert hasher.digest() == b\"^\\xd5\\xd4\\x8aqa\\xb8L\\x9c:\\xa7\\x8e>y\\xb6\\xcd\"\n\n    hasher = mmh3.mmh3_x86_128(b\"The quick brown fox ju\", 0x9747B28C)\n    hasher.update(b\"mps ove\")\n    hasher.update(b\"r the la\")\n    hasher.update(b\"zy dog\")\n    assert hasher.digest() == b\"^\\xd5\\xd4\\x8aqa\\xb8L\\x9c:\\xa7\\x8e>y\\xb6\\xcd\"\n\n\ndef test_mmh3_x86_128_sintdigest() -> None:\n    hasher = mmh3.mmh3_x86_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fox jumps over the lazy dog\")\n    assert hasher.sintdigest() == -66843170628920214366208380873156012706\n\n\ndef test_mmh3_x86_128_uintdigest() -> None:\n    hasher = mmh3.mmh3_x86_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fox jumps over the lazy dog\")\n    assert hasher.uintdigest() == 273439196292018249097166226558612198750\n\n\ndef test_mmh3_x86_128_stupledigest() -> None:\n    hasher = mmh3.mmh3_x86_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fox jumps over the lazy dog\")\n    assert hasher.stupledigest() == (5528275682885686622, -3623575540584727908)\n\n\ndef test_mmh3_x86_128_utupledigest() -> None:\n    hasher = mmh3.mmh3_x86_128(seed=0x9747B28C)\n    hasher.update(b\"The quick brown fox jumps over the lazy dog\")\n    assert hasher.utupledigest() == (5528275682885686622, 14823168533124823708)\n"
  },
  {
    "path": "tox.ini",
    "content": "[tox]\nrequires =\n    tox>=4\nenvlist = lint, type, py{310,311,312,313,314,314t}\n\n[testenv]\ndescription = run unit tests\ncommands_pre =\n    uv pip install \".[test]\"\ncommands =\n    pytest {posargs}\n\n[testenv:lint]\ndescription = run linters with formatting\nskip_install = true\nallowlist_externals =\n    find\n    npx\ncommands_pre =\n    uv pip install \".[lint]\"\ncommands =\n    ruff format .\n    ruff check --fix .\n    find ./src/mmh3 -name '*.[ch]' -exec clang-format -i {} +\n    npx prettier --write .\n    pylint --recursive=y .\n    npx markdownlint --config .markdown-lint.yml \\\n      --ignore-path .gitignore **/*.md\n    codespell\n    actionlint\n\n[testenv:type]\ndescription = run type checks\ncommands_pre =\n    uv pip install \".[test,type]\"\ncommands =\n    mypy --strict tests\n\n[testenv:docs]\ndescription = run documentation build\nallowlist_externals =\n    make\ncommands_pre =\n    uv pip install \".[docs]\"\ncommands =\n    make -C docs clean\n    make -C docs html\n\n[testenv:build_cfiles]\nallowlist_externals =\n    find\n    git\ncommands_pre =\n    uv pip install \".[lint]\"\ncommands =\n    git submodule update --init\n    python util/refresh.py\n    find ./src/mmh3 -name '*.[ch]' -exec clang-format -i {} +\n\n[testenv:benchmark]\ndescription = run benchmarks\ncommands_pre =\n    uv pip install \".[benchmark]\"\ncommands =\n    python benchmark/benchmark.py {posargs}\n\n[testenv:plot]\ndescription = plot benchmark results\ncommands_pre =\n    uv pip install \".[benchmark,plot]\"\ncommands =\n    python benchmark/plot_graph.py {posargs}\n"
  },
  {
    "path": "util/FILE_HEADER",
    "content": "/***\n * This file is under MIT <year> Hajime Senuma, just like other files.\n * See LICENSE for details.\n *\n * It was originally written by Austin Appleby in C++ under the public domain,\n * but ported to PEP 7 C for Python 3.6 and later by the mmh3 project.\n *\n * Any issues should be reported to https://github.com/hajimes/mmh3/issues.\n *\n * The following is the original public domain notice by Austin Appleby.\n */\n\n//-----------------------------------------------------------------------------\n// MurmurHash3 was written by Austin Appleby, and is placed in the public\n// domain. The author hereby disclaims copyright to this source code."
  },
  {
    "path": "util/refresh.py",
    "content": "# pylint: disable=missing-function-docstring\n\"\"\"A script to generate Murmurhash3 C files from the original C++ source.\"\"\"\n\n# For forward references\nfrom __future__ import annotations\n\nimport os\nimport re\nimport textwrap\nfrom collections.abc import Callable\n\n###\n# Simple classes to handle the transformation of the original code.\n#\n\n\nclass MMH3Source:\n    \"\"\"A data class to represent the original source code of MurmurHash3.\n\n    Lines to be retrieved are hard-coded, as the original code is effectively frozen.\n    \"\"\"\n\n    def __init__(self, code: str) -> None:\n        self._code_lines = code.split(\"\\n\")\n\n    @property\n    def note_comment(self) -> str:\n        return \"\\n\".join(self._code_lines[4:8])\n\n    @property\n    def header_include(self) -> str:\n        return \"\\n\".join(self._code_lines[9:10])\n\n    @property\n    def macros(self) -> str:\n        return \"\\n\".join(self._code_lines[11:50])\n\n    @property\n    def getblock_functions(self) -> str:\n        return \"\\n\".join(self._code_lines[50:64])\n\n    @property\n    def finalization_mixes(self) -> str:\n        return \"\\n\".join(self._code_lines[64:91])\n\n    @property\n    def body(self) -> str:\n        return \"\\n\".join(self._code_lines[91:336])\n\n    @property\n    def finalization_x86_128(self) -> str:\n        return \"\\n\".join(self._code_lines[233:246])\n\n    @property\n    def finalization_x64_128(self) -> str:\n        return \"\\n\".join(self._code_lines[318:329])\n\n    @property\n    def constants_x86_128(self) -> str:\n        return \"\\n\".join(self._code_lines[160:164])\n\n    @property\n    def constants_x64_128(self) -> str:\n        return \"\\n\".join(self._code_lines[263:265])\n\n\nclass MMH3Header:\n    \"\"\"A data class to represent the original header code of MurmurHash3.\n\n    Lines to be retrieved are hard-coded, as the original code is effectively frozen.\n    \"\"\"\n\n    def __init__(self, code: str) -> None:\n        self._code_lines = code.split(\"\\n\")\n\n    @property\n    def header_guards_begin(self) -> str:\n        return \"\\n\".join(self._code_lines[4:7])\n\n    @property\n    def stdint(self) -> str:\n        return \"\\n\".join(self._code_lines[7:26])\n\n    @property\n    def declarations(self) -> str:\n        return \"\\n\".join(self._code_lines[26:36])\n\n    @property\n    def header_guards_end(self) -> str:\n        return \"\\n\".join(self._code_lines[36:37])\n\n\nclass MMH3CodeBuilder:\n    \"\"\"A builder class to generate the new MurmurHash3 C code.\"\"\"\n\n    def __init__(self) -> None:\n        self._code: list[tuple[str, list[Callable[[str], str]]]] = []\n\n    def add(\n        self, subcode: str, transforms: list[Callable[[str], str]] | None = None\n    ) -> MMH3CodeBuilder:\n        if transforms is None:\n            transforms = []\n\n        self._code.append((subcode, transforms))\n        return self\n\n    def build(self) -> str:\n        new_code = \"\"\n\n        for subcode, transforms in self._code:\n            for tr in transforms:\n                subcode = tr(subcode)\n            new_code += subcode + \"\\n\\n\"\n\n        return new_code\n\n\n###\n# The following functions are used to transform the original MurmurHash3 code.\n#\n\n\ndef append_python_directives(subcode: str) -> str:\n    \"\"\"Append Python.h, as well as a macro definition to handle 64-bit data.\n\n    Args:\n        subcode (str): The code to be appended.\n\n    Returns:\n        str: The appended code.\n    \"\"\"\n    subcode += \"\\n\\n\"\n\n    subcode += textwrap.dedent(\"\"\"\\\n        // To handle 64-bit data; see https://docs.python.org/3/c-api/arg.html\n        #ifndef PY_SSIZE_T_CLEAN\n        #define PY_SSIZE_T_CLEAN\n        #endif\n        #include <Python.h>\n        \"\"\")\n\n    return subcode\n\n\ndef append_byteswap_header(subcode: str) -> str:\n    \"\"\"Append a header to the code that includes byteswap.h if the system is big endian.\n\n    Args:\n        subcode (str): The code to be appended.\n\n    Returns:\n        str: The appended code.\n    \"\"\"\n    subcode += \"\\n\"\n\n    subcode += textwrap.dedent(\"\"\"\\\n        #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n        #include <byteswap.h>\n        #endif\n        \"\"\")\n\n    return subcode\n\n\ndef introduce_py_ssize_t(subcode: str) -> str:\n    \"\"\"Use Py_ssize_t instead of int as the index type.\n\n    Py_ssize_t is the type used by Python to represent the size of objects.\n    It is required to handle 64-bit data in Python extensions.\n\n    See https://docs.python.org/3/c-api/intro.html#c.Py_ssize_t\n    and\n    https://peps.python.org/pep-0353/\n\n    Args:\n        subcode (str): The code to be transformed.\n\n    Returns:\n        str: The transformed code.\n    \"\"\"\n    transformations = [\n        [\"int len\", \"Py_ssize_t len\"],\n        [\"const int nblocks\", \"const Py_ssize_t nblocks\"],\n        [\"for(int i\", \"for(Py_ssize_t i\"],\n    ]\n\n    for tr in transformations:\n        subcode = subcode.replace(tr[0], tr[1])\n\n    return subcode\n\n\ndef transform_getblocks(subcode: str) -> str:\n    \"\"\"Revise getblock functions so that it handles big endian and 64-bit data.\n\n    Args:\n        subcode (str): The code to be transformed.\n\n    Returns:\n        str: The transformed code.\n    \"\"\"\n    # pylint: disable=invalid-name\n\n    transformations = [\n        [\"FORCE_INLINE\", \"static FORCE_INLINE\"],\n        [\"int i\", \"Py_ssize_t i\"],\n    ]\n\n    for tr in transformations:\n        subcode = subcode.replace(tr[0], tr[1])\n\n    BYTE_SWAP_IF_BIG_ENDIAN = textwrap.dedent(\"\"\"\\\n        #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n            return bswap_\\\\1(p[i]);\n        #else\n            return p[i];\n        #endif\n        \"\"\")\n\n    subcode = re.sub(\n        r\"getblock(.*?)(\\s\\(.*?\\{\\n).*?\\}\",\n        \"getblock\\\\1\\\\2\" + BYTE_SWAP_IF_BIG_ENDIAN + \"}\",\n        subcode,\n        flags=re.DOTALL | re.MULTILINE,\n    )\n\n    return subcode\n\n\ndef transform_finalization_mixes(subcode: str) -> str:\n    \"\"\"Revise the finalization operations in MurmurHash3.\n\n    Args:\n        subcode (str): The code to be transformed.\n\n    Returns:\n        str: The transformed code.\n    \"\"\"\n\n    transformations = [\n        [\"FORCE_INLINE\", \"static FORCE_INLINE\"],\n        [\"int i\", \"Py_ssize_t i\"],\n    ]\n\n    for tr in transformations:\n        subcode = subcode.replace(tr[0], tr[1])\n\n    return subcode\n\n\ndef transform_x86_128_return(subcode: str) -> str:\n    \"\"\"Revise the return block of MurmurHash3_x86_128 so that it handles big endian.\n\n    Args:\n        subcode (str): The code to be transformed.\n\n    Returns:\n        str: The transformed code.\n    \"\"\"\n    # pylint: disable=invalid-name\n\n    BYTE_SWAP_IF_BIG_ENDIAN = textwrap.dedent(\"\"\"\\\n        #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n                ((uint32_t *)out)[0] = h2;\n                ((uint32_t *)out)[1] = h1;\n                ((uint32_t *)out)[2] = h4;\n                ((uint32_t *)out)[3] = h3;\n        #else\n            \\\\1\n        #endif\n        \"\"\")\n\n    subcode = re.sub(\n        r\"(\\(\\(uint32_t\\*\\)out\\)\\[0\\] = h1;[\\s\\S]*\\(\\(uint32_t\\*\\)out\\)\\[3\\] = h4;)\",\n        BYTE_SWAP_IF_BIG_ENDIAN,\n        subcode,\n        flags=re.DOTALL | re.MULTILINE,\n    )\n\n    return subcode\n\n\ndef expand_win_stdint_typedefs(subcode: str) -> str:\n    \"\"\"Delineate int type definitions for the older versions of the VS compiler.\n\n    Args:\n        subcode (str): The code to be transformed.\n\n    Returns:\n        str: The transformed code.\n    \"\"\"\n    # pylint: disable=invalid-name\n\n    MSC_STDINT_TYPEDEFS = textwrap.dedent(\"\"\"\\\n        typedef signed __int8 int8_t;\n        typedef signed __int32 int32_t;\n        typedef signed __int64 int64_t;\n        typedef unsigned __int8 uint8_t;\n        typedef unsigned __int32 uint32_t;\n        typedef unsigned __int64 uint64_t;\n        \"\"\")\n\n    return re.sub(\n        r\"typedef unsigned char(.*)uint64_t;\",\n        MSC_STDINT_TYPEDEFS,\n        subcode,\n        flags=re.DOTALL,\n    )\n\n\ndef append_mur_macros(subcode: str) -> str:\n    \"\"\"Append building blocks for multiply and rotate (MUR) operations.\n\n    These functions are used by mmh3 hashers.\n\n    In future updates, they may be also used by one-shot hash functions,\n    although performance tests must be employed before such refactoring.\n\n    Args:\n        subcode (str): The code to be transformed.\n\n    Returns:\n        str: The transformed code.\n    \"\"\"\n    subcode += \"\\n\\n\"\n\n    subcode += textwrap.dedent(\"\"\"\\\n        //-----------------------------------------------------------------------------\n        // Building blocks for multiply and rotate (MUR) operations.\n        // Names are taken from Google Guava's implementation\n        \"\"\")\n\n    subcode += \"\\n\"\n\n    subcode += textwrap.dedent(\"\"\"\\\n        static FORCE_INLINE uint32_t\n        mixK1(uint32_t k1)\n        {\n            const uint32_t c1 = 0xcc9e2d51;\n            const uint32_t c2 = 0x1b873593;\n\n            k1 *= c1;\n            k1 = ROTL32(k1, 15);\n            k1 *= c2;\n\n            return k1;\n        }\n        \"\"\")\n\n    subcode += textwrap.dedent(\"\"\"\\\n        static FORCE_INLINE uint32_t\n        mixH1(uint32_t h1, const uint32_t h2, const uint8_t shift, const uint32_t c1)\n        {\n            h1 = ROTL32(h1, shift);\n            h1 += h2;\n            h1 = h1 * 5 + c1;\n\n            return h1;\n        }\n        \"\"\")\n\n    subcode += textwrap.dedent(\"\"\"\\\n        static FORCE_INLINE uint64_t\n        mixK_x64_128(uint64_t k1, const uint8_t shift,\n                    const uint64_t c1, const uint64_t c2)\n        {\n            k1 *= c1;\n            k1 = ROTL64(k1, shift);\n            k1 *= c2;\n\n            return k1;\n        }\n        \"\"\")\n\n    subcode += textwrap.dedent(\"\"\"\\\n        static FORCE_INLINE uint64_t\n        mixK1_x64_128(uint64_t k1)\n        {\n            const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);\n            const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);\n\n            k1 *= c1;\n            k1 = ROTL64(k1, 31);\n            k1 *= c2;\n\n            return k1;\n        }\n        \"\"\")\n\n    subcode += textwrap.dedent(\"\"\"\\\n        static FORCE_INLINE uint64_t\n        mixK2_x64_128(uint64_t k2)\n        {\n            const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);\n            const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);\n\n            k2 *= c2;\n            k2 = ROTL64(k2, 33);\n            k2 *= c1;\n\n            return k2;\n        }\n        \"\"\")\n\n    subcode += textwrap.dedent(\"\"\"\\\n        static FORCE_INLINE uint64_t\n        mixH_x64_128(uint64_t h1, uint64_t h2, const uint8_t shift, const uint32_t c)\n        {\n            h1 = ROTL64(h1, shift);\n            h1 += h2;\n            h1 = h1 * 5 + c;\n\n            return h1;\n        }\n        \"\"\")\n\n    subcode += textwrap.dedent(\"\"\"\\\n        static FORCE_INLINE uint64_t\n        mixK_x86_128(uint32_t k, const uint8_t shift, const uint32_t c1,\n                    const uint32_t c2)\n        {\n            k *= c1;\n            k = ROTL32(k, shift);\n            k *= c2;\n\n            return k;\n        }\n        \"\"\")\n\n    return subcode\n\n\ndef generate_hasher_digest_x86_128_pre(subcode: str) -> str:\n    \"\"\"Generate the first part of the digest function for x86_128.\n\n    Args:\n        subcode (str): The constants in mmh3_x86_128.\n\n    Returns:\n        str: The first part of the digest function for x86_128.\n    \"\"\"\n    hasher_digests = \"\\n\\n\"\n\n    hasher_digests += textwrap.dedent(\"\"\"\\\n        static FORCE_INLINE void\n        digest_x86_128_impl(uint32_t h1, uint32_t h2, uint32_t h3, uint32_t h4,\n            const uint32_t k1, const uint32_t k2, const uint32_t k3,\n            const uint32_t k4, const Py_ssize_t len, const char *out)\n        {\n        \"\"\")\n\n    hasher_digests += subcode + \"\\n\"\n\n    return hasher_digests\n\n\ndef generate_hasher_digest_x86_128_main(subcode: str) -> str:\n    \"\"\"Generate the main part of the digest function for x86_128.\n\n    Args:\n        subcode (str): The finalization code in mmh3 x86_128.\n\n    Returns:\n        str: The main part of the digest function for x86_128.\n    \"\"\"\n    hasher_digests = \"\"\n\n    hasher_digests += textwrap.dedent(\"\"\"\\\n        h1 ^= mixK_x86_128(k1, 15, c1, c2);\n        h2 ^= mixK_x86_128(k2, 16, c2, c3);\n        h3 ^= mixK_x86_128(k3, 17, c3, c4);\n        h4 ^= mixK_x86_128(k4, 18, c4, c1);\n        \"\"\")\n\n    hasher_digests += subcode + \"\\n\"\n    hasher_digests += textwrap.dedent(\"\"\"\\\n        #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n            ((uint32_t *)out)[0] = bswap_32(h1);\n            ((uint32_t *)out)[1] = bswap_32(h2);\n            ((uint32_t *)out)[2] = bswap_32(h3);\n            ((uint32_t *)out)[3] = bswap_32(h4);\n        #else\n            ((uint32_t *)out)[0] = h1;\n            ((uint32_t *)out)[1] = h2;\n            ((uint32_t *)out)[2] = h3;\n            ((uint32_t *)out)[3] = h4;\n        #endif\n        \"\"\")\n    hasher_digests += \"\\n}\"\n\n    return hasher_digests\n\n\ndef generate_hasher_digest_x64_128(subcode: str) -> str:\n    \"\"\"Generate the digest function for x64_128.\n\n    Args:\n        subcode (str): The finalization code in mmh3 x64_128.\n\n    Returns:\n        str: The digest function for x64_128.\n    \"\"\"\n    hasher_digests = \"\\n\\n\"\n\n    hasher_digests += textwrap.dedent(\"\"\"\\\n        //-----------------------------------------------------------------------------\n        // Finalization function\n        \"\"\")\n\n    hasher_digests += \"\\n\"\n\n    hasher_digests += textwrap.dedent(\"\"\"\\\n        static FORCE_INLINE void\n        digest_x64_128_impl(uint64_t h1, uint64_t h2, const uint64_t k1,\n            const uint64_t k2, const Py_ssize_t len, const char *out)\n        {\n        \"\"\")\n    hasher_digests += textwrap.dedent(\"\"\"\\\n        h1 ^= mixK1_x64_128(k1);\n        h2 ^= mixK2_x64_128(k2);\n        \"\"\")\n    hasher_digests += subcode + \"\\n\"\n    hasher_digests += textwrap.dedent(\"\"\"\\\n        #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)\n            ((uint64_t *)out)[0] = bswap_64(h1);\n            ((uint64_t *)out)[1] = bswap_64(h2);\n        #else\n            ((uint64_t *)out)[0] = h1;\n            ((uint64_t *)out)[1] = h2;\n        #endif\n        \"\"\")\n    hasher_digests += \"\\n}\"\n\n    return hasher_digests\n\n\ndef fix_non_win_force_inline(subcode: str) -> str:\n    \"\"\"Fix the FORCE_INLINE macro so that it works on old GCC and RHEL.\n\n    Based on a commit from Micha Gorelick (@mynameisfiber).\n    https://github.com/hajimes/mmh3/pull/1\n\n    Args:\n        subcode (str): The code to be transformed.\n\n    Returns:\n        str: The transformed code.\n    \"\"\"\n    # pylint: disable=invalid-name\n\n    NON_WIN_FORCE_INLINE_ORIGINAL = (\n        \"#define\tFORCE_INLINE inline __attribute__((always_inline))\"\n    )\n\n    NON_WIN_FORCE_INLINE_REVISED = textwrap.dedent(\"\"\"\\\n        #if ((__GNUC__ > 4) || (__GNUC__ == 4 && GNUC_MINOR >= 4))\n        /* gcc version >= 4.4 4.1 = RHEL 5, 4.4 = RHEL 6. Don't inline for RHEL 5 gcc\n        * which is 4.1*/\n        #define FORCE_INLINE inline __attribute__((always_inline))\n        #else\n        #define FORCE_INLINE\n        #endif\n        \"\"\")\n\n    return subcode.replace(NON_WIN_FORCE_INLINE_ORIGINAL, NON_WIN_FORCE_INLINE_REVISED)\n\n\ndef force_inline_force_inline(subcode: str) -> str:\n    \"\"\"Force inline to use static FORCE_INLINE.\n\n    Args:\n        subcode (str): The code to be transformed.\n\n    Returns:\n        str: The transformed code.\n    \"\"\"\n    return re.sub(r\"^inline \", \"static FORCE_INLINE \", subcode, flags=re.MULTILINE)\n\n\ndef lowercase_function_names(subcode: str) -> str:\n    \"\"\"Lowercase functions names. Purely for style.\n\n    Args:\n        subcode (str): The code to be transformed.\n\n    Returns:\n        str: The transformed code.\n    \"\"\"\n\n    function_names = [\n        \"MurmurHash3_x86_32\",\n        \"MurmurHash3_x86_128\",\n        \"MurmurHash3_x64_128\",\n    ]\n\n    for fn in function_names:\n        subcode = subcode.replace(fn, fn.lower())\n\n    return subcode\n\n\nif __name__ == \"__main__\":\n    file_path = os.path.realpath(__file__)\n    dir_path = os.path.dirname(file_path)\n\n    original_source_path = os.path.join(dir_path, \"smhasher/src/MurmurHash3.cpp\")\n    original_header_path = os.path.join(dir_path, \"smhasher/src/MurmurHash3.h\")\n\n    NEW_SOURCE_NAME = \"murmurhash3.c\"\n    NEW_HEADER_NAME = \"murmurhash3.h\"\n    FILE_HEADER_NAME = \"FILE_HEADER\"\n\n    new_source_path = os.path.join(dir_path, \"../src/mmh3\", NEW_SOURCE_NAME)\n    new_header_path = os.path.join(dir_path, \"../src/mmh3\", NEW_HEADER_NAME)\n    file_header_path = os.path.join(dir_path, FILE_HEADER_NAME)\n\n    with (\n        open(original_source_path, encoding=\"utf-8\") as source_file,\n        open(original_header_path, encoding=\"utf-8\") as header_file,\n        open(file_header_path, encoding=\"utf-8\") as file_header_file,\n    ):\n        source = MMH3Source(source_file.read())\n        header = MMH3Header(header_file.read())\n        file_header = file_header_file.read()\n\n        new_source_builder = MMH3CodeBuilder()\n        new_source_builder.add(file_header)\n        new_source_builder.add(source.note_comment)\n        new_source_builder.add(source.header_include, [str.lower])\n        new_source_builder.add(\n            source.body,\n            [introduce_py_ssize_t, transform_x86_128_return, lowercase_function_names],\n        )\n\n        new_header_builder = MMH3CodeBuilder()\n        new_header_builder.add(file_header)\n        new_header_builder.add(\n            header.header_guards_begin,\n            [append_python_directives, append_byteswap_header],\n        )\n        new_header_builder.add(\n            header.stdint,\n            [expand_win_stdint_typedefs],\n        )\n        new_header_builder.add(\n            source.macros,\n            [fix_non_win_force_inline, force_inline_force_inline],\n        )\n        new_header_builder.add(\n            source.getblock_functions,\n            [transform_getblocks],\n        )\n        new_header_builder.add(\n            \"\",\n            [append_mur_macros],\n        )\n        new_header_builder.add(\n            source.finalization_mixes,\n            [transform_finalization_mixes],\n        )\n        new_header_builder.add(\n            source.finalization_x64_128,\n            [generate_hasher_digest_x64_128],\n        )\n        new_header_builder.add(\n            source.constants_x86_128,\n            [generate_hasher_digest_x86_128_pre],\n        )\n        new_header_builder.add(\n            source.finalization_x86_128,\n            [generate_hasher_digest_x86_128_main],\n        )\n        new_header_builder.add(\n            header.declarations,\n            [lowercase_function_names, introduce_py_ssize_t],\n        )\n        new_header_builder.add(header.header_guards_end)\n\n        with open(new_source_path, \"w\", encoding=\"utf-8\") as f:\n            f.write(new_source_builder.build())\n        with open(new_header_path, \"w\", encoding=\"utf-8\") as f:\n            f.write(new_header_builder.build())\n"
  }
]